From e2eb24a6972662c145947da4410b9c08a1071197 Mon Sep 17 00:00:00 2001 From: Amitabh Verma Date: Thu, 23 Jan 2025 13:47:56 -0500 Subject: [PATCH] Standalone auto-generated reconstruction GUI (#487) * pydantic-model initial commit - pydantic model UI widget creation based on CLI model - does no processing, creates yaml files * Updated prototype - partially working - refactored model to GUI implementation - added table to hold processing info, entries purge on completion - using click testing for now in prototyping - creates yaml to GUI - using napari notifications for msgs - testing on Windows * first working alpha version - Multi processing using pydantic models - Implemented a client/server approach for managing jobs queue - Updates each job status based on logs - Implemented Unique ID in CLI to associate Jobs with Process - Tested on Windows with locally modified submitit * possible fix for macOS - import OS specific pydantic imports for ModelMetaclass * use ver specific fix for pydantic instead of OS - pydantic possibly needs to be pinned at 1.10.19 @talonchandler - implemented ver specific import for now * set a timeout limit on job update thread - check for update on the job file, if not - notify user in process table and exit after a 2 min timeout * fixes & enhancements - fixes newly created yaml files to reside besides output zarrs - fixes Model numbers not correctly representing on Build & Run button - fixes "reconstruction algorithm" to show up as dropdown box - fixes "regularization strength" spinbox to accept 5 significant decimal place values - background path now accepts Path and str in model and shows up as directory selector button - each model container setting is now a collapsible item - fixes issues when widget is closed/hidden and then started again - all model(s) validation errors are not collected and presented in notification and highlighted on GUI in one go - each collapsible model entry also highlights if any validation errors are present - 
added confirm dialog for certain actions - added clear results button - job update thread will exit after a timeout limit and no update detected in job output file * Update setup.cfg - pinning pydantic lib ver to 1.10.19 * use PyQt6 for pyqtSignal use PyQt6 for pyqtSignal * standalone - fixes standalone widget * fixes for BG, etc - fixes BG selection and resetting if Path is invalid - fixes removal of row/stability - using Queued List to counter race conditions - added .zarr data validation check for Input, also used for BG - make uniqueID more unique - break thread update operations when user has cleared the results table - standalone GUI niceties * - fixes for user initiated row delete while processing another check if result row is deleted by user action while processing * major refactor to support positions in datasets - timeout of 5 min per Job - BF button error handling when not available - GUI initiates clearing logs on first run to avoid file exist errors - a single processing run now supports the CLI spawning multiple jobs * - fixes & enhancements - stand-alone GUI cmd - refactored UI based on suggestions - fixes cyclic import for stand-alone GUI - duplicates prev model settings when available - clears Model when changing Input datasets - load model looks for .yml files * checking for RuntimeWarning value * on-the-fly processing - checks if data is being acquired and utilizes polling (every 10s) to gather processing information and submits processing jobs once a minimum dim has been acquired - added a script to simulate acq of .zarr storage - added group-box for sections - added scrolling for tab so that it does not block horizontal resizing * Delete recOrder/tests/widget_tests/test_pydantic_model_widget.py * Delete recOrder/tests/widget_tests/test_simulate_acq.py * ditching v1.main from pydantic and reverting pydantic>=1.10.17 to test workflows * dont initialize server listening in main init of worker class - moved socket listening to sub method from 
init() and initialize when required for the first time (testing build & deploy) * incorporating discussed GUI changes - GUI changes to reflect sketch as discussed with @talonchandler @ieivanov - Output directory and validation - Open dataset after reconstruction based on user choice - pinning pydantic lib back to 1.10.19 * exit polling loop when closing app before finish * implemented a Stop method for On-The-Fly polling reconstructions implemented a Stop method for On-The-Fly polling reconstructions if required * GUI related - added info icon next to Input Store label when a dataset path is set which displays channel names for convenience, more metadata could be displayed that might be relevant for reconstruction - removed line widget at end of each model container to make scrollbar more apparent * create logs dir if it does not exist - we are only reading the location here but it needs to exist, the CLI is creating the logs based on the path * fixes output path not setting correctly * added pixel size meta to info icon * update output dir in defined models when changed * make on-the-fly entry scrollable and not block resizing * added script to simulate a "fake" recOrder acquisition - script to test on-the-fly reconstruction POC * fix for checking output path existing * fix for checking output path existing * logs folder to be created besides dataset - logs folder will reside next to the output dataset - fixed an issue with reconstructed data showing irrespective of selection * display SLURM related errors if Jobs output txt is empty * top scrollbar for model, container sizing now does not need second vert scrollbar * multi-pos bugfix + multiple enhancements fixes: - multi-pos dataset would be displayed after single pos processing enhancements: - CLI will print Job status when used as cmd line, not for GUI - use single socket connection when multi-pos is spawned by a request - added "rx" field to model-container - minor GUI tweaks * code formatting, minor 
refactoring, comments * with rx default as 1, catch and report OOM errors --- recOrder/acq/acquisition_workers.py | 1 + .../cli/apply_inverse_transfer_function.py | 42 +- recOrder/cli/gui_widget.py | 41 + recOrder/cli/jobs_mgmt.py | 184 + recOrder/cli/main.py | 6 + recOrder/cli/monitor.py | 64 +- recOrder/cli/parsing.py | 13 + recOrder/cli/reconstruct.py | 4 + recOrder/cli/settings.py | 3 +- recOrder/io/utils.py | 4 +- recOrder/plugin/gui.py | 6 +- recOrder/plugin/main_widget.py | 7 +- recOrder/plugin/tab_recon.py | 3918 +++++++++++++++++ recOrder/scripts/simulate_zarr_acq.py | 171 + recOrder/tests/util_tests/test_overlays.py | 3 +- setup.cfg | 2 +- 16 files changed, 4424 insertions(+), 45 deletions(-) create mode 100644 recOrder/cli/gui_widget.py create mode 100644 recOrder/cli/jobs_mgmt.py create mode 100644 recOrder/plugin/tab_recon.py create mode 100644 recOrder/scripts/simulate_zarr_acq.py diff --git a/recOrder/acq/acquisition_workers.py b/recOrder/acq/acquisition_workers.py index 4b19ab5e..ffd30517 100644 --- a/recOrder/acq/acquisition_workers.py +++ b/recOrder/acq/acquisition_workers.py @@ -598,6 +598,7 @@ def _reconstruct(self): transfer_function_dirpath=transfer_function_path, config_filepath=self.config_path, output_dirpath=reconstruction_path, + unique_id="recOrderAcq" ) # Read reconstruction to pass to emitters diff --git a/recOrder/cli/apply_inverse_transfer_function.py b/recOrder/cli/apply_inverse_transfer_function.py index b4f97a38..afb31e01 100644 --- a/recOrder/cli/apply_inverse_transfer_function.py +++ b/recOrder/cli/apply_inverse_transfer_function.py @@ -3,6 +3,7 @@ from functools import partial from pathlib import Path +import os import click import numpy as np import torch @@ -10,6 +11,9 @@ import submitit from iohub import open_ome_zarr +from typing import Final +from recOrder.cli import jobs_mgmt + from recOrder.cli import apply_inverse_models from recOrder.cli.parsing import ( config_filepath, @@ -18,6 +22,7 @@ processes_option, 
transfer_function_dirpath, ram_multiplier, + unique_id, ) from recOrder.cli.printing import echo_headline, echo_settings from recOrder.cli.settings import ReconstructionSettings @@ -28,6 +33,7 @@ from recOrder.io import utils from recOrder.cli.monitor import monitor_jobs +JM = jobs_mgmt.JobsManagement() def _check_background_consistency( background_shape, data_shape, input_channel_names @@ -293,6 +299,7 @@ def apply_inverse_transfer_function_cli( output_dirpath: Path, num_processes: int = 1, ram_multiplier: float = 1.0, + unique_id: str = "" ) -> None: output_metadata = get_reconstruction_output_metadata( input_position_dirpaths[0], config_filepath @@ -336,22 +343,25 @@ def apply_inverse_transfer_function_cli( f"{cpu_request} CPU{'s' if cpu_request > 1 else ''} and " f"{gb_ram_request} GB of memory per CPU." ) - executor = submitit.AutoExecutor(folder="logs") - + + name_without_ext = os.path.splitext(Path(output_dirpath).name)[0] + executor_folder = os.path.join(Path(output_dirpath).parent.absolute(), name_without_ext + "_logs") + executor = submitit.AutoExecutor(folder=Path(executor_folder)) + executor.update_parameters( slurm_array_parallelism=np.min([50, num_jobs]), slurm_mem_per_cpu=f"{gb_ram_request}G", slurm_cpus_per_task=cpu_request, slurm_time=60, slurm_partition="cpu", + timeout_min=jobs_mgmt.JOBS_TIMEOUT # more slurm_*** resource parameters here ) - + jobs = [] with executor.batch(): - for input_position_dirpath in input_position_dirpaths: - jobs.append( - executor.submit( + for input_position_dirpath in input_position_dirpaths: + job: Final = executor.submit( apply_inverse_transfer_function_single_position, input_position_dirpath, transfer_function_dirpath, @@ -359,13 +369,27 @@ def apply_inverse_transfer_function_cli( output_dirpath / Path(*input_position_dirpath.parts[-3:]), num_processes, output_metadata["channel_names"], - ) - ) + ) + jobs.append(job) echo_headline( f"{num_jobs} job{'s' if num_jobs > 1 else ''} submitted {'locally' if 
executor.cluster == 'local' else 'via ' + executor.cluster}." ) - monitor_jobs(jobs, input_position_dirpaths) + doPrint = True # CLI prints Job status when used as cmd line + if unique_id != "": # no unique_id means no job submission info being listened to + JM.start_client() + i=0 + for j in jobs: + job : submitit.Job = j + job_idx : str = job.job_id + position = input_position_dirpaths[i] + JM.put_Job_in_list(job, unique_id, str(job_idx), position, str(executor.folder.absolute())) + i += 1 + JM.send_data_thread() + JM.set_shorter_timeout() + doPrint = False # CLI printing disabled when using GUI + + monitor_jobs(jobs, input_position_dirpaths, doPrint) @click.command() diff --git a/recOrder/cli/gui_widget.py b/recOrder/cli/gui_widget.py new file mode 100644 index 00000000..5d2b2873 --- /dev/null +++ b/recOrder/cli/gui_widget.py @@ -0,0 +1,41 @@ +import sys +from PyQt6.QtWidgets import QApplication, QWidget, QVBoxLayout, QStyle +import click +from recOrder.plugin import tab_recon + +try: + import qdarktheme +except:pass + +PLUGIN_NAME = "recOrder: Computational Toolkit for Label-Free Imaging" +PLUGIN_ICON = "🔬" + +@click.command() +def gui(): + """GUI for recOrder: Computational Toolkit for Label-Free Imaging""" + + app = QApplication(sys.argv) + app.setStyle("Fusion") # Other options: "Fusion", "Windows", "macOS", "WindowsVista" + try: + qdarktheme.setup_theme("dark") + except:pass + window = MainWindow() + window.setWindowTitle(PLUGIN_ICON + " " + PLUGIN_NAME + " " + PLUGIN_ICON) + + pixmapi = getattr(QStyle.StandardPixmap, "SP_TitleBarMenuButton") + icon = app.style().standardIcon(pixmapi) + window.setWindowIcon(icon) + + window.show() + sys.exit(app.exec()) + +class MainWindow(QWidget): + def __init__(self): + super().__init__() + recon_tab = tab_recon.Ui_ReconTab_Form(stand_alone=True) + layout = QVBoxLayout() + self.setLayout(layout) + layout.addWidget(recon_tab.recon_tab_mainScrollArea) + +if __name__ == "__main__": + gui() \ No newline at end of file diff 
--git a/recOrder/cli/jobs_mgmt.py b/recOrder/cli/jobs_mgmt.py new file mode 100644 index 00000000..4e833631 --- /dev/null +++ b/recOrder/cli/jobs_mgmt.py @@ -0,0 +1,184 @@ +import os, json +from pathlib import Path +import socket +import submitit +import threading, time + +DIR_PATH = os.path.dirname(os.path.realpath(__file__)) +FILE_PATH = os.path.join(DIR_PATH, "main.py") + +SERVER_PORT = 8089 # Choose an available port +JOBS_TIMEOUT = 5 # 5 mins +SERVER_uIDsjobIDs = {} # uIDsjobIDs[uid][jid] = job + +class JobsManagement(): + + def __init__(self, *args, **kwargs): + self.clientsocket = None + self.uIDsjobIDs = {} # uIDsjobIDs[uid][jid] = job + self.DATA_QUEUE = [] + + def check_for_jobID_File(self, jobID, logs_path, extension="out"): + + if Path(logs_path).exists(): + files = os.listdir(logs_path) + try: + for file in files: + if file.endswith(extension): + if jobID in file: + file_path = os.path.join(logs_path, file) + f = open(file_path, "r") + txt = f.read() + f.close() + return txt + except Exception as exc: + print(exc.args) + return "" + + def set_shorter_timeout(self): + self.clientsocket.settimeout(30) + + def start_client(self): + try: + self.clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.clientsocket.settimeout(300) + self.clientsocket.connect(('localhost', SERVER_PORT)) + self.clientsocket.settimeout(None) + + thread = threading.Thread(target=self.stop_client) + thread.start() + except Exception as exc: + print(exc.args) + + # The stopClient() is called right with the startClient() but does not stop + # and essentially is a wait thread listening and is triggered by either a + # connection or timeout. Based on condition triggered by user, reconstruction + # completion or errors the end goal is to close the socket connection which + # would let the CLI exit. I could break it down to 2 parts but the idea was to + # keep the clientsocket.close() call within one method to make it easier to follow. 
+ def stop_client(self): + try: + time.sleep(2) + while True: + time.sleep(1) + buf = "" + try: + buf = self.clientsocket.recv(1024) + except: + pass + if len(buf) > 0: + if b"\n" in buf: + dataList = buf.split(b"\n") + for data in dataList: + if len(data)>0: + decoded_string = data.decode() + json_str = str(decoded_string) + json_obj = json.loads(json_str) + u_idx = json_obj["uID"] + job_idx = str(json_obj["jID"]) + cmd = json_obj["command"] + if cmd == "clientRelease": + if self.has_submitted_job(u_idx, job_idx): + self.clientsocket.close() + break + if cmd == "cancel": + if self.has_submitted_job(u_idx, job_idx): + try: + job = self.uIDsjobIDs[u_idx][job_idx] + job.cancel() + except Exception as exc: + pass # possibility of throwing an exception based on diff. OS + forDeletions = [] + for uID in self.uIDsjobIDs.keys(): + for jID in self.uIDsjobIDs[uID].keys(): + job = self.uIDsjobIDs[uID][jID] + if job.done(): + forDeletions.append((uID, jID)) + for idx in range(len(forDeletions)): + del self.uIDsjobIDs[forDeletions[idx][0]][forDeletions[idx][1]] + forDeletions = [] + for uID in self.uIDsjobIDs.keys(): + if len(self.uIDsjobIDs[uID].keys()) == 0: + forDeletions.append(uID) + for idx in range(len(forDeletions)): + del self.uIDsjobIDs[forDeletions[idx]] + if len(self.uIDsjobIDs.keys()) == 0: + self.clientsocket.close() + break + except Exception as exc: + self.clientsocket.close() + print(exc.args) + + def check_all_ExpJobs_completion(self, uID): + if uID in SERVER_uIDsjobIDs.keys(): + for jobEntry in SERVER_uIDsjobIDs[uID].keys(): + job:submitit.Job = SERVER_uIDsjobIDs[uID][jobEntry]["job"] + jobBool = SERVER_uIDsjobIDs[uID][jobEntry]["bool"] + if job is not None and job.done() == False: + return False + if jobBool == False: + return False + return True + + def put_Job_completion_in_list(self, job_bool, uID: str, jID: str, mode="client"): + if uID in SERVER_uIDsjobIDs.keys(): + if jID in SERVER_uIDsjobIDs[uID].keys(): + SERVER_uIDsjobIDs[uID][jID]["bool"] = 
job_bool + + def add_data(self, data): + self.DATA_QUEUE.append(data) + + def send_data_thread(self): + thread = threading.Thread(target=self.send_data) + thread.start() + + def send_data(self): + data = "".join(self.DATA_QUEUE) + self.clientsocket.send(data.encode()) + self.DATA_QUEUE = [] + + def put_Job_in_list(self, job, uID: str, jID: str, well:str, log_folder_path:str="", mode="client"): + try: + well = str(well) + jID = str(jID) + if ".zarr" in well: + wells = well.split(".zarr") + well = wells[1].replace("\\","-").replace("/","-")[1:] + if mode == "client": + if uID not in self.uIDsjobIDs.keys(): + self.uIDsjobIDs[uID] = {} + self.uIDsjobIDs[uID][jID] = job + else: + if jID not in self.uIDsjobIDs[uID].keys(): + self.uIDsjobIDs[uID][jID] = job + json_obj = {uID:{"jID": str(jID), "pos": well, "log": log_folder_path}} + json_str = json.dumps(json_obj)+"\n" + self.add_data(json_str) + else: + # from server side jobs object entry is a None object + # this will be later checked as completion boolean for a ExpID which might + # have several Jobs associated with it + if uID not in SERVER_uIDsjobIDs.keys(): + SERVER_uIDsjobIDs[uID] = {} + SERVER_uIDsjobIDs[uID][jID] = {} + SERVER_uIDsjobIDs[uID][jID]["job"] = job + SERVER_uIDsjobIDs[uID][jID]["bool"] = False + else: + SERVER_uIDsjobIDs[uID][jID] = {} + SERVER_uIDsjobIDs[uID][jID]["job"] = job + SERVER_uIDsjobIDs[uID][jID]["bool"] = False + except Exception as exc: + print(exc.args) + + def has_submitted_job(self, uID: str, jID: str, mode="client")->bool: + jID = str(jID) + if mode == "client": + if uID in self.uIDsjobIDs.keys(): + if jID in self.uIDsjobIDs[uID].keys(): + return True + return False + else: + if uID in SERVER_uIDsjobIDs.keys(): + if jID in SERVER_uIDsjobIDs[uID].keys(): + return True + return False diff --git a/recOrder/cli/main.py b/recOrder/cli/main.py index a05fb452..2568d5ec 100644 --- a/recOrder/cli/main.py +++ b/recOrder/cli/main.py @@ -3,6 +3,8 @@ from 
recOrder.cli.apply_inverse_transfer_function import apply_inv_tf from recOrder.cli.compute_transfer_function import compute_tf from recOrder.cli.reconstruct import reconstruct +from recOrder.cli.gui_widget import gui + CONTEXT = {"help_option_names": ["-h", "--help"]} @@ -21,3 +23,7 @@ def cli(): cli.add_command(reconstruct) cli.add_command(compute_tf) cli.add_command(apply_inv_tf) +cli.add_command(gui) + +if __name__ == "__main__": + cli() \ No newline at end of file diff --git a/recOrder/cli/monitor.py b/recOrder/cli/monitor.py index a86b7fa6..474637af 100644 --- a/recOrder/cli/monitor.py +++ b/recOrder/cli/monitor.py @@ -7,28 +7,31 @@ import sys -def _move_cursor_up(n_lines): - sys.stdout.write("\033[F" * n_lines) +def _move_cursor_up(n_lines, do_print=True): + if do_print: + sys.stdout.write("\033[F" * n_lines) -def _print_status(jobs, position_dirpaths, elapsed_list, print_indices=None): +def _print_status(jobs, position_dirpaths, elapsed_list, print_indices=None, do_print=True): columns = [15, 30, 40, 50] # header - sys.stdout.write( - "\033[K" # clear line - "\033[96mID" # cyan - f"\033[{columns[0]}G WELL " - f"\033[{columns[1]}G STATUS " - f"\033[{columns[2]}G NODE " - f"\033[{columns[2]}G ELAPSED\n" - ) + if do_print: + sys.stdout.write( + "\033[K" # clear line + "\033[96mID" # cyan + f"\033[{columns[0]}G WELL " + f"\033[{columns[1]}G STATUS " + f"\033[{columns[2]}G NODE " + f"\033[{columns[2]}G ELAPSED\n" + ) if print_indices is None: print_indices = range(len(jobs)) complete_count = 0 + for i, (job, position_dirpath) in enumerate(zip(jobs, position_dirpaths)): try: node_name = job.get_info()["NodeList"] # slowest, so do this first @@ -43,22 +46,24 @@ def _print_status(jobs, position_dirpaths, elapsed_list, print_indices=None): elapsed_list[i] += 1 # inexact timing else: color = "\033[91m" # red - + if i in print_indices: - sys.stdout.write( - f"\033[K" # clear line - f"{color}{job.job_id}" - f"\033[{columns[0]}G {'/'.join(position_dirpath.parts[-3:])}" - 
f"\033[{columns[1]}G {job.state}" - f"\033[{columns[2]}G {node_name}" - f"\033[{columns[3]}G {elapsed_list[i]} s\n" - ) + if do_print: + sys.stdout.write( + f"\033[K" # clear line + f"{color}{job.job_id}" + f"\033[{columns[0]}G {'/'.join(position_dirpath.parts[-3:])}" + f"\033[{columns[1]}G {job.state}" + f"\033[{columns[2]}G {node_name}" + f"\033[{columns[3]}G {elapsed_list[i]} s\n" + ) sys.stdout.flush() - print( - f"\033[32m{complete_count}/{len(jobs)} jobs complete. " - " to move monitor to background. " - " twice to cancel jobs." - ) + if do_print: + print( + f"\033[32m{complete_count}/{len(jobs)} jobs complete. " + " to move monitor to background. " + " twice to cancel jobs." + ) return elapsed_list @@ -87,7 +92,7 @@ def _get_jobs_to_print(jobs, num_to_print): return job_indices_to_print -def monitor_jobs(jobs: list[submitit.Job], position_dirpaths: list[Path]): +def monitor_jobs(jobs: list[submitit.Job], position_dirpaths: list[Path], do_print=True): """Displays the status of a list of submitit jobs with corresponding paths. 
Parameters @@ -108,7 +113,7 @@ def monitor_jobs(jobs: list[submitit.Job], position_dirpaths: list[Path]): # print all jobs once if terminal is too small if shutil.get_terminal_size().lines - NON_JOB_LINES < len(jobs): - _print_status(jobs, position_dirpaths, elapsed_list) + _print_status(jobs, position_dirpaths, elapsed_list, do_print) # main monitor loop try: @@ -125,14 +130,15 @@ def monitor_jobs(jobs: list[submitit.Job], position_dirpaths: list[Path]): position_dirpaths, elapsed_list, job_indices_to_print, + do_print, ) time.sleep(1) - _move_cursor_up(num_jobs_to_print + 2) + _move_cursor_up(num_jobs_to_print + 2, do_print) # Print final status time.sleep(1) - _print_status(jobs, position_dirpaths, elapsed_list) + _print_status(jobs, position_dirpaths, elapsed_list, do_print=do_print) # cancel jobs if ctrl+c except KeyboardInterrupt: diff --git a/recOrder/cli/parsing.py b/recOrder/cli/parsing.py index d302bd80..d32f2ace 100644 --- a/recOrder/cli/parsing.py +++ b/recOrder/cli/parsing.py @@ -118,4 +118,17 @@ def decorator(f: Callable) -> Callable: help="SLURM RAM multiplier.", )(f) + return decorator + +def unique_id() -> Callable: + def decorator(f: Callable) -> Callable: + return click.option( + "--unique-id", + "-uid", + default="", + required=False, + type=str, + help="Unique ID.", + )(f) + return decorator \ No newline at end of file diff --git a/recOrder/cli/reconstruct.py b/recOrder/cli/reconstruct.py index 4dcd8aae..9c77c11e 100644 --- a/recOrder/cli/reconstruct.py +++ b/recOrder/cli/reconstruct.py @@ -14,6 +14,7 @@ output_dirpath, processes_option, ram_multiplier, + unique_id, ) @@ -23,12 +24,14 @@ @output_dirpath() @processes_option(default=1) @ram_multiplier() +@unique_id() def reconstruct( input_position_dirpaths, config_filepath, output_dirpath, num_processes, ram_multiplier, + unique_id, ): """ Reconstruct a dataset using a configuration file. 
This is a @@ -65,4 +68,5 @@ def reconstruct( output_dirpath, num_processes, ram_multiplier, + unique_id, ) diff --git a/recOrder/cli/settings.py b/recOrder/cli/settings.py index a7661fd9..39056ceb 100644 --- a/recOrder/cli/settings.py +++ b/recOrder/cli/settings.py @@ -1,5 +1,6 @@ import os from typing import List, Literal, Optional, Union +from pathlib import Path from pydantic.v1 import ( BaseModel, @@ -40,7 +41,7 @@ def swing_range(cls, v): class BirefringenceApplyInverseSettings(WavelengthIllumination): - background_path: str = "" + background_path: Union[str, Path] = "" remove_estimated_background: bool = False flip_orientation: bool = False rotate_orientation: bool = False diff --git a/recOrder/io/utils.py b/recOrder/io/utils.py index c29769cc..229ac496 100644 --- a/recOrder/io/utils.py +++ b/recOrder/io/utils.py @@ -154,8 +154,8 @@ def yaml_to_model(yaml_path: Path, model): Examples -------- - >>> from my_model import MyModel - >>> model = yaml_to_model('model.yaml', MyModel) + # >>> from my_model import MyModel + # >>> model = yaml_to_model('model.yaml', MyModel) """ yaml_path = Path(yaml_path) diff --git a/recOrder/plugin/gui.py b/recOrder/plugin/gui.py index 84cf37a7..0e27c00b 100644 --- a/recOrder/plugin/gui.py +++ b/recOrder/plugin/gui.py @@ -9,7 +9,7 @@ from qtpy import QtCore, QtGui, QtWidgets - +from recOrder.plugin import tab_recon class Ui_Form(object): def setupUi(self, Form): @@ -924,6 +924,10 @@ def setupUi(self, Form): self.scrollArea_4.setWidget(self.scrollAreaWidgetContents_4) self.gridLayout_6.addWidget(self.scrollArea_4, 4, 0, 1, 1) self.tabWidget.addTab(self.Acquisition, "") + + self.tab_reconstruction = tab_recon.Ui_ReconTab_Form(Form) + self.tabWidget.addTab(self.tab_reconstruction.recon_tab_mainScrollArea, 'Reconstruction') + self.Display = QtWidgets.QWidget() self.Display.setObjectName("Display") self.gridLayout_18 = QtWidgets.QGridLayout(self.Display) diff --git a/recOrder/plugin/main_widget.py b/recOrder/plugin/main_widget.py index 
402d28d2..1f2b5ff9 100644 --- a/recOrder/plugin/main_widget.py +++ b/recOrder/plugin/main_widget.py @@ -90,6 +90,7 @@ def __init__(self, napari_viewer: Viewer): # Setup GUI elements self.ui = gui.Ui_Form() self.ui.setupUi(self) + self.ui.tab_reconstruction.set_viewer(napari_viewer) # Override initial tab focus self.ui.tabWidget.setCurrentIndex(0) @@ -909,7 +910,11 @@ def connect_to_mm(self): raise KeyError(msg) if not self.bf_channel_found: - self.ui.qbutton_acq_phase_from_bf.disconnect() + try: + self.ui.qbutton_acq_phase_from_bf.disconnect() + except Exception as exc: + print(exc.args) + logging.debug(exc.args) self.ui.qbutton_acq_phase_from_bf.setStyleSheet( self.disabled_button_style ) diff --git a/recOrder/plugin/tab_recon.py b/recOrder/plugin/tab_recon.py new file mode 100644 index 00000000..6a14b526 --- /dev/null +++ b/recOrder/plugin/tab_recon.py @@ -0,0 +1,3918 @@ +import os, json, subprocess, time, datetime, uuid +import socket, threading +from pathlib import Path + +from qtpy import QtCore +from qtpy.QtCore import Qt, QEvent, QThread +from qtpy.QtWidgets import * +from magicgui.widgets import * +from PyQt6.QtCore import pyqtSignal + +from iohub.ngff import open_ome_zarr + +from typing import List, Literal, Union, Final, Annotated +from magicgui import widgets +from magicgui.type_map import get_widget_class +import warnings + +from napari import Viewer + +from recOrder.io import utils +from recOrder.cli import settings, jobs_mgmt +from napari.utils import notifications + +import concurrent.futures + +import importlib.metadata + +import pydantic.v1, pydantic +from pydantic.v1 import ( + BaseModel, + Extra, + NonNegativeFloat, + NonNegativeInt, + PositiveFloat, + root_validator, + validator, +) + +try: + # Use version specific pydantic import for ModelMetaclass + # prefer to pin to 1.10.19 + version = importlib.metadata.version("pydantic") + # print("Your Pydantic library ver:{v}.".format(v=version)) + if version >= "2.0.0": + print( + "Your Pydantic 
library ver:{v}. Recommended ver is: 1.10.19".format( + v=version + ) + ) + from pydantic.main import ValidationError + from pydantic.main import BaseModel + from pydantic.main import ModelMetaclass + elif version >= "1.10.19": + from pydantic.main import ValidationError + from pydantic.main import BaseModel + from pydantic.main import ModelMetaclass + else: + print( + "Your Pydantic library ver:{v}. Recommended ver is: 1.10.19".format( + v=version + ) + ) + from pydantic.main import ValidationError + from pydantic.main import BaseModel + from pydantic.main import ModelMetaclass +except: + print("Pydantic library was not found. Ver 1.10.19 is recommended.") + +STATUS_submitted_pool = "Submitted_Pool" +STATUS_submitted_job = "Submitted_Job" +STATUS_running_pool = "Running_Pool" +STATUS_running_job = "Running_Job" +STATUS_finished_pool = "Finished_Pool" +STATUS_finished_job = "Finished_Job" +STATUS_errored_pool = "Errored_Pool" +STATUS_errored_job = "Errored_Job" +STATUS_user_cleared_job = "User_Cleared_Job" +STATUS_user_cancelled_job = "User_Cancelled_Job" + +MSG_SUCCESS = {"msg": "success"} +JOB_COMPLETION_STR = "Job completed successfully" +JOB_RUNNING_STR = "Starting with JobEnvironment" +JOB_TRIGGERED_EXC = "Submitted job triggered an exception" +JOB_OOM_EVENT = "oom_kill event" + +_validate_alert = "⚠" +_validate_ok = "✔️" +_green_dot = "🟢" +_red_dot = "🔴" +_info_icon = "ⓘ" + +# For now replicate CLI processing modes - these could reside in the CLI settings file as well +# for consistency +OPTION_TO_MODEL_DICT = { + "birefringence": {"enabled": False, "setting": None}, + "phase": {"enabled": False, "setting": None}, + "fluorescence": {"enabled": False, "setting": None}, +} + +CONTAINERS_INFO = {} + +# This keeps an instance of the MyWorker server that is listening +# napari will not stop processes and the Hide event is not reliable +HAS_INSTANCE = {"val": False, "instance": None} + +# Components Queue list for new Jobs spanned from single processing 
+NEW_WIDGETS_QUEUE = [] +NEW_WIDGETS_QUEUE_THREADS = [] +MULTI_JOBS_REFS = {} +ROW_POP_QUEUE = [] + +# Main class for the Reconstruction tab +# Not efficient since instantiated from GUI +# Does not have access to common functions in main_widget +# ToDo : From main_widget and pass self reference +class Ui_ReconTab_Form(QWidget): + + def __init__(self, parent=None, stand_alone=False): + super().__init__(parent) + self._ui = parent + self.stand_alone = stand_alone + self.viewer: Viewer = None + if HAS_INSTANCE["val"]: + self.current_dir_path = str(Path.cwd()) + self.directory = str(Path.cwd()) + self.input_directory = HAS_INSTANCE["input_directory"] + self.output_directory = HAS_INSTANCE["output_directory"] + self.model_directory = HAS_INSTANCE["model_directory"] + self.yaml_model_file = HAS_INSTANCE["yaml_model_file"] + else: + self.directory = str(Path.cwd()) + self.current_dir_path = str(Path.cwd()) + self.input_directory = str(Path.cwd()) + self.output_directory = str(Path.cwd()) + self.model_directory = str(Path.cwd()) + self.yaml_model_file = str(Path.cwd()) + + self.input_directory_dataset = None + self.input_directory_datasetMeta = None + self.input_channel_names = [] + + # Parent (Widget) which holds the GUI ############################## + self.recon_tab_mainScrollArea = QScrollArea() + self.recon_tab_mainScrollArea.setWidgetResizable(True) + + self.recon_tab_widget = QWidget() + self.recon_tab_widget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding + ) + self.recon_tab_layout = QVBoxLayout() + self.recon_tab_layout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + self.recon_tab_layout.setContentsMargins(0, 0, 0, 0) + self.recon_tab_layout.setSpacing(0) + self.recon_tab_widget.setLayout(self.recon_tab_layout) + self.recon_tab_mainScrollArea.setWidget(self.recon_tab_widget) + + # Top Section Group - Data ############################## + group_box_Data_groupBox_widget = QGroupBox("Data") + group_box_Data_layout = QVBoxLayout() + 
group_box_Data_layout.setContentsMargins(0, 5, 0, 0) + group_box_Data_layout.setSpacing(0) + group_box_Data_groupBox_widget.setLayout(group_box_Data_layout) + + # Input Data ############################## + self.data_input_widget = QWidget() + self.data_input_widget_layout = QHBoxLayout() + self.data_input_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.data_input_widget.setLayout(self.data_input_widget_layout) + + self.data_input_Label = widgets.Label(value="Input Store") + # self.data_input_Label.native.setMinimumWidth(97) + self.data_input_LineEdit = widgets.LineEdit(value=self.input_directory) + self.data_input_PushButton = widgets.PushButton(label="Browse") + # self.data_input_PushButton.native.setMinimumWidth(75) + self.data_input_PushButton.clicked.connect(self.browse_dir_path_input) + self.data_input_LineEdit.changed.connect( + self.read_and_set_input_path_on_validation + ) + + self.data_input_widget_layout.addWidget(self.data_input_Label.native) + self.data_input_widget_layout.addWidget( + self.data_input_LineEdit.native + ) + self.data_input_widget_layout.addWidget( + self.data_input_PushButton.native + ) + + # Output Data ############################## + self.data_output_widget = QWidget() + self.data_output_widget_layout = QHBoxLayout() + self.data_output_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.data_output_widget.setLayout(self.data_output_widget_layout) + + self.data_output_Label = widgets.Label(value="Output Directory") + self.data_output_LineEdit = widgets.LineEdit( + value=self.output_directory + ) + self.data_output_PushButton = widgets.PushButton(label="Browse") + # self.data_output_PushButton.native.setMinimumWidth(75) + self.data_output_PushButton.clicked.connect( + self.browse_dir_path_output + ) + self.data_output_LineEdit.changed.connect( + self.read_and_set_out_path_on_validation + ) + + self.data_output_widget_layout.addWidget(self.data_output_Label.native) + 
self.data_output_widget_layout.addWidget( + self.data_output_LineEdit.native + ) + self.data_output_widget_layout.addWidget( + self.data_output_PushButton.native + ) + + self.data_input_Label.native.setMinimumWidth(115) + self.data_output_Label.native.setMinimumWidth(115) + + group_box_Data_layout.addWidget(self.data_input_widget) + group_box_Data_layout.addWidget(self.data_output_widget) + self.recon_tab_layout.addWidget(group_box_Data_groupBox_widget) + + ################################## + + # Middle Section - Models ############################## + # Selection modes, New, Load, Clear + # Pydantic Models ScrollArea + + group_box_Models_groupBox_widget = QGroupBox("Models") + group_box_Models_layout = QVBoxLayout() + group_box_Models_layout.setContentsMargins(0, 5, 0, 0) + group_box_Models_layout.setSpacing(0) + group_box_Models_groupBox_widget.setLayout(group_box_Models_layout) + + self.models_widget = QWidget() + self.models_widget_layout = QHBoxLayout() + self.models_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.models_widget.setLayout(self.models_widget_layout) + + self.modes_selected = OPTION_TO_MODEL_DICT.copy() + + # Make a copy of the Reconstruction settings mode, these will be used as template + for mode in self.modes_selected.keys(): + self.modes_selected[mode]["setting"] = None + + # Checkboxes for the modes to select single or combination of modes + for mode in self.modes_selected.keys(): + self.modes_selected[mode]["Checkbox"] = widgets.Checkbox( + name=mode, label=mode + ) + self.models_widget_layout.addWidget( + self.modes_selected[mode]["Checkbox"].native + ) + + # PushButton to create a copy of the model - UI + self.models_new_PushButton = widgets.PushButton(label="New") + # self.models_new_PushButton.native.setMinimumWidth(100) + self.models_new_PushButton.clicked.connect(self.build_acq_contols) + + self.models_load_PushButton = DropButton(text="Load", recon_tab=self) + # 
self.models_load_PushButton.setMinimumWidth(90) + + # Passing model location label to model location selector + self.models_load_PushButton.clicked.connect( + lambda: self.browse_dir_path_model() + ) + + # PushButton to clear all copies of models that are create for UI + self.models_clear_PushButton = widgets.PushButton(label="Clear") + # self.models_clear_PushButton.native.setMinimumWidth(110) + self.models_clear_PushButton.clicked.connect(self.clear_all_models) + + self.models_widget_layout.addWidget(self.models_new_PushButton.native) + self.models_widget_layout.addWidget(self.models_load_PushButton) + self.models_widget_layout.addWidget( + self.models_clear_PushButton.native + ) + + # Middle scrollable component which will hold Editable/(vertical) Expanding UI + self.models_scrollArea = QScrollArea() + self.models_scrollArea.setWidgetResizable(True) + self.models_container_widget = DropWidget(self) + self.models_container_widget_layout = QVBoxLayout() + self.models_container_widget_layout.setContentsMargins(0, 0, 0, 0) + self.models_container_widget_layout.setSpacing(2) + self.models_container_widget_layout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + self.models_container_widget.setLayout( + self.models_container_widget_layout + ) + self.models_scrollArea.setWidget(self.models_container_widget) + + group_box_Models_layout.addWidget(self.models_widget) + group_box_Models_layout.addWidget(self.models_scrollArea) + + ################################## + + # Create the splitter to resize Middle and Bottom Sections if required ################################## + splitter = QSplitter() + splitter.setOrientation(Qt.Orientation.Vertical) + splitter.setSizes([600, 200]) + + self.recon_tab_layout.addWidget(splitter) + + # Reconstruction ################################## + # Run, Processing, On-The-Fly + group_box_Reconstruction_groupBox_widget = QGroupBox( + "Reconstruction Queue" + ) + group_box_Reconstruction_layout = QVBoxLayout() + 
group_box_Reconstruction_layout.setContentsMargins(5, 10, 5, 5) + group_box_Reconstruction_layout.setSpacing(2) + group_box_Reconstruction_groupBox_widget.setLayout( + group_box_Reconstruction_layout + ) + + splitter.addWidget(group_box_Models_groupBox_widget) + splitter.addWidget(group_box_Reconstruction_groupBox_widget) + + my_splitter_handle = splitter.handle(1) + my_splitter_handle.setStyleSheet("background: 1px rgb(128,128,128);") + splitter.setStyleSheet( + """QSplitter::handle:pressed {background-color: #ca5;}""" + ) + + # PushButton to validate and Run the yaml file(s) based on selection against the Input store + self.reconstruction_run_PushButton = widgets.PushButton( + name="RUN Model" + ) + self.reconstruction_run_PushButton.native.setMinimumWidth(100) + self.reconstruction_run_PushButton.clicked.connect( + self.build_model_and_run + ) + + group_box_Reconstruction_layout.addWidget( + self.reconstruction_run_PushButton.native + ) + + # Tabs - Processing & On-The-Fly + tabs_Reconstruction = QTabWidget() + group_box_Reconstruction_layout.addWidget(tabs_Reconstruction) + + # Table for Jobs processing entries + tab1_processing_widget = QWidget() + tab1_processing_widget_layout = QVBoxLayout() + tab1_processing_widget_layout.setContentsMargins(5, 5, 5, 5) + tab1_processing_widget_layout.setSpacing(2) + tab1_processing_widget.setLayout(tab1_processing_widget_layout) + self.proc_table_QFormLayout = QFormLayout() + self.proc_table_QFormLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + tab1_processing_form_widget = QWidget() + tab1_processing_form_widget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding + ) + tab1_processing_form_widget.setLayout(self.proc_table_QFormLayout) + tab1_processing_widget_layout.addWidget(tab1_processing_form_widget) + + _clear_results_btn = widgets.PushButton(label="Clear Results") + _clear_results_btn.clicked.connect(self.clear_results_table) + 
tab1_processing_widget_layout.addWidget(_clear_results_btn.native) + + # Table for On-The-Fly processing entries + tab2_processing_widget = QWidget() + tab2_processing_widget_layout = QVBoxLayout() + tab2_processing_widget_layout.setContentsMargins(0, 0, 0, 0) + tab2_processing_widget_layout.setSpacing(0) + tab2_processing_widget.setLayout(tab2_processing_widget_layout) + self.proc_OTF_table_QFormLayout = QFormLayout() + self.proc_OTF_table_QFormLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + _proc_OTF_table_widget = QWidget() + _proc_OTF_table_widget.setSizePolicy( + QSizePolicy.Expanding, QSizePolicy.Expanding + ) + _proc_OTF_table_widget.setLayout(self.proc_OTF_table_QFormLayout) + tab2_processing_widget_layout.addWidget(_proc_OTF_table_widget) + tab2_processing_widget.setMaximumHeight(100) + + tabs_Reconstruction.addTab(tab1_processing_widget, "Processing") + tabs_Reconstruction.addTab(tab2_processing_widget, "On-The-Fly") + + # Editable List holding pydantic class(es) as per user selection + self.pydantic_classes = list() + self.prev_model_settings = {} + self.index = 0 + self.pollData = False + + # Stores Model & Components values which cause validation failure - can be highlighted on the model field as Red + self.modelHighlighterVals = {} + + # handle napari's close widget and avoid starting a second server + if HAS_INSTANCE["val"]: + self.worker: MyWorker = HAS_INSTANCE["MyWorker"] + self.worker.set_new_instances( + self.proc_table_QFormLayout, self, self._ui + ) + else: + self.worker = MyWorker(self.proc_table_QFormLayout, self, self._ui) + HAS_INSTANCE["val"] = True + HAS_INSTANCE["MyWorker"] = self.worker + + self.app = QApplication.instance() + self.app.lastWindowClosed.connect( + self.myCloseEvent + ) # this line is connection to signal close + + ###################################################### + + # our defined close event since napari doesnt do + def myCloseEvent(self): + event = QEvent(QEvent.Type.Close) + self.closeEvent(event) 
+ # self.app.exit() + + # on napari close - cleanup + def closeEvent(self, event): + if event.type() == QEvent.Type.Close: + self.worker.stop_server() + + def hideEvent(self, event): + if event.type() == QEvent.Type.Hide and ( + self._ui is not None and self._ui.isVisible() + ): + pass + + def showEvent(self, event): + if event.type() == QEvent.Type.Show: + pass + + def set_viewer(self, viewer): + self.viewer = viewer + + def show_dataset(self, data_path): + # Show reconstruction data + try: + if self.viewer is not None: + self.viewer.open(data_path, plugin="napari-ome-zarr") + except Exception as exc: + self.message_box(exc.args) + + def confirm_dialog(self, msg="Confirm your selection ?"): + qm = QMessageBox + ret = qm.question( + self.recon_tab_widget, + "Confirm", + msg, + qm.Yes | qm.No, + ) + if ret == qm.Yes: + return True + else: + return False + + # Copied from main_widget + # ToDo: utilize common functions + # Input data selector + def browse_dir_path_input(self): + if len(self.pydantic_classes) > 0 and not self.confirm_dialog( + "Changing Input Data will reset your models. Continue ?" 
+ ): + return + else: + self.clear_all_models(silent=True) + try: + result = self.open_file_dialog( + self.input_directory, "dir", filter="ZARR Storage (*.zarr)" + ) + # .zarr is a folder but we could implement a filter to scan for "ending with" and present those if required + except Exception as exc: + self.message_box(exc.args) + return + + if result == "": + return + + self.data_input_LineEdit.value = result + + def browse_dir_path_output(self): + try: + result = self.open_file_dialog(self.output_directory, "dir") + except Exception as exc: + self.message_box(exc.args) + return + + if result == "": + return + + if not Path(result).exists(): + self.message_box("Output Directory path must exist !") + return + + self.data_output_LineEdit.value = result + + def browse_dir_path_inputBG(self, elem): + result = self.open_file_dialog(self.directory, "dir") + if result == "": + return + + ret, ret_msg = self.validate_input_data(result, BG=True) + if not ret: + self.message_box(ret_msg) + return + + elem.value = result + + def validate_input_data( + self, input_data_folder: str, metadata=False, BG=False + ) -> bool: + try: + self.input_channel_names = [] + self.data_input_Label.value = "Input Store" + input_paths = Path(input_data_folder) + with open_ome_zarr(input_paths, mode="r") as dataset: + try: + self.input_channel_names = dataset.channel_names + self.data_input_Label.value = ( + "Input Store" + " " + _info_icon + ) + self.data_input_Label.tooltip = ( + "Channel Names:\n- " + + "\n- ".join(self.input_channel_names) + ) + except Exception as exc: + print(exc.args) + + try: + for _, pos in dataset.positions(): + axes = pos.zgroup.attrs["multiscales"][0]["axes"] + string_array_n = [str(x["name"]) for x in axes] + string_array = [ + str(x) + for x in pos.zgroup.attrs["multiscales"][0][ + "datasets" + ][0]["coordinateTransformations"][0]["scale"] + ] + string_scale = [] + for i in range(len(string_array_n)): + string_scale.append( + "{n}={d}".format( + 
    # call back for input LineEdit path changed manually
    # include data validation
    def read_and_set_input_path_on_validation(self):
        """Validate the manually edited input-store path and apply it.

        On any failure the LineEdit is rolled back to the last known good
        ``self.input_directory``. On success the working directories and the
        cached last-used paths are updated, and per-mode settings memorized
        from a previous store are discarded.
        """
        # Empty value: restore the previous path and inform the user.
        if (
            self.data_input_LineEdit.value is None
            or len(self.data_input_LineEdit.value) == 0
        ):
            self.data_input_LineEdit.value = self.input_directory
            self.message_box("Input data path cannot be empty")
            return
        # Path must exist on disk before the deeper ome-zarr validation.
        if not Path(self.data_input_LineEdit.value).exists():
            self.data_input_LineEdit.value = self.input_directory
            self.message_box("Input data path must point to a valid location")
            return

        result = self.data_input_LineEdit.value
        # Deep validation: opens the store as ome-zarr and gathers channel
        # names / scale metadata for the input label tooltip.
        valid, ret_msg = self.validate_input_data(result)

        if valid:
            self.directory = Path(result).parent.absolute()
            self.current_dir_path = result
            self.input_directory = result

            # A new store invalidates settings carried over between models.
            self.prev_model_settings = {}

            self.save_last_paths()
        else:
            self.data_input_LineEdit.value = self.input_directory
            self.message_box(ret_msg)

        # Default the output location to the input store's parent folder.
        # NOTE(review): this runs even when validation failed (re-deriving
        # from the rolled-back input path) and assigns a Path object to the
        # LineEdit — presumably magicgui coerces it to str; confirm both.
        self.data_output_LineEdit.value = Path(
            self.input_directory
        ).parent.absolute()
Path(self.data_output_LineEdit.value).exists(): + self.data_output_LineEdit.value = self.output_directory + self.message_box("Output data path must point to a valid location") + return + + self.output_directory = self.data_output_LineEdit.value + + self.validate_model_output_paths() + + def validate_model_output_paths(self): + if len(self.pydantic_classes) > 0: + for model_item in self.pydantic_classes: + output_LineEdit = model_item["output_LineEdit"] + output_Button = model_item["output_Button"] + model_item["output_parent_dir"] = self.output_directory + + full_out_path = os.path.join( + Path(self.output_directory).absolute(), + output_LineEdit.value, + ) + model_item["output"] = full_out_path + + save_path_exists = ( + True if Path(full_out_path).exists() else False + ) + output_LineEdit.label = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + output_LineEdit.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + output_Button.text = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + output_Button.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + + def is_dataset_acq_running(self, zattrs: dict) -> bool: + """ + Checks the zattrs for CurrentDimensions & FinalDimensions key and tries to figure if + data acquisition is running + """ + + required_order = ["time", "position", "z", "channel"] + if "CurrentDimensions" in zattrs.keys(): + my_dict = zattrs["CurrentDimensions"] + sorted_dict_acq = { + k: my_dict[k] + for k in sorted(my_dict, key=lambda x: required_order.index(x)) + } + if "FinalDimensions" in zattrs.keys(): + my_dict = zattrs["FinalDimensions"] + sorted_dict_final = { + k: my_dict[k] + for k in sorted(my_dict, key=lambda x: required_order.index(x)) + } + if sorted_dict_acq != sorted_dict_final: + return True + return False + + # Output data selector + def browse_model_dir_path_output(self, elem): + 
result = self.open_file_dialog(self.output_directory, "save") + if result == "": + return + + save_path_exists = True if Path(result).exists() else False + elem.label = "Output Data:" + ( + "" if not save_path_exists else (" " + _validate_alert) + ) + elem.tooltip = "" if not save_path_exists else "Output file exists" + + elem.value = Path(result).name + + self.save_last_paths() + + # call back for output LineEdit path changed manually + def read_and_set_output_path_on_validation(self, elem1, elem2, save_path): + if elem1.value is None or len(elem1.value) == 0: + elem1.value = Path(save_path).name + + save_path = os.path.join( + Path(self.output_directory).absolute(), elem1.value + ) + + save_path_exists = True if Path(save_path).exists() else False + elem1.label = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + elem1.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + elem2.text = ( + "" if not save_path_exists else (_validate_alert + " ") + ) + "Output Data:" + elem2.tooltip = ( + "" + if not save_path_exists + else (_validate_alert + "Output file exists") + ) + + self.save_last_paths() + + # Copied from main_widget + # ToDo: utilize common functions + # Output data selector + def browse_dir_path_model(self): + results = self.open_file_dialog( + self.directory, "files", filter="YAML Files (*.yml)" + ) # returns list + if len(results) == 0 or results == "": + return + + self.model_directory = str(Path(results[0]).parent.absolute()) + self.directory = self.model_directory + self.current_dir_path = self.model_directory + + self.save_last_paths() + self.open_model_files(results) + + def open_model_files(self, results: List): + pydantic_models = list() + for result in results: + self.yaml_model_file = result + + with open(result, "r") as yaml_in: + yaml_object = utils.yaml.safe_load( + yaml_in + ) # yaml_object will be a list or a dict + jsonString = json.dumps(self.convert(yaml_object)) + 
json_out = json.loads(jsonString) + json_dict = dict(json_out) + + selected_modes = list(OPTION_TO_MODEL_DICT.copy().keys()) + exclude_modes = list(OPTION_TO_MODEL_DICT.copy().keys()) + + for k in range(len(selected_modes) - 1, -1, -1): + if selected_modes[k] in json_dict.keys(): + exclude_modes.pop(k) + else: + selected_modes.pop(k) + + pruned_pydantic_class, ret_msg = self.build_model(selected_modes) + if pruned_pydantic_class is None: + self.message_box(ret_msg) + return + + pydantic_model, ret_msg = self.get_model_from_file( + self.yaml_model_file + ) + if pydantic_model is None: + if ( + isinstance(ret_msg, List) + and len(ret_msg) == 2 + and len(ret_msg[0]["loc"]) == 3 + and ret_msg[0]["loc"][2] == "background_path" + ): + pydantic_model = pruned_pydantic_class # if only background_path fails validation + json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] = "" + self.message_box( + "background_path:\nPath was invalid and will be reset" + ) + else: + self.message_box(ret_msg) + return + else: + # make sure "background_path" is valid + bg_loc = json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] + if bg_loc != "": + extension = os.path.splitext(bg_loc)[1] + if len(extension) > 0: + bg_loc = Path( + os.path.join( + str(Path(bg_loc).parent.absolute()), + "background.zarr", + ) + ) + else: + bg_loc = Path(os.path.join(bg_loc, "background.zarr")) + if not bg_loc.exists() or not self.validate_input_data( + str(bg_loc) + ): + self.message_box( + "background_path:\nPwas invalid and will be reset" + ) + json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] = "" + else: + json_dict["birefringence"]["apply_inverse"][ + "background_path" + ] = str(bg_loc.parent.absolute()) + + pydantic_model = self.create_acq_contols2( + selected_modes, exclude_modes, pydantic_model, json_dict + ) + if pydantic_model is None: + self.message_box("Error - pydantic model returned None") + return + + pydantic_models.append(pydantic_model) + + 
return pydantic_models + + # useful when using close widget and not napari close and we might need them again + def save_last_paths(self): + HAS_INSTANCE["current_dir_path"] = self.current_dir_path + HAS_INSTANCE["input_directory"] = self.input_directory + HAS_INSTANCE["output_directory"] = self.output_directory + HAS_INSTANCE["model_directory"] = self.model_directory + HAS_INSTANCE["yaml_model_file"] = self.yaml_model_file + + # clears the results table + def clear_results_table(self): + index = self.proc_table_QFormLayout.rowCount() + if index < 1: + self.message_box("There are no processing results to clear !") + return + if self.confirm_dialog(): + for i in range(self.proc_table_QFormLayout.rowCount()): + self.proc_table_QFormLayout.removeRow(0) + + def remove_row(self, row, expID): + try: + if row < self.proc_table_QFormLayout.rowCount(): + widgetItem = self.proc_table_QFormLayout.itemAt(row) + if widgetItem is not None: + name_widget = widgetItem.widget() + toolTip_string = str(name_widget.toolTip) + if expID in toolTip_string: + self.proc_table_QFormLayout.removeRow( + row + ) # removeRow vs takeRow for threads ? 
+ except Exception as exc: + print(exc.args) + + # marks fields on the Model that cause a validation error + def model_highlighter(self, errs): + try: + for uid in errs.keys(): + self.modelHighlighterVals[uid] = {} + container = errs[uid]["cls"] + self.modelHighlighterVals[uid]["errs"] = errs[uid]["errs"] + self.modelHighlighterVals[uid]["items"] = [] + self.modelHighlighterVals[uid]["tooltip"] = [] + if len(errs[uid]["errs"]) > 0: + self.model_highlighter_setter( + errs[uid]["errs"], container, uid + ) + except Exception as exc: + print(exc.args) + # more of a test feature - no need to show up + + # format all model errors into a display format for napari error message box + def format_string_for_error_display(self, errs): + try: + ret_str = "" + for uid in errs.keys(): + if len(errs[uid]["errs"]) > 0: + ret_str += errs[uid]["collapsibleBox"] + "\n" + for idx in range(len(errs[uid]["errs"])): + ret_str += f"{'>'.join(errs[uid]['errs'][idx]['loc'])}:\n{errs[uid]['errs'][idx]['msg']} \n" + ret_str += "\n" + return ret_str + except Exception as exc: + return ret_str + + # recursively fix the container for highlighting + def model_highlighter_setter( + self, errs, container: Container, containerID, lev=0 + ): + try: + layout = container.native.layout() + for i in range(layout.count()): + item = layout.itemAt(i) + if item.widget(): + widget = layout.itemAt(i).widget() + if ( + ( + not isinstance(widget._magic_widget, CheckBox) + and not isinstance( + widget._magic_widget, PushButton + ) + ) + and not isinstance(widget._magic_widget, LineEdit) + and isinstance( + widget._magic_widget._inner_widget, Container + ) + and not (widget._magic_widget._inner_widget is None) + ): + self.model_highlighter_setter( + errs, + widget._magic_widget._inner_widget, + containerID, + lev + 1, + ) + else: + for idx in range(len(errs)): + if len(errs[idx]["loc"]) - 1 < lev: + pass + elif ( + isinstance(widget._magic_widget, CheckBox) + or isinstance(widget._magic_widget, LineEdit) + or 
isinstance(widget._magic_widget, PushButton) + ): + if widget._magic_widget.label == errs[idx][ + "loc" + ][lev].replace("_", " "): + if widget._magic_widget.tooltip is None: + widget._magic_widget.tooltip = "-\n" + self.modelHighlighterVals[containerID][ + "items" + ].append(widget._magic_widget) + self.modelHighlighterVals[containerID][ + "tooltip" + ].append(widget._magic_widget.tooltip) + widget._magic_widget.tooltip += ( + errs[idx]["msg"] + "\n" + ) + widget._magic_widget.native.setStyleSheet( + "border:1px solid rgb(255, 255, 0); border-width: 1px;" + ) + elif ( + widget._magic_widget._label_widget.value + == errs[idx]["loc"][lev].replace("_", " ") + ): + if ( + widget._magic_widget._label_widget.tooltip + is None + ): + widget._magic_widget._label_widget.tooltip = ( + "-\n" + ) + self.modelHighlighterVals[containerID][ + "items" + ].append( + widget._magic_widget._label_widget + ) + self.modelHighlighterVals[containerID][ + "tooltip" + ].append( + widget._magic_widget._label_widget.tooltip + ) + widget._magic_widget._label_widget.tooltip += ( + errs[idx]["msg"] + "\n" + ) + widget._magic_widget._label_widget.native.setStyleSheet( + "border:1px solid rgb(255, 255, 0); border-width: 1px;" + ) + if ( + widget._magic_widget._inner_widget.tooltip + is None + ): + widget._magic_widget._inner_widget.tooltip = ( + "-\n" + ) + self.modelHighlighterVals[containerID][ + "items" + ].append( + widget._magic_widget._inner_widget + ) + self.modelHighlighterVals[containerID][ + "tooltip" + ].append( + widget._magic_widget._inner_widget.tooltip + ) + widget._magic_widget._inner_widget.tooltip += ( + errs[idx]["msg"] + "\n" + ) + widget._magic_widget._inner_widget.native.setStyleSheet( + "border:1px solid rgb(255, 255, 0); border-width: 1px;" + ) + except Exception as exc: + print(exc.args) + + # recursively fix the container for highlighting + def model_reset_highlighter_setter(self): + try: + for containerID in self.modelHighlighterVals.keys(): + items = 
self.modelHighlighterVals[containerID]["items"] + tooltip = self.modelHighlighterVals[containerID]["tooltip"] + i = 0 + for widItem in items: + widItem.native.setStyleSheet( + "border:1px solid rgb(0, 0, 0); border-width: 0px;" + ) + widItem.tooltip = tooltip[i] + i += 1 + + except Exception as exc: + print(exc.args) + + except Exception as exc: + print(exc.args) + + # passes msg to napari notifications + def message_box(self, msg, type="exc"): + if len(msg) > 0: + try: + json_object = msg + json_txt = "" + for err in json_object: + json_txt = ( + json_txt + + "Loc: {loc}\nMsg:{msg}\nType:{type}\n\n".format( + loc=err["loc"], msg=err["msg"], type=err["type"] + ) + ) + json_txt = str(json_txt) + # ToDo: format it better + # formatted txt does not show up properly in msg-box ?? + except: + json_txt = str(msg) + + # show is a message box + if self.stand_alone: + self.message_box_stand_alone(json_txt) + else: + if type == "exc": + notifications.show_error(json_txt) + else: + notifications.show_info(json_txt) + + def message_box_stand_alone(self, msg): + q = QMessageBox( + QMessageBox.Warning, + "Message", + str(msg), + parent=self.recon_tab_widget, + ) + q.setStandardButtons(QMessageBox.StandardButton.Ok) + q.setIcon(QMessageBox.Icon.Warning) + q.exec_() + + def cancel_job(self, btn: PushButton): + if self.confirm_dialog(): + btn.enabled = False + btn.text = btn.text + " (cancel called)" + + def add_widget( + self, parentLayout: QVBoxLayout, expID, jID, table_entry_ID="", pos="" + ): + + jID = str(jID) + _cancelJobBtntext = "Cancel Job {jID} ({posName})".format( + jID=jID, posName=pos + ) + _cancelJobButton = widgets.PushButton( + name="JobID", label=_cancelJobBtntext, enabled=True, value=False + ) + _cancelJobButton.clicked.connect( + lambda: self.cancel_job(_cancelJobButton) + ) + _txtForInfoBox = "Updating {id}-{pos}: Please wait... 
\nJobID assigned: {jID} ".format( + id=table_entry_ID, jID=jID, pos=pos + ) + _scrollAreaCollapsibleBoxDisplayWidget = ScrollableLabel( + text=_txtForInfoBox + ) + + _scrollAreaCollapsibleBoxWidgetLayout = QVBoxLayout() + _scrollAreaCollapsibleBoxWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidgetLayout.addWidget( + _cancelJobButton.native + ) + _scrollAreaCollapsibleBoxWidgetLayout.addWidget( + _scrollAreaCollapsibleBoxDisplayWidget + ) + + _scrollAreaCollapsibleBoxWidget = QWidget() + _scrollAreaCollapsibleBoxWidget.setLayout( + _scrollAreaCollapsibleBoxWidgetLayout + ) + _scrollAreaCollapsibleBox = QScrollArea() + _scrollAreaCollapsibleBox.setWidgetResizable(True) + _scrollAreaCollapsibleBox.setMinimumHeight(300) + _scrollAreaCollapsibleBox.setWidget(_scrollAreaCollapsibleBoxWidget) + + _collapsibleBoxWidgetLayout = QVBoxLayout() + _collapsibleBoxWidgetLayout.addWidget(_scrollAreaCollapsibleBox) + + _collapsibleBoxWidget = CollapsibleBox( + table_entry_ID + " - " + pos + ) # tableEntryID, tableEntryShortDesc - should update with processing status + _collapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _collapsibleBoxWidget.setContentLayout(_collapsibleBoxWidgetLayout) + + parentLayout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + parentLayout.addWidget(_collapsibleBoxWidget) + + MULTI_JOBS_REFS[expID + jID] = {} + MULTI_JOBS_REFS[expID + jID]["cancelBtn"] = _cancelJobButton + MULTI_JOBS_REFS[expID + jID][ + "infobox" + ] = _scrollAreaCollapsibleBoxDisplayWidget + NEW_WIDGETS_QUEUE.remove(expID + jID) + + def add_table_entry_job(self, proc_params): + + tableEntryID = proc_params["tableEntryID"] + parentLayout: QVBoxLayout = proc_params["parent_layout"] + + _cancelJobButton = widgets.PushButton( + name="JobID", label="Cancel Job", value=False, enabled=False + ) + _cancelJobButton.clicked.connect( + lambda: self.cancel_job(_cancelJobButton) + ) + _txtForInfoBox = 
"Updating {id}: Please wait...".format( + id=tableEntryID + ) + _scrollAreaCollapsibleBoxDisplayWidget = ScrollableLabel( + text=_txtForInfoBox + ) + _scrollAreaCollapsibleBoxDisplayWidget.setFixedHeight(300) + + proc_params["table_entry_infoBox"] = ( + _scrollAreaCollapsibleBoxDisplayWidget + ) + proc_params["cancelJobButton"] = _cancelJobButton + parentLayout.addWidget(_cancelJobButton.native) + parentLayout.addWidget(_scrollAreaCollapsibleBoxDisplayWidget) + + return proc_params + + def add_remove_check_OTF_table_entry( + self, OTF_dir_path, bool_msg, do_check=False + ): + if do_check: + try: + for row in range(self.proc_OTF_table_QFormLayout.rowCount()): + widgetItem = self.proc_OTF_table_QFormLayout.itemAt(row) + if widgetItem is not None: + name_widget: QWidget = widgetItem.widget() + name_string = str(name_widget.objectName()) + if OTF_dir_path in name_string: + for item in name_widget.findChildren(QPushButton): + _poll_Stop_PushButton: QPushButton = item + return _poll_Stop_PushButton.isChecked() + return False + except Exception as exc: + print(exc.args) + return False + else: + if bool_msg: + _poll_otf_label = ScrollableLabel( + text=OTF_dir_path + " " + _green_dot + ) + _poll_Stop_PushButton = QPushButton("Stop") + _poll_Stop_PushButton.setCheckable( + True + ) # Make the button checkable + _poll_Stop_PushButton.clicked.connect( + lambda: self.stop_OTF_push_button_call( + _poll_otf_label, OTF_dir_path + " " + _red_dot + ) + ) + + _poll_data_widget = QWidget() + _poll_data_widget.setObjectName(OTF_dir_path) + _poll_data_widget_layout = QHBoxLayout() + _poll_data_widget.setLayout(_poll_data_widget_layout) + _poll_data_widget_layout.addWidget(_poll_otf_label) + _poll_data_widget_layout.addWidget(_poll_Stop_PushButton) + + self.proc_OTF_table_QFormLayout.insertRow(0, _poll_data_widget) + else: + try: + for row in range( + self.proc_OTF_table_QFormLayout.rowCount() + ): + widgetItem = self.proc_OTF_table_QFormLayout.itemAt( + row + ) + if widgetItem is not 
None: + name_widget: QWidget = widgetItem.widget() + name_string = str(name_widget.objectName()) + if OTF_dir_path in name_string: + self.proc_OTF_table_QFormLayout.removeRow(row) + except Exception as exc: + print(exc.args) + + def stop_OTF_push_button_call(self, label, txt): + _poll_otf_label: QLabel = label + _poll_otf_label.setText(txt) + self.setDisabled(True) + + # adds processing entry to _qwidgetTabEntry_layout as row item + # row item will be purged from table as processing finishes + # there could be 3 tabs for this processing table status + # Running, Finished, Errored + def addTableEntry(self, table_entry_ID, table_entry_short_desc, proc_params): + _scrollAreaCollapsibleBoxWidgetLayout = QVBoxLayout() + _scrollAreaCollapsibleBoxWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidget = QWidget() + _scrollAreaCollapsibleBoxWidget.setLayout( + _scrollAreaCollapsibleBoxWidgetLayout + ) + _scrollAreaCollapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + + _scrollAreaCollapsibleBox = QScrollArea() + _scrollAreaCollapsibleBox.setWidgetResizable(True) + _scrollAreaCollapsibleBox.setWidget(_scrollAreaCollapsibleBoxWidget) + _scrollAreaCollapsibleBox.setMinimumHeight(300) + _scrollAreaCollapsibleBox.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + + _collapsibleBoxWidgetLayout = QVBoxLayout() + _collapsibleBoxWidgetLayout.addWidget(_scrollAreaCollapsibleBox) + + _collapsibleBoxWidget = CollapsibleBox(table_entry_ID) + _collapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _collapsibleBoxWidget.setContentLayout(_collapsibleBoxWidgetLayout) + + _expandingTabEntryWidgetLayout = QVBoxLayout() + _expandingTabEntryWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + _expandingTabEntryWidgetLayout.addWidget(_collapsibleBoxWidget) + + _expandingTabEntryWidget = QWidget() + 
_expandingTabEntryWidget.toolTip = table_entry_short_desc + _expandingTabEntryWidget.setLayout(_expandingTabEntryWidgetLayout) + _expandingTabEntryWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + + proc_params["tableEntryID"] = table_entry_ID + proc_params["parent_layout"] = _scrollAreaCollapsibleBoxWidgetLayout + proc_params = self.add_table_entry_job(proc_params) + + # instead of adding, insert at 0 to keep latest entry on top + # self.proc_table_QFormLayout.addRow(_expandingTabEntryWidget) + self.proc_table_QFormLayout.insertRow(0, _expandingTabEntryWidget) + + proc_params["table_layout"] = self.proc_table_QFormLayout + proc_params["table_entry"] = _expandingTabEntryWidget + + self.worker.run_in_pool(proc_params) + # result = self.worker.getResult(proc_params["exp_id"]) + # print(result) + + # Builds the model as required + def build_model(self, selected_modes): + try: + birefringence = None + phase = None + fluorescence = None + chNames = ["State0"] + exclude_modes = ["birefringence", "phase", "fluorescence"] + if "birefringence" in selected_modes and "phase" in selected_modes: + birefringence = settings.BirefringenceSettings() + phase = settings.PhaseSettings() + chNames = ["State0", "State1", "State2", "State3"] + exclude_modes = ["fluorescence"] + elif "birefringence" in selected_modes: + birefringence = settings.BirefringenceSettings() + chNames = ["State0", "State1", "State2", "State3"] + exclude_modes = ["fluorescence", "phase"] + elif "phase" in selected_modes: + phase = settings.PhaseSettings() + chNames = ["BF"] + exclude_modes = ["birefringence", "fluorescence"] + elif "fluorescence" in selected_modes: + fluorescence = settings.FluorescenceSettings() + chNames = ["FL"] + exclude_modes = ["birefringence", "phase"] + + model = None + try: + model = settings.ReconstructionSettings( + input_channel_names=chNames, + birefringence=birefringence, + phase=phase, + fluorescence=fluorescence, + ) + except ValidationError as 
exc: + # use v1 and v2 differ for ValidationError - newer one is not caught properly + return None, exc.errors() + + model = self.fix_model( + model, exclude_modes, "input_channel_names", chNames + ) + return model, "+".join(selected_modes) + ": MSG_SUCCESS" + + except Exception as exc: + return None, exc.args + + # ToDo: Temporary fix to over ride the 'input_channel_names' default value + # Needs revisitation + def fix_model(self, model, exclude_modes, attr_key, attr_val): + try: + for mode in exclude_modes: + model = settings.ReconstructionSettings.copy( + model, + exclude={mode}, + deep=True, + update={attr_key: attr_val}, + ) + settings.ReconstructionSettings.__setattr__( + model, attr_key, attr_val + ) + if hasattr(model, attr_key): + model.__fields__[attr_key].default = attr_val + model.__fields__[attr_key].field_info.default = attr_val + except Exception as exc: + return print(exc.args) + return model + + # Creates UI controls from model based on selections + def build_acq_contols(self): + + # Make a copy of selections and unsed for deletion + selected_modes = [] + exclude_modes = [] + + for mode in self.modes_selected.keys(): + enabled = self.modes_selected[mode]["Checkbox"].value + if not enabled: + exclude_modes.append(mode) + else: + selected_modes.append(mode) + + self.create_acq_contols2(selected_modes, exclude_modes) + + def create_acq_contols2( + self, selected_modes, exclude_modes, my_loaded_model=None, json_dict=None + ): + # duplicate settings from the prev model on new model creation + if json_dict is None and len(self.pydantic_classes) > 0: + ret = self.build_model_and_run( + validate_return_prev_model_json_txt=True + ) + if ret is None: + return + key, json_txt = ret + self.prev_model_settings[key] = json.loads(json_txt) + if json_dict is None: + key = "-".join(selected_modes) + if key in self.prev_model_settings.keys(): + json_dict = self.prev_model_settings[key] + + # initialize the top container and specify what pydantic class to map from + 
if my_loaded_model is not None: + pydantic_class = my_loaded_model + else: + pydantic_class, ret_msg = self.build_model(selected_modes) + if pydantic_class is None: + self.message_box(ret_msg) + return + + # Final constant UI val and identifier + _idx: Final[int] = self.index + _str: Final[str] = str(uuid.uuid4()) + + # Container holding the pydantic UI components + # Multiple instances/copies since more than 1 might be created + recon_pydantic_container = widgets.Container( + name=_str, scrollable=False + ) + + self.add_pydantic_to_container( + pydantic_class, recon_pydantic_container, exclude_modes, json_dict + ) + + # Run a validation check to see if the selected options are permitted + # before we create the GUI + # get the kwargs from the container/class + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + recon_pydantic_container, + pydantic_class, + pydantic_kwargs, + exclude_modes, + ) + if pydantic_kwargs is None: + self.message_box(ret_msg) + return + + # For list element, this needs to be cleaned and parsed back as an array + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None: + self.message_box(ret_msg) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None: + self.message_box(ret_msg) + return + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return errors if None 
+ pydantic_model, ret_msg = self.validate_pydantic_model( + pydantic_class, pydantic_kwargs + ) + if pydantic_model is None: + self.message_box(ret_msg) + return + + # generate a json from the instantiated model, update the json_display + # most of this will end up in a table as processing proceeds + json_txt, ret_msg = self.validate_and_return_json(pydantic_model) + if json_txt is None: + self.message_box(ret_msg) + return + + # PushButton to delete a UI container + # Use case when a wrong selection of input modes get selected eg Bire+Fl + # Preferably this root level validation should occur before values arevalidated + # in order to display and avoid this to occur + _del_button = widgets.PushButton(name="Delete Model") + + c_mode = "-and-".join(selected_modes) + c_mode_short = "".join( + item[:3].capitalize() for item in selected_modes + ) + if c_mode in CONTAINERS_INFO.keys(): + CONTAINERS_INFO[c_mode] += 1 + else: + CONTAINERS_INFO[c_mode] = 1 + num_str = "{:02d}".format(CONTAINERS_INFO[c_mode]) + c_mode_str = f"{c_mode} - {num_str}" + + # Output Data location + # These could be multiple based on user selection for each model + # Inherits from Input by default at creation time + name_without_ext = os.path.splitext(Path(self.input_directory).name)[0] + save_path = os.path.join( + Path(self.output_directory).absolute(), + ( + name_without_ext + + ("_" + c_mode_short + "_" + num_str) + + ".zarr" + ), + ) + save_path_exists = True if Path(save_path).exists() else False + _output_data_loc = widgets.LineEdit( + value=Path(save_path).name, + tooltip=( + "" + if not save_path_exists + else (_validate_alert + " Output file exists") + ), + ) + _output_data_btn = widgets.PushButton( + text=("" if not save_path_exists else (_validate_alert + " ")) + + "Output Data:", + tooltip=( + "" + if not save_path_exists + else (_validate_alert + " Output file exists") + ), + ) + + # Passing location label to output location selector + _output_data_btn.clicked.connect( + lambda: 
self.browse_model_dir_path_output(_output_data_loc) + ) + _output_data_loc.changed.connect( + lambda: self.read_and_set_output_path_on_validation( + _output_data_loc, _output_data_btn, save_path + ) + ) + + _show_CheckBox = widgets.CheckBox( + name="Show after Reconstruction", value=True + ) + _show_CheckBox.max_width = 200 + _rx_Label = widgets.Label(value="rx") + _rx_LineEdit = widgets.LineEdit(name="rx", value=1) + _rx_LineEdit.max_width = 50 + _validate_button = widgets.PushButton(name="Validate") + + # Passing all UI components that would be deleted + _expandingTabEntryWidget = QWidget() + _del_button.clicked.connect( + lambda: self.delete_model( + _expandingTabEntryWidget, + recon_pydantic_container.native, + _output_data_loc.native, + _output_data_btn.native, + _show_CheckBox.native, + _validate_button.native, + _del_button.native, + _str, + ) + ) + + # HBox for Output Data + _hBox_widget = QWidget() + _hBox_layout = QHBoxLayout() + _hBox_layout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + _hBox_widget.setLayout(_hBox_layout) + _hBox_layout.addWidget(_output_data_btn.native) + _hBox_layout.addWidget(_output_data_loc.native) + + # Add this container to the main scrollable widget + _scrollAreaCollapsibleBoxWidgetLayout = QVBoxLayout() + _scrollAreaCollapsibleBoxWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidget = MyWidget() + _scrollAreaCollapsibleBoxWidget.setLayout( + _scrollAreaCollapsibleBoxWidgetLayout + ) + + _scrollAreaCollapsibleBox = QScrollArea() + _scrollAreaCollapsibleBox.setWidgetResizable(True) + _scrollAreaCollapsibleBox.setWidget(_scrollAreaCollapsibleBoxWidget) + + _collapsibleBoxWidgetLayout = QVBoxLayout() + _collapsibleBoxWidgetLayout.setAlignment(Qt.AlignmentFlag.AlignTop) + + scrollbar = _scrollAreaCollapsibleBox.horizontalScrollBar() + _scrollAreaCollapsibleBoxWidget.resized.connect( + lambda: self.check_scrollbar_visibility(scrollbar) + ) + + 
_scrollAreaCollapsibleBoxWidgetLayout.addWidget( + scrollbar, alignment=Qt.AlignmentFlag.AlignTop + ) # Place at the top + + _collapsibleBoxWidgetLayout.addWidget(_scrollAreaCollapsibleBox) + + _collapsibleBoxWidget = CollapsibleBox( + c_mode_str + ) # tableEntryID, tableEntryShortDesc - should update with processing status + + _validate_button.clicked.connect( + lambda: self.validate_model(_str, _collapsibleBoxWidget) + ) + + _hBox_widget2 = QWidget() + _hBox_layout2 = QHBoxLayout() + _hBox_layout2.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop) + _hBox_widget2.setLayout(_hBox_layout2) + _hBox_layout2.addWidget(_show_CheckBox.native) + _hBox_layout2.addWidget(_validate_button.native) + _hBox_layout2.addWidget(_del_button.native) + _hBox_layout2.addWidget(_rx_Label.native) + _hBox_layout2.addWidget(_rx_LineEdit.native) + + _expandingTabEntryWidgetLayout = QVBoxLayout() + _expandingTabEntryWidgetLayout.setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + _expandingTabEntryWidgetLayout.addWidget(_collapsibleBoxWidget) + + _expandingTabEntryWidget.toolTip = c_mode_str + _expandingTabEntryWidget.setLayout(_expandingTabEntryWidgetLayout) + _expandingTabEntryWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _expandingTabEntryWidget.layout().setAlignment( + QtCore.Qt.AlignmentFlag.AlignTop + ) + + _scrollAreaCollapsibleBoxWidgetLayout.addWidget( + recon_pydantic_container.native + ) + _scrollAreaCollapsibleBoxWidgetLayout.addWidget(_hBox_widget) + _scrollAreaCollapsibleBoxWidgetLayout.addWidget(_hBox_widget2) + + _scrollAreaCollapsibleBox.setMinimumHeight( + _scrollAreaCollapsibleBoxWidgetLayout.sizeHint().height() + 20 + ) + _collapsibleBoxWidget.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + _collapsibleBoxWidget.setContentLayout(_collapsibleBoxWidgetLayout) + + self.models_container_widget_layout.addWidget(_expandingTabEntryWidget) + + # Store a copy of the pydantic container along with all its 
associated components and properties + # We dont needs a copy of the class but storing for now + # This will be used for making deletion edits and looping to create our final run output + # uuid - used for identiying in editable list + self.pydantic_classes.append( + { + "uuid": _str, + "c_mode_str": c_mode_str, + "collapsibleBoxWidget": _collapsibleBoxWidget, + "class": pydantic_class, + "input": self.data_input_LineEdit, + "output": os.path.join( + Path(self.output_directory).absolute(), + _output_data_loc.value, + ), + "output_parent_dir": str( + Path(self.output_directory).absolute() + ), + "output_LineEdit": _output_data_loc, + "output_Button": _output_data_btn, + "container": recon_pydantic_container, + "selected_modes": selected_modes.copy(), + "exclude_modes": exclude_modes.copy(), + "poll_data": self.pollData, + "show": _show_CheckBox, + "rx": _rx_LineEdit, + } + ) + self.index += 1 + + if self.index > 1: + self.reconstruction_run_PushButton.text = "RUN {n} Models".format( + n=self.index + ) + else: + self.reconstruction_run_PushButton.text = "RUN Model" + + return pydantic_model + + def check_scrollbar_visibility(self, scrollbar): + h_scrollbar = scrollbar + + # Hide scrollbar if not needed + h_scrollbar.setVisible(h_scrollbar.maximum() > h_scrollbar.minimum()) + + def validate_model(self, _str, _collapsibleBoxWidget): + i = 0 + model_entry_item = None + for item in self.pydantic_classes: + if item["uuid"] == _str: + model_entry_item = item + break + i += 1 + if model_entry_item is not None: + cls = item["class"] + cls_container = item["container"] + exclude_modes = item["exclude_modes"] + c_mode_str = item["c_mode_str"] + + # build up the arguments for the pydantic model given the current container + if cls is None: + self.message_box("No model defined !") + return + + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + if pydantic_kwargs is None: + 
self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["time_indices"] = time_indices + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = self.validate_pydantic_model( + cls, pydantic_kwargs + ) + if pydantic_model is None: + self.message_box(ret_msg) + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + return + if ret_msg == MSG_SUCCESS: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_ok}" + ) + else: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + + # UI components deletion - 
maybe just needs the parent container instead of individual components + def delete_model(self, wid0, wid1, wid2, wid3, wid4, wid5, wid6, _str): + + if not self.confirm_dialog(): + return False + + # if wid5 is not None: + # wid5.setParent(None) + # if wid4 is not None: + # wid4.setParent(None) + # if wid3 is not None: + # wid3.setParent(None) + # if wid2 is not None: + # wid2.setParent(None) + # if wid1 is not None: + # wid1.setParent(None) + if wid0 is not None: + wid0.setParent(None) + + # Find and remove the class from our pydantic model list using uuid + i = 0 + for item in self.pydantic_classes: + if item["uuid"] == _str: + self.pydantic_classes.pop(i) + break + i += 1 + self.index = len(self.pydantic_classes) + if self.index > 1: + self.reconstruction_run_PushButton.text = "RUN {n} Models".format( + n=self.index + ) + else: + self.reconstruction_run_PushButton.text = "RUN Model" + + # Clear all the generated pydantic models and clears the pydantic model list + def clear_all_models(self, silent=False): + + if silent or self.confirm_dialog(): + index = self.models_container_widget_layout.count() - 1 + while index >= 0: + myWidget = self.models_container_widget_layout.itemAt( + index + ).widget() + if myWidget is not None: + myWidget.setParent(None) + index -= 1 + self.pydantic_classes.clear() + CONTAINERS_INFO.clear() + self.index = 0 + self.reconstruction_run_PushButton.text = "RUN Model" + self.prev_model_settings = {} + + # Displays the json output from the pydantic model UI selections by user + # Loops through all our stored pydantic classes + def build_model_and_run(self, validate_return_prev_model_json_txt=False): + # we dont want to have a partial run if there are N models + # so we will validate them all first and then run in a second loop + # first pass for validating + # second pass for creating yaml and processing + + if len(self.pydantic_classes) == 0: + self.message_box("Please create a processing model first !") + return + + 
self.model_reset_highlighter_setter() # reset the container elements that might be highlighted for errors + _collectAllErrors = {} + _collectAllErrorsBool = True + for item in self.pydantic_classes: + cls = item["class"] + cls_container = item["container"] + selected_modes = item["selected_modes"] + exclude_modes = item["exclude_modes"] + uuid_str = item["uuid"] + _collapsibleBoxWidget = item["collapsibleBoxWidget"] + c_mode_str = item["c_mode_str"] + + _collectAllErrors[uuid_str] = {} + _collectAllErrors[uuid_str]["cls"] = cls_container + _collectAllErrors[uuid_str]["errs"] = [] + _collectAllErrors[uuid_str]["collapsibleBox"] = c_mode_str + + # build up the arguments for the pydantic model given the current container + if cls is None: + self.message_box(ret_msg) + return + + # get the kwargs from the container/class + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + if pydantic_kwargs is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + + # For list element, this needs to be cleaned and parsed back as an array + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None and not _collectAllErrorsBool: + 
self.message_box(ret_msg) + return + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = self.validate_pydantic_model( + cls, pydantic_kwargs + ) + if ret_msg == MSG_SUCCESS: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_ok}" + ) + else: + _collapsibleBoxWidget.setNewName( + f"{c_mode_str} {_validate_alert}" + ) + _collectAllErrors[uuid_str]["errs"] = ret_msg + if pydantic_model is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + + # generate a json from the instantiated model, update the json_display + # most of this will end up in a table as processing proceeds + json_txt, ret_msg = self.validate_and_return_json(pydantic_model) + if json_txt is None and not _collectAllErrorsBool: + self.message_box(ret_msg) + return + + # check if we collected any validation errors before continuing + for uu_key in _collectAllErrors.keys(): + if len(_collectAllErrors[uu_key]["errs"]) > 0: + self.model_highlighter(_collectAllErrors) + fmt_str = self.format_string_for_error_display(_collectAllErrors) + self.message_box(fmt_str) + return + + if validate_return_prev_model_json_txt: + return "-".join(selected_modes), json_txt + + # generate a time-stamp for our yaml files to avoid overwriting + # files generated at the same time will have an index suffix + now = datetime.datetime.now() + ms = now.strftime("%f")[:3] + unique_id = now.strftime("%Y_%m_%d_%H_%M_%S_") + ms + + if self.pollData: + data = open_ome_zarr(self.input_directory, mode="r") + if "CurrentDimensions" in data.zattrs.keys(): + my_dict_time_indices = data.zattrs["CurrentDimensions"]["time"] + # get the prev time_index, since this is current acq + if my_dict_time_indices - 1 > 1: + time_indices = list(range(0, my_dict_time_indices)) + else: + time_indices = 0 + + pollDataThread = threading.Thread( + target=self.add_poll_loop, + args=(self.input_directory, 
my_dict_time_indices - 1), + ) + pollDataThread.start() + + i = 0 + for item in self.pydantic_classes: + i += 1 + cls = item["class"] + cls_container = item["container"] + selected_modes = item["selected_modes"] + exclude_modes = item["exclude_modes"] + c_mode_str = item["c_mode_str"] + output_LineEdit = item["output_LineEdit"] + output_parent_dir = item["output_parent_dir"] + + full_out_path = os.path.join( + output_parent_dir, output_LineEdit.value + ) + + # gather input/out locations + input_dir = f"{item['input'].value}" + output_dir = full_out_path + + # build up the arguments for the pydantic model given the current container + if cls is None: + self.message_box("No model defined !") + return + + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = self.get_and_validate_pydantic_args( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + if pydantic_kwargs is None: + self.message_box(ret_msg) + return + + input_channel_names, ret_msg = self.clean_string_for_list( + "input_channel_names", pydantic_kwargs["input_channel_names"] + ) + if input_channel_names is None: + self.message_box(ret_msg) + return + pydantic_kwargs["input_channel_names"] = input_channel_names + + if not self.pollData: + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + time_indices, ret_msg = self.clean_string_int_for_list( + "time_indices", pydantic_kwargs["time_indices"] + ) + if time_indices is None: + self.message_box(ret_msg) + return + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ], + ) + if background_path is None: + self.message_box(ret_msg) + return + 
pydantic_kwargs["birefringence"]["apply_inverse"][ + "background_path" + ] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = self.validate_pydantic_model( + cls, pydantic_kwargs + ) + if pydantic_model is None: + self.message_box(ret_msg) + return + + # generate a json from the instantiated model, update the json_display + # most of this will end up in a table as processing proceeds + json_txt, ret_msg = self.validate_and_return_json(pydantic_model) + if json_txt is None: + self.message_box(ret_msg) + return + + # save the yaml files + # path is next to saved data location + save_config_path = str(Path(output_dir).parent.absolute()) + yml_file_name = "-and-".join(selected_modes) + yml_file = ( + yml_file_name + "-" + unique_id + "-{:02d}".format(i) + ".yml" + ) + config_path = os.path.join(save_config_path, yml_file) + utils.model_to_yaml(pydantic_model, config_path) + + # Input params for table entry + # Once ALL entries are entered we can deleted ALL model containers + # Table will need a low priority update thread to refresh status queried from CLI + # Table entries will be purged on completion when Result is returned OK + # Table entries will show an error msg when processing finishes but Result not OK + # Table fields ID / DateTime, Reconstruction type, Input Location, Output Location, Progress indicator, Stop button + + # addl_txt = "ID:" + unique_id + "-"+ str(i) + "\nInput:" + input_dir + "\nOutput:" + output_dir + # self.json_display.value = self.json_display.value + addl_txt + "\n" + json_txt+ "\n\n" + expID = "{tID}-{idx}".format(tID=unique_id, idx=i) + tableID = "{tName}: ({tID}-{idx})".format( + tName=c_mode_str, tID=unique_id, idx=i + ) + tableDescToolTip = "{tName}: ({tID}-{idx})".format( + tName=yml_file_name, tID=unique_id, idx=i + ) + + proc_params = {} + proc_params["exp_id"] = expID + proc_params["desc"] = tableDescToolTip + proc_params["config_path"] = str(Path(config_path).absolute()) + 
proc_params["input_path"] = str(Path(input_dir).absolute()) + proc_params["output_path"] = str(Path(output_dir).absolute()) + proc_params["output_path_parent"] = str( + Path(output_dir).parent.absolute() + ) + proc_params["show"] = item["show"].value + proc_params["rx"] = item["rx"].value + + self.addTableEntry(tableID, tableDescToolTip, proc_params) + + def add_poll_loop(self, input_data_path, last_time_index): + _pydantic_classes = self.pydantic_classes.copy() + required_order = ["time", "position", "z", "channel"] + _pollData = True + + tableEntryWorker = AddOTFTableEntryWorkerThread( + input_data_path, True, False + ) + tableEntryWorker.add_tableOTFentry_signal.connect( + self.add_remove_check_OTF_table_entry + ) + tableEntryWorker.start() + _breakFlag = False + while True: + time.sleep(10) + zattrs_data = None + try: + _stopCalled = self.add_remove_check_OTF_table_entry( + input_data_path, True, do_check=True + ) + if _stopCalled: + tableEntryWorker2 = AddOTFTableEntryWorkerThread( + input_data_path, False, False + ) + tableEntryWorker2.add_tableOTFentry_signal.connect( + self.add_remove_check_OTF_table_entry + ) + tableEntryWorker2.start() + + # let child threads finish their work before exiting the parent thread + while tableEntryWorker2.isRunning(): + time.sleep(1) + time.sleep(5) + break + try: + data = open_ome_zarr(input_data_path, mode="r") + zattrs_data = data.zattrs + except PermissionError: + pass # On-The-Fly dataset will throw Permission Denied when being written + # Maybe we can read the zaatrs directly in that case + # If this write/read is a constant issue then the zattrs 'CurrentDimensions' key + # should be updated less frequently, instead of current design of updating with + # each image + + if zattrs_data is None: + zattrs_data = self.load_zattrs_directly_as_dict( + input_data_path + ) + + if zattrs_data is not None: + if "CurrentDimensions" in zattrs_data.keys(): + my_dict1 = zattrs_data["CurrentDimensions"] + sorted_dict_acq = { + k: 
my_dict1[k] + for k in sorted( + my_dict1, key=lambda x: required_order.index(x) + ) + } + my_dict_time_indices_curr = zattrs_data[ + "CurrentDimensions" + ]["time"] + # print(sorted_dict_acq) + + if "FinalDimensions" in zattrs_data.keys(): + my_dict2 = zattrs_data["FinalDimensions"] + sorted_dict_final = { + k: my_dict2[k] + for k in sorted( + my_dict2, key=lambda x: required_order.index(x) + ) + } + # print(sorted_dict_final) + + # use the prev time_index, since this is current acq and we need for other dims to finish acq for this t + # or when all dims match - signifying acq finished + if ( + my_dict_time_indices_curr - 2 > last_time_index + or json.dumps(sorted_dict_acq) + == json.dumps(sorted_dict_final) + ): + + now = datetime.datetime.now() + ms = now.strftime("%f")[:3] + unique_id = now.strftime("%Y_%m_%d_%H_%M_%S_") + ms + + i = 0 + for item in _pydantic_classes: + i += 1 + cls = item["class"] + cls_container = item["container"] + selected_modes = item["selected_modes"] + exclude_modes = item["exclude_modes"] + c_mode_str = item["c_mode_str"] + output_LineEdit = item["output_LineEdit"] + output_parent_dir = item["output_parent_dir"] + + full_out_path = os.path.join( + output_parent_dir, output_LineEdit.value + ) + # gather input/out locations + input_dir = f"{item['input'].value}" + output_dir = full_out_path + + pydantic_kwargs = {} + pydantic_kwargs, ret_msg = ( + self.get_and_validate_pydantic_args( + cls_container, + cls, + pydantic_kwargs, + exclude_modes, + ) + ) + + input_channel_names, ret_msg = ( + self.clean_string_for_list( + "input_channel_names", + pydantic_kwargs["input_channel_names"], + ) + ) + pydantic_kwargs["input_channel_names"] = ( + input_channel_names + ) + + if _pollData: + if json.dumps(sorted_dict_acq) == json.dumps( + sorted_dict_final + ): + time_indices = list( + range( + last_time_index, + my_dict_time_indices_curr, + ) + ) + _breakFlag = True + else: + time_indices = list( + range( + last_time_index, + 
my_dict_time_indices_curr - 2, + ) + ) + pydantic_kwargs["time_indices"] = time_indices + + if "birefringence" in pydantic_kwargs.keys(): + background_path, ret_msg = ( + self.clean_path_string_when_empty( + "background_path", + pydantic_kwargs["birefringence"][ + "apply_inverse" + ]["background_path"], + ) + ) + + pydantic_kwargs["birefringence"][ + "apply_inverse" + ]["background_path"] = background_path + + # validate and return errors if None + pydantic_model, ret_msg = ( + self.validate_pydantic_model( + cls, pydantic_kwargs + ) + ) + + # save the yaml files + # path is next to saved data location + save_config_path = str( + Path(output_dir).parent.absolute() + ) + yml_file_name = "-and-".join(selected_modes) + yml_file = ( + yml_file_name + + "-" + + unique_id + + "-{:02d}".format(i) + + ".yml" + ) + config_path = os.path.join( + save_config_path, yml_file + ) + utils.model_to_yaml(pydantic_model, config_path) + + expID = "{tID}-{idx}".format(tID=unique_id, idx=i) + tableID = "{tName}: ({tID}-{idx})".format( + tName=c_mode_str, tID=unique_id, idx=i + ) + tableDescToolTip = "{tName}: ({tID}-{idx})".format( + tName=yml_file_name, tID=unique_id, idx=i + ) + + proc_params = {} + proc_params["exp_id"] = expID + proc_params["desc"] = tableDescToolTip + proc_params["config_path"] = str( + Path(config_path).absolute() + ) + proc_params["input_path"] = str( + Path(input_dir).absolute() + ) + proc_params["output_path"] = str( + Path(output_dir).absolute() + ) + proc_params["output_path_parent"] = str( + Path(output_dir).parent.absolute() + ) + proc_params["show"] = False + proc_params["rx"] = 1 + + tableEntryWorker1 = AddTableEntryWorkerThread( + tableID, tableDescToolTip, proc_params + ) + tableEntryWorker1.add_tableentry_signal.connect( + self.addTableEntry + ) + tableEntryWorker1.start() + + if ( + json.dumps(sorted_dict_acq) + == json.dumps(sorted_dict_final) + and _breakFlag + ): + + tableEntryWorker2 = AddOTFTableEntryWorkerThread( + input_data_path, False, False 
+ ) + tableEntryWorker2.add_tableOTFentry_signal.connect( + self.add_remove_check_OTF_table_entry + ) + tableEntryWorker2.start() + + # let child threads finish their work before exiting the parent thread + while ( + tableEntryWorker1.isRunning() + or tableEntryWorker2.isRunning() + ): + time.sleep(1) + time.sleep(5) + break + + last_time_index = my_dict_time_indices_curr - 2 + except Exception as exc: + print(exc.args) + print( + "Exiting polling for dataset: {data_path}".format( + data_path=input_data_path + ) + ) + break + + def load_zattrs_directly_as_dict(self, zattrsFilePathDir): + try: + file_path = os.path.join(zattrsFilePathDir, ".zattrs") + f = open(file_path, "r") + txt = f.read() + f.close() + return json.loads(txt) + except Exception as exc: + print(exc.args) + return None + + # ======= These function do not implement validation + # They simply make the data from GUI translate to input types + # that the model expects: for eg. GUI txt field will output only str + # when the model needs integers + + # util function to parse list elements displayed as string + def remove_chars(self, string, chars_to_remove): + for char in chars_to_remove: + string = string.replace(char, "") + return string + + # util function to parse list elements displayed as string + def clean_string_for_list(self, field, string): + chars_to_remove = ["[", "]", "'", '"', " "] + if isinstance(string, str): + string = self.remove_chars(string, chars_to_remove) + if len(string) == 0: + return None, {"msg": field + " is invalid"} + if "," in string: + string = string.split(",") + return string, MSG_SUCCESS + if isinstance(string, str): + string = [string] + return string, MSG_SUCCESS + return string, MSG_SUCCESS + + # util function to parse list elements displayed as string, int, int as list of strings, int range + # [1,2,3], 4,5,6 , 5-95 + def clean_string_int_for_list(self, field, string): + chars_to_remove = ["[", "]", "'", '"', " "] + if Literal[string] == Literal["all"]: + return 
string, MSG_SUCCESS + if Literal[string] == Literal[""]: + return string, MSG_SUCCESS + if isinstance(string, str): + string = self.remove_chars(string, chars_to_remove) + if len(string) == 0: + return None, {"msg": field + " is invalid"} + if "-" in string: + string = string.split("-") + if len(string) == 2: + try: + x = int(string[0]) + if not isinstance(x, int): + raise + except Exception as exc: + return None, { + "msg": field + " first range element is not an integer" + } + try: + y = int(string[1]) + if not isinstance(y, int): + raise + except Exception as exc: + return None, { + "msg": field + + " second range element is not an integer" + } + if y > x: + return list(range(x, y + 1)), MSG_SUCCESS + else: + return None, { + "msg": field + + " second integer cannot be smaller than first" + } + else: + return None, {"msg": field + " is invalid"} + if "," in string: + string = string.split(",") + return string, MSG_SUCCESS + return string, MSG_SUCCESS + + # util function to set path to empty - by default empty path has a "." 
+ def clean_path_string_when_empty(self, field, string): + if isinstance(string, Path) and string == Path(""): + string = "" + return string, MSG_SUCCESS + return string, MSG_SUCCESS + + # get the pydantic_kwargs and catches any errors in doing so + def get_and_validate_pydantic_args( + self, cls_container, cls, pydantic_kwargs, exclude_modes + ): + try: + try: + self.get_pydantic_kwargs( + cls_container, cls, pydantic_kwargs, exclude_modes + ) + return pydantic_kwargs, MSG_SUCCESS + except ValidationError as exc: + return None, exc.errors() + except Exception as exc: + return None, exc.args + + # validate the model and return errors for user actioning + def validate_pydantic_model(self, cls, pydantic_kwargs): + # instantiate the pydantic model form the kwargs we just pulled + try: + try: + pydantic_model = settings.ReconstructionSettings.parse_obj( + pydantic_kwargs + ) + return pydantic_model, MSG_SUCCESS + except ValidationError as exc: + return None, exc.errors() + except Exception as exc: + return None, exc.args + + # test to make sure model coverts to json which should ensure compatibility with yaml export + def validate_and_return_json(self, pydantic_model): + try: + json_format = pydantic_model.json(indent=4) + return json_format, MSG_SUCCESS + except Exception as exc: + return None, exc.args + + # gets a copy of the model from a yaml file + # will get all fields (even those that are optional and not in yaml) and default values + # model needs further parsing against yaml file for fields + def get_model_from_file(self, model_file_path): + pydantic_model = None + try: + try: + pydantic_model = utils.yaml_to_model( + model_file_path, settings.ReconstructionSettings + ) + except ValidationError as exc: + return pydantic_model, exc.errors() + if pydantic_model is None: + raise Exception("utils.yaml_to_model - returned a None model") + return pydantic_model, MSG_SUCCESS + except Exception as exc: + return None, exc.args + + # handles json with boolean properly 
    # handles json with boolean properly and converts to lowercase string
    # as required
    def convert(self, obj):
        """Recursively convert booleans to lowercase strings ("true"/"false").

        Lists/tuples are rebuilt as lists; dict keys and values are both
        converted; every other value is returned unchanged.
        """
        if isinstance(obj, bool):
            return str(obj).lower()
        if isinstance(obj, (list, tuple)):
            return [self.convert(item) for item in obj]
        if isinstance(obj, dict):
            return {
                self.convert(key): self.convert(value)
                for key, value in obj.items()
            }
        return obj

    # Main function to add pydantic model to container
    # https://github.com/chrishavlin/miscellaneous_python/blob/main/src/pydantic_magicgui_roundtrip.py
    # Has limitation and can cause breakages for unhandled or incorrectly handled types
    # Cannot handle Union types/typing - for now being handled explicitly
    # Ignoring NoneType since those should be Optional but maybe needs displaying ??
    # ToDo: Needs revisitation, Union check
    # Displaying Union field "time_indices" as LineEdit component
    # excludes handles fields that are not supposed to show up from __fields__
    # json_dict adds ability to provide new set of default values at time of container creation

    def add_pydantic_to_container(
        self,
        py_model: Union[BaseModel, ModelMetaclass],
        container: widgets.Container,
        excludes=[],
        json_dict=None,
    ):
        """Recursively build magicgui widgets for each field of a pydantic (v1) model.

        NOTE(review): `excludes=[]` is a mutable default argument - harmless
        while never mutated here, but worth replacing with None at some point.
        """
        # recursively traverse a pydantic model adding widgets to a container. When a nested
        # pydantic model is encountered, add a new nested container
        for field, field_def in py_model.__fields__.items():
            if field_def is not None and field not in excludes:
                def_val = field_def.default
                ftype = field_def.type_
                toolTip = ""
                # tooltip is assembled from validator names; models without
                # class_validators simply get an empty tooltip
                try:
                    for f_val in field_def.class_validators.keys():
                        toolTip = f"{toolTip}{f_val} "
                except Exception as e:
                    pass
                if isinstance(ftype, BaseModel) or isinstance(
                    ftype, ModelMetaclass
                ):
                    json_val = None
                    if json_dict is not None:
                        json_val = json_dict[field]
                    # the field is a pydantic class, add a container for it and fill it
                    new_widget_cls = widgets.Container
                    new_widget = new_widget_cls(name=field_def.name)
                    new_widget.tooltip = toolTip
                    self.add_pydantic_to_container(
                        ftype, new_widget, excludes, json_val
                    )
                # ToDo: Implement Union check, tried:
                # pydantic.typing.is_union(ftype)
                # isinstance(ftype, types.UnionType)
                # https://stackoverflow.com/questions/45957615/how-to-check-a-variable-against-union-type-during-runtime
                elif isinstance(ftype, type(Union[NonNegativeInt, List, str])):
                    if (
                        field == "background_path"
                    ):  # field == "background_path":
                        # directory selector for the background folder
                        new_widget_cls, ops = get_widget_class(
                            def_val,
                            Annotated[Path, {"mode": "d"}],
                            dict(name=field, value=def_val),
                        )
                        new_widget = new_widget_cls(**ops)
                        toolTip = (
                            "Select the folder containing background.zarr"
                        )
                    elif field == "time_indices":  # field == "time_indices":
                        # Union field shown as a plain LineEdit (str)
                        new_widget_cls, ops = get_widget_class(
                            def_val, str, dict(name=field, value=def_val)
                        )
                        new_widget = new_widget_cls(**ops)
                    else:  # other Union cases
                        new_widget_cls, ops = get_widget_class(
                            def_val, str, dict(name=field, value=def_val)
                        )
                        new_widget = new_widget_cls(**ops)
                    new_widget.tooltip = toolTip
                    if isinstance(new_widget, widgets.EmptyWidget):
                        warnings.warn(
                            message=f"magicgui could not identify a widget for {py_model}.{field}, which has type {ftype}"
                        )
                elif isinstance(def_val, float):
                    # parse the field, add appropriate widget
                    # smaller spinbox step for sub-unit defaults; extra-fine
                    # step for regularization_strength (5 decimal places)
                    def_step_size = 0.001
                    if field_def.name == "regularization_strength":
                        def_step_size = 0.00001
                    if def_val > -1 and def_val < 1:
                        new_widget_cls, ops = get_widget_class(
                            None,
                            ftype,
                            dict(
                                name=field_def.name,
                                value=def_val,
                                step=float(def_step_size),
                            ),
                        )
                        new_widget = new_widget_cls(**ops)
                        new_widget.tooltip = toolTip
                    else:
                        new_widget_cls, ops = get_widget_class(
                            None,
                            ftype,
                            dict(name=field_def.name, value=def_val),
                        )
                        new_widget = new_widget_cls(**ops)
                        new_widget.tooltip = toolTip
                    if isinstance(new_widget, widgets.EmptyWidget):
                        warnings.warn(
                            message=f"magicgui could not identify a widget for {py_model}.{field}, which has type {ftype}"
                        )
                else:
                    # parse the field, add appropriate widget
                    new_widget_cls, ops = get_widget_class(
                        None, ftype, dict(name=field_def.name, value=def_val)
                    )
                    new_widget = new_widget_cls(**ops)
                    if isinstance(new_widget, widgets.EmptyWidget):
                        warnings.warn(
                            message=f"magicgui could not identify a widget for {py_model}.{field}, which has type {ftype}"
                        )
                    else:
                        new_widget.tooltip = toolTip
                # overlay values from json_dict (new defaults) onto leaf
                # widgets; nested Containers were already filled recursively
                if json_dict is not None and (
                    not isinstance(new_widget, widgets.Container)
                    or (isinstance(new_widget, widgets.FileEdit))
                ):
                    if field in json_dict.keys():
                        if isinstance(new_widget, widgets.CheckBox):
                            new_widget.value = (
                                True if json_dict[field] == "true" else False
                            )
                        elif isinstance(new_widget, widgets.FileEdit):
                            if len(json_dict[field]) > 0:
                                extension = os.path.splitext(json_dict[field])[
                                    1
                                ]
                                if len(extension) > 0:
                                    new_widget.value = Path(
                                        json_dict[field]
                                    ).parent.absolute()  # CLI accepts BG folder not .zarr
                                else:
                                    new_widget.value = Path(json_dict[field])
                        else:
                            new_widget.value = json_dict[field]
                container.append(new_widget)

    # refer - add_pydantic_to_container() for comments
    def get_pydantic_kwargs(
        self,
        container: widgets.Container,
        pydantic_model,
        pydantic_kwargs: dict,
        excludes=[],
        json_dict=None,
    ):
        """Inverse of add_pydantic_to_container: pull widget values back into *pydantic_kwargs*.

        NOTE(review): *json_dict* is threaded through the recursion but never
        read here - TODO confirm whether it can be dropped.
        """
        # given a container that was instantiated from a pydantic model, get the arguments
        # needed to instantiate that pydantic model from the container.

        # traverse model fields, pull out values from container
        for field, field_def in pydantic_model.__fields__.items():
            if field_def is not None and field not in excludes:
                ftype = field_def.type_
                if isinstance(ftype, BaseModel) or isinstance(
                    ftype, ModelMetaclass
                ):
                    # go deeper
                    pydantic_kwargs[field] = (
                        {}
                    )  # new dictionary for the new nest level
                    # any pydantic class will be a container, so pull that out to pass
                    # to the recursive call
                    sub_container = getattr(container, field_def.name)
                    self.get_pydantic_kwargs(
                        sub_container,
                        ftype,
                        pydantic_kwargs[field],
                        excludes,
                        json_dict,
                    )
                else:
                    # not a pydantic class, just pull the field value from the container
                    if hasattr(container, field_def.name):
                        value = getattr(container, field_def.name).value
                        pydantic_kwargs[field] = value

    # copied from main_widget
    # file open/select dialog
    def open_file_dialog(self, default_path, type, filter="All Files (*)"):
        """Dispatch to open_dialog with a title matching the dialog *type*.

        Unknown types fall back to a directory chooser.
        """
        if type == "dir":
            return self.open_dialog(
                "select a directory", str(default_path), type, filter
            )
        elif type == "file":
            return self.open_dialog(
                "select a file", str(default_path), type, filter
            )
        elif type == "files":
            return self.open_dialog(
                "select file(s)", str(default_path), type, filter
            )
        elif type == "save":
            return self.open_dialog(
                "save a file", str(default_path), type, filter
            )
        else:
            return self.open_dialog(
                "select a directory", str(default_path), type, filter
            )
    def open_dialog(self, title, ref, type, filter="All Files (*)"):
        """
        opens pop-up dialogue for the user to choose a specific file or directory.

        Parameters
        ----------
        title: (str) message to display at the top of the pop up
        ref: (str) reference path to start the search at
        type: (str) type of file the user is choosing (dir, file, files, or save)
        filter: (str) Qt name filter applied to the file view

        Returns
        -------
        the selected path (empty when the dialog is cancelled)

        Raises
        ------
        ValueError for an unrecognized *type*
        """

        options = QFileDialog.DontUseNativeDialog
        if type == "dir":
            path = QFileDialog.getExistingDirectory(
                None, title, ref, options=options
            )
        elif type == "file":
            path = QFileDialog.getOpenFileName(
                None, title, ref, filter=filter, options=options
            )[0]
        elif type == "files":
            path = QFileDialog.getOpenFileNames(
                None, title, ref, filter=filter, options=options
            )[0]
        elif type == "save":
            path = QFileDialog.getSaveFileName(
                None, "Choose a save name", ref, filter=filter, options=options
            )[0]
        else:
            raise ValueError("Did not understand file dialogue type")

        return path


class MyWorker:
    """Owns the job pool, the localhost status server and the row-deletion thread."""

    def __init__(self, formLayout, tab_recon: Ui_ReconTab_Form, parentForm):
        super().__init__()
        self.formLayout: QFormLayout = formLayout
        self.tab_recon: Ui_ReconTab_Form = tab_recon
        self.ui: QWidget = parentForm
        self.max_cores = os.cpu_count()
        # In the case of CLI, we just need to submit requests in a non-blocking way
        self.threadPool = int(self.max_cores / 2)
        self.results = {}
        self.pool = None
        self.futures = []
        # https://click.palletsprojects.com/en/stable/testing/
        # self.runner = CliRunner()
        # jobs_mgmt.shared_var_jobs = self.JobsManager.shared_var_jobs
        self.JobsMgmt = jobs_mgmt.JobsManagement()
        self.useServer = True
        self.serverRunning = True
        self.server_socket = None
        self.isInitialized = False

    def initialize(self):
        """Lazily start the status server thread and the row-deletion worker (idempotent)."""
        if not self.isInitialized:
            thread = threading.Thread(target=self.start_server)
            thread.start()
            self.workerThreadRowDeletion = RowDeletionWorkerThread(
                self.formLayout
            )
            self.workerThreadRowDeletion.removeRowSignal.connect(
                self.tab_recon.remove_row
            )
            self.workerThreadRowDeletion.start()
            self.isInitialized = True

    def set_new_instances(self, formLayout, tab_recon, parentForm):
        """Re-point this worker at a freshly created widget/layout (widget re-opened)."""
        self.formLayout: QFormLayout = formLayout
        self.tab_recon: Ui_ReconTab_Form = tab_recon
        self.ui: QWidget = parentForm
        self.workerThreadRowDeletion.set_new_instances(formLayout)

    def find_widget_row_in_layout(self, strID):
        """Find the form row whose widget tooltip contains *strID*.

        Detaches the matched widget from its parent and returns the row
        index, or -1 when no row matches.
        """
        layout: QFormLayout = self.formLayout
        for idx in range(0, layout.rowCount()):
            widgetItem = layout.itemAt(idx)
            name_widget = widgetItem.widget()
            toolTip_string = str(name_widget.toolTip)
            if strID in toolTip_string:
                name_widget.setParent(None)
                return idx
        return -1

    def start_server(self):
        """Accept-loop for job status updates on localhost:SERVER_PORT.

        Each accepted connection is handed to decode_client_data on its own
        thread so the accept loop is never blocked.
        """
        try:
            if not self.useServer:
                return

            self.server_socket = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM
            )
            self.server_socket.bind(("localhost", jobs_mgmt.SERVER_PORT))
            self.server_socket.listen(
                50
            )  # become a server socket, maximum 50 connections

            while self.serverRunning:
                client_socket, address = self.server_socket.accept()
                # stop serving once the parent widget is gone
                if self.ui is not None and not self.ui.isVisible():
                    break
                try:
                    # dont block the server thread
                    thread = threading.Thread(
                        target=self.decode_client_data,
                        args=("", "", "", "", client_socket),
                    )
                    thread.start()
                except Exception as exc:
                    print(exc.args)
                time.sleep(1)

            self.server_socket.close()
        except Exception as exc:
            if not self.serverRunning:
                self.serverRunning = True
                return  # ignore - will cause an exception on napari close but that is fine and does the job
            print(exc.args)

    def stop_server(self):
        """Close the listening socket; the blocked accept() then raises and the loop exits."""
        try:
            if self.server_socket is not None:
                self.serverRunning = False
                self.server_socket.close()
        except Exception as exc:
            print(exc.args)

    def get_max_CPU_cores(self):
        # os.cpu_count() captured at construction time
        return self.max_cores

    def set_pool_threads(self, t):
        # silently ignores out-of-range requests (t <= 0 or t >= core count)
        if t > 0 and t < self.max_cores:
            self.threadPool = t

    def start_pool(self):
        # create the executor lazily; reused until client_release drains it
        if self.pool is None:
            self.pool = concurrent.futures.ThreadPoolExecutor(
                max_workers=self.threadPool
            )

    def shut_down_pool(self):
        # blocks until all submitted jobs finish
        self.pool.shutdown(wait=True)
    # This method handles each client response thread. It parses the information received from the client
    # and is responsible for parsing each well/pos Job if the case may be and starting individual update threads
    # using the tableUpdateAndCleaupThread() method
    # This is also handling an unused "CoNvErTeR" functioning that can be implemented on 3rd party apps
    def decode_client_data(
        self,
        expIdx="",
        jobIdx="",
        wellName="",
        logs_folder_path="",
        client_socket=None,
    ):
        """Parse one newline-delimited JSON message from *client_socket*.

        Two message shapes are handled: a "CoNvErTeR" request (3rd-party
        initiated reconstruction, builds proc_params and a yaml config) and
        the regular job registration ({uID: {jID, pos, log}}) which spawns a
        table_update_and_cleaup_thread for the job.
        """
        if client_socket is not None and expIdx == "" and jobIdx == "":
            try:
                buf = client_socket.recv(10240)
                if len(buf) > 0:
                    # multiple messages may arrive in one recv; split on newline
                    if b"\n" in buf:
                        dataList = buf.split(b"\n")
                    else:
                        dataList = [buf]
                    for data in dataList:
                        if len(data) > 0:
                            decoded_string = data.decode()
                            if (
                                "CoNvErTeR" in decoded_string
                            ):  # this request came from an agnostic route - requires processing
                                json_str = str(decoded_string)
                                json_obj = json.loads(json_str)
                                converter_params = json_obj["CoNvErTeR"]
                                input_data = converter_params["input"]
                                output_data = converter_params["output"]
                                recon_params = converter_params["params"]
                                expID = recon_params["expID"]
                                mode = recon_params["mode"]
                                if "config_path" in recon_params.keys():
                                    config_path = recon_params["config_path"]
                                else:
                                    config_path = ""

                                proc_params = {}
                                proc_params["exp_id"] = expID
                                proc_params["desc"] = expID
                                proc_params["input_path"] = str(input_data)
                                proc_params["output_path"] = str(output_data)
                                proc_params["output_path_parent"] = str(
                                    Path(output_data).parent.absolute()
                                )
                                proc_params["show"] = False
                                proc_params["rx"] = 1

                                if config_path == "":
                                    # no config supplied: build a model from the
                                    # GUI state matching the requested mode
                                    model = None
                                    if (
                                        len(self.tab_recon.pydantic_classes)
                                        > 0
                                    ):
                                        for (
                                            item
                                        ) in self.tab_recon.pydantic_classes:
                                            if mode == item["selected_modes"]:
                                                cls = item["class"]
                                                cls_container = item[
                                                    "container"
                                                ]
                                                exclude_modes = item[
                                                    "exclude_modes"
                                                ]
                                                output_LineEdit = item[
                                                    "output_LineEdit"
                                                ]
                                                output_parent_dir = item[
                                                    "output_parent_dir"
                                                ]
                                                full_out_path = os.path.join(
                                                    output_parent_dir,
                                                    output_LineEdit.value,
                                                )

                                                # gather input/out locations
                                                output_dir = full_out_path
                                                if output_data == "":
                                                    output_data = output_dir
                                                proc_params[
                                                    "output_path"
                                                ] = str(output_data)

                                                # build up the arguments for the pydantic model given the current container
                                                if cls is None:
                                                    self.tab_recon.message_box(
                                                        "No model defined !"
                                                    )
                                                    return

                                                pydantic_kwargs = {}
                                                pydantic_kwargs, ret_msg = (
                                                    self.tab_recon.get_and_validate_pydantic_args(
                                                        cls_container,
                                                        cls,
                                                        pydantic_kwargs,
                                                        exclude_modes,
                                                    )
                                                )
                                                if pydantic_kwargs is None:
                                                    self.tab_recon.message_box(
                                                        ret_msg
                                                    )
                                                    return

                                                (
                                                    input_channel_names,
                                                    ret_msg,
                                                ) = self.tab_recon.clean_string_for_list(
                                                    "input_channel_names",
                                                    pydantic_kwargs[
                                                        "input_channel_names"
                                                    ],
                                                )
                                                if input_channel_names is None:
                                                    self.tab_recon.message_box(
                                                        ret_msg
                                                    )
                                                    return
                                                pydantic_kwargs[
                                                    "input_channel_names"
                                                ] = input_channel_names

                                                time_indices, ret_msg = (
                                                    self.tab_recon.clean_string_int_for_list(
                                                        "time_indices",
                                                        pydantic_kwargs[
                                                            "time_indices"
                                                        ],
                                                    )
                                                )
                                                if time_indices is None:
                                                    self.tab_recon.message_box(
                                                        ret_msg
                                                    )
                                                    return
                                                pydantic_kwargs[
                                                    "time_indices"
                                                ] = time_indices

                                                # NOTE(review): this block is an
                                                # exact duplicate of the
                                                # time_indices cleaning above -
                                                # candidate for removal
                                                time_indices, ret_msg = (
                                                    self.tab_recon.clean_string_int_for_list(
                                                        "time_indices",
                                                        pydantic_kwargs[
                                                            "time_indices"
                                                        ],
                                                    )
                                                )
                                                if time_indices is None:
                                                    self.tab_recon.message_box(
                                                        ret_msg
                                                    )
                                                    return
                                                pydantic_kwargs[
                                                    "time_indices"
                                                ] = time_indices

                                                if (
                                                    "birefringence"
                                                    in pydantic_kwargs.keys()
                                                ):
                                                    (
                                                        background_path,
                                                        ret_msg,
                                                    ) = self.tab_recon.clean_path_string_when_empty(
                                                        "background_path",
                                                        pydantic_kwargs[
                                                            "birefringence"
                                                        ]["apply_inverse"][
                                                            "background_path"
                                                        ],
                                                    )
                                                    if background_path is None:
                                                        self.tab_recon.message_box(
                                                            ret_msg
                                                        )
                                                        return
                                                    pydantic_kwargs[
                                                        "birefringence"
                                                    ]["apply_inverse"][
                                                        "background_path"
                                                    ] = background_path

                                                # validate and return errors if None
                                                pydantic_model, ret_msg = (
                                                    self.tab_recon.validate_pydantic_model(
                                                        cls, pydantic_kwargs
                                                    )
                                                )
                                                if pydantic_model is None:
                                                    self.tab_recon.message_box(
                                                        ret_msg
                                                    )
                                                    return
                                                model = pydantic_model
                                                break
                                    if model is None:
                                        model, msg = self.tab_recon.build_model(
                                            mode
                                        )
                                    # yaml is written next to the output zarr
                                    yaml_path = os.path.join(
                                        str(
                                            Path(output_data).parent.absolute()
                                        ),
                                        expID + ".yml",
                                    )
                                    utils.model_to_yaml(model, yaml_path)
                                    proc_params["config_path"] = str(yaml_path)

                                tableEntryWorker = AddTableEntryWorkerThread(
                                    expID, expID, proc_params
                                )
                                tableEntryWorker.add_tableentry_signal.connect(
                                    self.tab_recon.addTableEntry
                                )
                                tableEntryWorker.start()
                                time.sleep(10)
                                return
                            else:
                                # regular registration: {uID: {jID, pos, log}}
                                json_str = str(decoded_string)
                                json_obj = json.loads(json_str)
                                for k in json_obj:
                                    expIdx = k
                                    jobIdx = json_obj[k]["jID"]
                                    wellName = json_obj[k]["pos"]
                                    logs_folder_path = json_obj[k]["log"]
                                if (
                                    expIdx not in self.results.keys()
                                ):  # this job came from agnostic CLI route - no processing
                                    # mint a unique expIdx so the entry cannot
                                    # collide with GUI-initiated runs
                                    now = datetime.datetime.now()
                                    ms = now.strftime("%f")[:3]
                                    unique_id = (
                                        now.strftime("%Y_%m_%d_%H_%M_%S_") + ms
                                    )
                                    expIdx = expIdx + "-" + unique_id
                                    self.JobsMgmt.put_Job_in_list(
                                        None,
                                        expIdx,
                                        str(jobIdx),
                                        wellName,
                                        mode="server",
                                    )
                                # print("Submitting Job: {job} expIdx: {expIdx}".format(job=jobIdx, expIdx=expIdx))
                                thread = threading.Thread(
                                    target=self.table_update_and_cleaup_thread,
                                    args=(
                                        expIdx,
                                        jobIdx,
                                        wellName,
                                        logs_folder_path,
                                        client_socket,
                                    ),
                                )
                                thread.start()
                    return
            except Exception as exc:
                print(exc.args)
    # the table update thread can be called from multiple points/threads
    # on errors - table row item is updated but there is no row deletion
    # on successful processing - the row item is expected to be deleted
    # row is being deleted from a seperate thread for which we need to connect using signal

    # This is handling essentially each job thread. Points of entry are on a failed job submission
    # which then calls this to update based on the expID (used for .yml naming). On successful job
    # submissions jobID, the point of entry is via the socket connection the GUI is listening and
    # then spawns a new thread to avoid blocking of other connections.
    # If a job submission spawns more jobs then this also calls other methods via signal to create
    # the required GUI components in the main thread.
    # Once we have expID and jobID this thread periodically loops and updates each job status and/or
    # the job error by reading the log files. Using certain keywords
    # eg JOB_COMPLETION_STR = "Job completed successfully" we determine the progress. We also create
    # a map for expID which might have multiple jobs to determine when a reconstruction is
    # finished vs a single job finishing.
    # The loop ends based on user, time-out, job(s) completion and errors and handles removal of
    # processing GUI table items (on main thread).
    # Based on the conditions the loop will end calling clientRelease()
    def table_update_and_cleaup_thread(
        self,
        expIdx="",
        jobIdx="",
        wellName="",
        logs_folder_path="",
        client_socket=None,
    ):
        """Per-job polling loop: reads log files every second and updates the GUI row.

        Exits via client_release() with a numeric *reason* code (0 = error or
        timeout path, 1-6 = specific user/completion conditions, see calls).
        """
        jobIdx = str(jobIdx)

        # ToDo: Another approach to this could be to implement a status thread on the client side
        # Since the client is already running till the job is completed, the client could ping status
        # at regular intervals and also provide results and exceptions we currently read from the file
        # Currently we only send JobID/UniqueID pair from Client to Server. This would reduce multiple threads
        # server side.

        if expIdx != "" and jobIdx != "":
            # this request came from server listening so we wait for the Job to finish and update progress
            if (
                expIdx not in self.results.keys()
            ):
                # unknown experiment: create a processing-table entry first and
                # wait for the main thread to register it
                proc_params = {}
                tableID = "{exp} - {job} ({pos})".format(
                    exp=expIdx, job=jobIdx, pos=wellName
                )
                proc_params["exp_id"] = expIdx
                proc_params["desc"] = tableID
                proc_params["config_path"] = ""
                proc_params["input_path"] = ""
                proc_params["output_path"] = ""
                proc_params["output_path_parent"] = ""
                proc_params["show"] = False
                proc_params["rx"] = 1

                tableEntryWorker = AddTableEntryWorkerThread(
                    tableID, tableID, proc_params
                )
                tableEntryWorker.add_tableentry_signal.connect(
                    self.tab_recon.addTableEntry
                )
                tableEntryWorker.start()

                while expIdx not in self.results.keys():
                    time.sleep(1)

                params = self.results[expIdx]["JobUNK"].copy()
                params["status"] = STATUS_running_job
            else:
                params = self.results[expIdx]["JobUNK"].copy()

            if (
                jobIdx not in self.results[expIdx].keys()
                and len(self.results[expIdx].keys()) == 1
            ):
                # this is the first job
                params["primary"] = True
                self.results[expIdx][jobIdx] = params
            elif (
                jobIdx not in self.results[expIdx].keys()
                and len(self.results[expIdx].keys()) > 1
            ):
                # this is a new job
                # we need to create cancel and job status windows and add to parent container
                params["primary"] = False
                NEW_WIDGETS_QUEUE.append(expIdx + jobIdx)
                parentLayout: QVBoxLayout = params["parent_layout"]
                worker_thread = AddWidgetWorkerThread(
                    parentLayout, expIdx, jobIdx, params["desc"], wellName
                )
                worker_thread.add_widget_signal.connect(
                    self.tab_recon.add_widget
                )
                NEW_WIDGETS_QUEUE_THREADS.append(worker_thread)

                # start queued widget-creation threads one at a time
                while len(NEW_WIDGETS_QUEUE_THREADS) > 0:
                    s_worker_thread = NEW_WIDGETS_QUEUE_THREADS.pop(0)
                    s_worker_thread.start()
                    time.sleep(1)

                # wait for new components reference
                while expIdx + jobIdx in NEW_WIDGETS_QUEUE:
                    time.sleep(1)

                _cancelJobBtn = MULTI_JOBS_REFS[expIdx + jobIdx]["cancelBtn"]
                _infoBox = MULTI_JOBS_REFS[expIdx + jobIdx]["infobox"]
                params["table_entry_infoBox"] = _infoBox
                params["cancelJobButton"] = _cancelJobBtn

                self.results[expIdx][jobIdx] = params

            _infoBox: ScrollableLabel = params["table_entry_infoBox"]
            _cancelJobBtn: PushButton = params["cancelJobButton"]

            _txtForInfoBox = "Updating {id}-{pos}: Please wait... \nJobID assigned: {jID} ".format(
                id=params["desc"], pos=wellName, jID=jobIdx
            )
            try:
                _cancelJobBtn.text = "Cancel Job {jID} ({posName})".format(
                    jID=jobIdx, posName=wellName
                )
                _cancelJobBtn.enabled = True
                _infoBox.setText(_txtForInfoBox)
            except:
                # deleted by user - no longer needs updating
                params["status"] = STATUS_user_cleared_job
                return
            _tUpdateCount = 0
            _tUpdateCountTimeout = (
                jobs_mgmt.JOBS_TIMEOUT * 60
            )  # 5 mins - match executor time-out
            _lastUpdate_jobTXT = ""
            jobTXT = ""
            # print("Updating Job: {job} expIdx: {expIdx}".format(job=jobIdx, expIdx=expIdx))
            while True:
                time.sleep(1)  # update every sec and exit on break
                try:
                    if "cancel called" in _cancelJobBtn.text:
                        # forward the cancel request to the client process
                        json_obj = {
                            "uID": expIdx,
                            "jID": jobIdx,
                            "command": "cancel",
                        }
                        json_str = json.dumps(json_obj) + "\n"
                        client_socket.send(json_str.encode())
                        params["status"] = STATUS_user_cancelled_job
                        _infoBox.setText(
                            "User called for Cancel Job Request\n"
                            + "Please check terminal output for Job status..\n\n"
                            + jobTXT
                        )
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=1
                        )
                        break  # cancel called by user
                    if _infoBox == None:
                        params["status"] = STATUS_user_cleared_job
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=2
                        )
                        break  # deleted by user - no longer needs updating
                    if _infoBox:
                        pass
                except Exception as exc:
                    print(exc.args)
                    params["status"] = STATUS_user_cleared_job
                    self.client_release(
                        expIdx, jobIdx, client_socket, params, reason=3
                    )
                    break  # deleted by user - no longer needs updating
                if self.JobsMgmt.has_submitted_job(
                    expIdx, jobIdx, mode="server"
                ):
                    if params["status"] in [STATUS_finished_job]:
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=4
                        )
                        break
                    elif params["status"] in [STATUS_errored_job]:
                        jobERR = self.JobsMgmt.check_for_jobID_File(
                            jobIdx, logs_folder_path, extension="err"
                        )
                        _infoBox.setText(
                            jobIdx + "\n" + params["desc"] + "\n\n" + jobERR
                        )
                        self.client_release(
                            expIdx, jobIdx, client_socket, params, reason=5
                        )
                        break
                    else:
                        jobTXT = self.JobsMgmt.check_for_jobID_File(
                            jobIdx, logs_folder_path, extension="out"
                        )
                        try:
                            if jobTXT == "":  # job file not created yet
                                # print(jobIdx + " not started yet")
                                time.sleep(2)
                                _tUpdateCount += 2
                                if (
                                    _tUpdateCount > 10
                                ):  # if out file is empty for 10s, check the err file to update user
                                    jobERR = self.JobsMgmt.check_for_jobID_File(
                                        jobIdx,
                                        logs_folder_path,
                                        extension="err",
                                    )
                                    if JOB_OOM_EVENT in jobERR:
                                        params["status"] = STATUS_errored_job
                                        _infoBox.setText(
                                            jobERR
                                            + "\n\n"
                                            + jobTXT
                                        )
                                        self.client_release(
                                            expIdx,
                                            jobIdx,
                                            client_socket,
                                            params,
                                            reason=0,
                                        )
                                        break
                                    _infoBox.setText(
                                        jobIdx
                                        + "\n"
                                        + params["desc"]
                                        + "\n\n"
                                        + jobERR
                                    )
                                if _tUpdateCount > _tUpdateCountTimeout:
                                    self.client_release(
                                        expIdx,
                                        jobIdx,
                                        client_socket,
                                        params,
                                        reason=0,
                                    )
                                    break
                            elif params["status"] == STATUS_finished_job:
                                rowIdx = self.find_widget_row_in_layout(expIdx)
                                # check to ensure row deletion due to shrinking table
                                # if not deleted try to delete again
                                if rowIdx < 0:
                                    self.client_release(
                                        expIdx,
                                        jobIdx,
                                        client_socket,
                                        params,
                                        reason=6,
                                    )
                                    break
                                else:
                                    break
                            elif JOB_COMPLETION_STR in jobTXT:
                                params["status"] = STATUS_finished_job
                                _infoBox.setText(jobTXT)
                                # this is the only case where row deleting occurs
                                # we cant delete the row directly from this thread
                                # we will use the exp_id to identify and delete the row
                                # using pyqtSignal
                                # break - based on status
                            elif JOB_TRIGGERED_EXC in jobTXT:
                                params["status"] = STATUS_errored_job
                                jobERR = self.JobsMgmt.check_for_jobID_File(
                                    jobIdx, logs_folder_path, extension="err"
                                )
                                _infoBox.setText(
                                    jobIdx
                                    + "\n"
                                    + params["desc"]
                                    + "\n\n"
                                    + jobTXT
                                    + "\n\n"
                                    + jobERR
                                )
                                self.client_release(
                                    expIdx,
                                    jobIdx,
                                    client_socket,
                                    params,
                                    reason=0,
                                )
                                break
                            elif JOB_RUNNING_STR in jobTXT:
                                params["status"] = STATUS_running_job
                                _infoBox.setText(jobTXT)
                                _tUpdateCount += 1
                                if _tUpdateCount > 60:
                                    # no progress for a while: inspect err file
                                    jobERR = self.JobsMgmt.check_for_jobID_File(
                                        jobIdx,
                                        logs_folder_path,
                                        extension="err",
                                    )
                                    if JOB_OOM_EVENT in jobERR:
                                        params["status"] = STATUS_errored_job
                                        _infoBox.setText(
                                            jobERR
                                            + "\n\n"
                                            + jobTXT
                                        )
                                        self.client_release(
                                            expIdx,
                                            jobIdx,
                                            client_socket,
                                            params,
                                            reason=0,
                                        )
                                        break
                                    elif _lastUpdate_jobTXT != jobTXT:
                                        # if there is an update reset counter
                                        _tUpdateCount = 0
                                        _lastUpdate_jobTXT = jobTXT
                                    else:
                                        _infoBox.setText(
                                            "Please check terminal output for Job status..\n\n"
                                            + jobTXT
                                        )
                                if _tUpdateCount > _tUpdateCountTimeout:
                                    self.client_release(
                                        expIdx,
                                        jobIdx,
                                        client_socket,
                                        params,
                                        reason=0,
                                    )
                                    break
                            else:
                                jobERR = self.JobsMgmt.check_for_jobID_File(
                                    jobIdx, logs_folder_path, extension="err"
                                )
                                _infoBox.setText(
                                    jobIdx
                                    + "\n"
                                    + params["desc"]
                                    + "\n\n"
                                    + jobERR
                                )
                                self.client_release(
                                    expIdx,
                                    jobIdx,
                                    client_socket,
                                    params,
                                    reason=0,
                                )
                                break
                        except Exception as exc:
                            print(exc.args)
                else:
                    self.client_release(
                        expIdx, jobIdx, client_socket, params, reason=0
                    )
                    break
        else:
            # this would occur when an exception happens on the pool side before or during job submission
            # we dont have a job ID and will update based on exp_ID/uID
            # if job submission was not successful we can assume the client is not listening
            # and does not require a clientRelease cmd
            for uID in self.results.keys():
                params = self.results[uID]["JobUNK"]
                if params["status"] in [STATUS_errored_pool]:
                    _infoBox = params["table_entry_infoBox"]
                    poolERR = params["error"]
                    _infoBox.setText(poolERR)

    def client_release(self, expIdx, jobIdx, client_socket, params, reason=0):
        """Tell the client process it may exit and tidy up pool/table state.

        *reason* 0 means an error/timeout exit (row is kept for the user);
        any non-zero reason queues the row for removal.
        """
        # only need to release client from primary job
        # print("clientRelease Job: {job} expIdx: {expIdx} reason:{reason}".format(job=jobIdx, expIdx=expIdx, reason=reason))
        self.JobsMgmt.put_Job_completion_in_list(True, expIdx, jobIdx)
        showData_thread = None
        if params["primary"]:
            if "show" in params:
                if params["show"]:
                    # Read reconstruction data
                    showData_thread = ShowDataWorkerThread(
                        params["output_path"]
                    )
                    showData_thread.show_data_signal.connect(
                        self.tab_recon.show_dataset
                    )
                    showData_thread.start()

            # for multi-job expID we need to check completion for all of them
            while not self.JobsMgmt.check_all_ExpJobs_completion(expIdx):
                time.sleep(1)

            json_obj = {
                "uID": expIdx,
                "jID": jobIdx,
                "command": "clientRelease",
            }
            json_str = json.dumps(json_obj) + "\n"
            client_socket.send(json_str.encode())

        if reason != 0:  # remove processing entry when exiting without error
            ROW_POP_QUEUE.append(expIdx)
        # print("FINISHED")

        # drain the executor when no work is queued
        if self.pool is not None:
            if self.pool._work_queue.qsize() == 0:
                self.pool.shutdown()
                self.pool = None

        if showData_thread is not None:
            while showData_thread.isRunning():
                time.sleep(3)

    def run_in_pool(self, params):
        """Register *params* under its exp_id and submit self.run to the thread pool."""
        if not self.isInitialized:
            self.initialize()

        self.start_pool()
        self.results[params["exp_id"]] = {}
        self.results[params["exp_id"]]["JobUNK"] = params
        self.results[params["exp_id"]]["JobUNK"][
            "status"
        ] = STATUS_running_pool
        self.results[params["exp_id"]]["JobUNK"]["error"] = ""

        try:
            # when a request on the listening port arrives with an empty path
            # we can assume the processing was initiated outside this application
            # we do not proceed with the processing and will display the results
            if params["input_path"] != "":
                f = self.pool.submit(self.run, params)
                self.futures.append(f)
        except Exception as exc:
            self.results[params["exp_id"]]["JobUNK"][
                "status"
            ] = STATUS_errored_pool
            self.results[params["exp_id"]]["JobUNK"]["error"] = str(
                "\n".join(exc.args)
            )
            self.table_update_and_cleaup_thread()
self.pool.submit(self.run, params) + self.futures.append(f) + except Exception as exc: + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_errored_pool + self.results[params["exp_id"]]["JobUNK"]["error"] = str( + "\n".join(exc.args) + ) + self.table_update_and_cleaup_thread() + + def run_multi_in_pool(self, multi_params_as_list): + self.start_pool() + for params in multi_params_as_list: + self.results[params["exp_id"]] = {} + self.results[params["exp_id"]]["JobUNK"] = params + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_submitted_pool + self.results[params["exp_id"]]["JobUNK"]["error"] = "" + try: + self.pool.map(self.run, multi_params_as_list) + except Exception as exc: + for params in multi_params_as_list: + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_errored_pool + self.results[params["exp_id"]]["JobUNK"]["error"] = str( + "\n".join(exc.args) + ) + self.table_update_and_cleaup_thread() + + def get_results(self): + return self.results + + def get_result(self, exp_id): + return self.results[exp_id] + + def run(self, params): + # thread where work is passed to CLI which will handle the + # multi-processing aspects as Jobs + if params["exp_id"] not in self.results.keys(): + self.results[params["exp_id"]] = {} + self.results[params["exp_id"]]["JobUNK"] = params + self.results[params["exp_id"]]["JobUNK"]["error"] = "" + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_running_pool + + try: + # does need further threading ? probably not ! 
+ thread = threading.Thread( + target=self.run_in_subprocess, args=(params,) + ) + thread.start() + + except Exception as exc: + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_errored_pool + self.results[params["exp_id"]]["JobUNK"]["error"] = str( + "\n".join(exc.args) + ) + self.table_update_and_cleaup_thread() + + def run_in_subprocess(self, params): + """function that initiates the processing on the CLI""" + try: + input_path = str(params["input_path"]) + config_path = str(params["config_path"]) + output_path = str(params["output_path"]) + uid = str(params["exp_id"]) + rx = str(params["rx"]) + mainfp = str(jobs_mgmt.FILE_PATH) + + self.results[params["exp_id"]]["JobUNK"][ + "status" + ] = STATUS_submitted_job + + proc = subprocess.run( + [ + "python", + mainfp, + "reconstruct", + "-i", + input_path, + "-c", + config_path, + "-o", + output_path, + "-rx", + str(rx), + "-uid", + uid, + ] + ) + self.results[params["exp_id"]]["JobUNK"]["proc"] = proc + if proc.returncode != 0: + raise Exception( + "An error occurred in processing ! Check terminal output." 
class ShowDataWorkerThread(QThread):
    """Relays a "show dataset" request to the main thread via a signal,
    so it can originate from a worker thread safely."""

    show_data_signal = pyqtSignal(str)

    def __init__(self, path):
        super().__init__()
        self.path = path

    def run(self):
        # Emitting from here lets the connected slot run on the main thread
        self.show_data_signal.emit(self.path)


class AddOTFTableEntryWorkerThread(QThread):
    """Relays an "add OTF table entry" request to the main thread via a
    signal, so it can originate from a worker thread safely."""

    add_tableOTFentry_signal = pyqtSignal(str, bool, bool)

    def __init__(self, OTF_dir_path, bool_msg, doCheck=False):
        super().__init__()
        self.OTF_dir_path = OTF_dir_path
        self.bool_msg = bool_msg
        self.doCheck = doCheck

    def run(self):
        # Emitting from here lets the connected slot run on the main thread
        self.add_tableOTFentry_signal.emit(
            self.OTF_dir_path, self.bool_msg, self.doCheck
        )


class AddTableEntryWorkerThread(QThread):
    """Relays an "add processing table entry" request to the main thread
    via a signal, so it can originate from a worker thread safely."""

    add_tableentry_signal = pyqtSignal(str, str, dict)

    def __init__(self, expID, desc, params):
        super().__init__()
        self.expID = expID
        self.desc = desc
        self.params = params

    def run(self):
        # Emitting from here lets the connected slot run on the main thread
        self.add_tableentry_signal.emit(self.expID, self.desc, self.params)
class RowDeletionWorkerThread(QThread):
    """Searches for a table row based on its ID and then emits a signal to
    the QFormLayout on the main thread for deletion."""

    removeRowSignal = pyqtSignal(int, str)

    def __init__(self, formLayout):
        super().__init__()
        self.formLayout = formLayout

    def set_new_instances(self, formLayout):
        # The processing table may be re-created when the widget is
        # hidden and shown again; swap in the fresh layout instance.
        self.formLayout: QFormLayout = formLayout

    # we might deal with race conditions with a shrinking table -
    # find the widget and return its index
    def find_widget_row_in_layout(self, strID):
        layout: QFormLayout = self.formLayout
        for idx in range(0, layout.rowCount()):
            widgetItem = layout.itemAt(idx)
            if widgetItem is not None:
                name_widget = widgetItem.widget()
                # BUG FIX: toolTip is a method — the original stringified the
                # bound method object (str(name_widget.toolTip)), whose repr
                # never contains the row ID, so no row ever matched.
                toolTip_string = str(name_widget.toolTip())
                if strID in toolTip_string:
                    name_widget.setParent(None)
                    return idx
        return -1

    def run(self):
        # Poll the shared deletion queue; emit a removal request for each
        # entry found, backing off when the queue is empty.
        while True:
            if len(ROW_POP_QUEUE) > 0:
                stringID = ROW_POP_QUEUE.pop(0)
                deleteRow = self.find_widget_row_in_layout(stringID)
                if deleteRow > -1:
                    self.removeRowSignal.emit(int(deleteRow), str(stringID))
                time.sleep(1)
            else:
                time.sleep(5)


class DropButton(QPushButton):
    """A drag & drop PushButton to load model file(s)."""

    def __init__(self, text, parent=None, recon_tab: Ui_ReconTab_Form = None):
        super().__init__(text, parent)
        self.setAcceptDrops(True)
        self.recon_tab = recon_tab

    def dragEnterEvent(self, event):
        # Accept only drags that carry file URLs
        if event.mimeData().hasUrls():
            event.acceptProposedAction()

    def dropEvent(self, event):
        files = []
        for url in event.mimeData().urls():
            filepath = url.toLocalFile()
            files.append(filepath)
        self.recon_tab.open_model_files(files)
class ScrollableLabel(QScrollArea):
    """A scrollable, word-wrapping label widget used for Job entries."""

    def __init__(self, text, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Inner label holds the text and is allowed to grow in both axes
        self.label = QLabel()
        self.label.setWordWrap(True)
        self.label.setText(text)
        self.label.setSizePolicy(
            QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding
        )

        inner_layout = QVBoxLayout()
        inner_layout.setAlignment(Qt.AlignmentFlag.AlignTop)
        inner_layout.addWidget(self.label)

        # The label lives inside a container widget that the scroll area owns
        container = QWidget()
        container.setLayout(inner_layout)
        container.setSizePolicy(
            QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding
        )

        self.setWidget(container)
        self.setWidgetResizable(True)
        self.setSizePolicy(
            QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding
        )
        self.setAlignment(Qt.AlignmentFlag.AlignTop)

    def setText(self, text):
        """Replace the text shown by the inner label."""
        self.label.setText(text)


class MyWidget(QWidget):
    """A QWidget that emits a ``resized`` signal on every resize event."""

    resized = pyqtSignal()

    def __init__(self):
        super().__init__()

    def resizeEvent(self, event):
        # Notify listeners before delegating to the default handler
        self.resized.emit()
        super().resizeEvent(event)
self.toggle_button.setArrowType(QtCore.Qt.ArrowType.RightArrow) + self.toggle_button.pressed.connect(self.on_pressed) + + self.toggle_animation = QtCore.QParallelAnimationGroup(self) + + self.content_area = QScrollArea(maximumHeight=0, minimumHeight=0) + self.content_area.setSizePolicy( + QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed + ) + self.content_area.setFrameShape(QFrame.Shape.NoFrame) + + lay = QVBoxLayout(self) + lay.setSpacing(0) + lay.setContentsMargins(0, 0, 0, 0) + lay.addWidget(self.toggle_button) + lay.addWidget(self.content_area) + + self.toggle_animation.addAnimation( + QtCore.QPropertyAnimation(self, b"minimumHeight") + ) + self.toggle_animation.addAnimation( + QtCore.QPropertyAnimation(self, b"maximumHeight") + ) + self.toggle_animation.addAnimation( + QtCore.QPropertyAnimation(self.content_area, b"maximumHeight") + ) + + def setNewName(self, name): + self.toggle_button.setText(name) + + # @QtCore.pyqtSlot() + def on_pressed(self): + checked = self.toggle_button.isChecked() + self.toggle_button.setArrowType( + QtCore.Qt.ArrowType.DownArrow + if not checked + else QtCore.Qt.ArrowType.RightArrow + ) + self.toggle_animation.setDirection( + QtCore.QAbstractAnimation.Direction.Forward + if not checked + else QtCore.QAbstractAnimation.Direction.Backward + ) + self.toggle_animation.start() + if checked and self.hasPydanticModel: + # do model verification on close + pass + + def setContentLayout(self, layout): + lay = self.content_area.layout() + del lay + self.content_area.setLayout(layout) + collapsed_height = ( + self.sizeHint().height() - self.content_area.maximumHeight() + ) + content_height = layout.sizeHint().height() + for i in range(self.toggle_animation.animationCount()): + animation = self.toggle_animation.animationAt(i) + animation.setDuration(500) + animation.setStartValue(collapsed_height) + animation.setEndValue(collapsed_height + content_height) + + content_animation = self.toggle_animation.animationAt( + 
self.toggle_animation.animationCount() - 1 + ) + content_animation.setDuration(500) + content_animation.setStartValue(0) + content_animation.setEndValue(content_height) + +# VScode debugging +if __name__ == "__main__": + import napari + + napari.Viewer() + napari.run() diff --git a/recOrder/scripts/simulate_zarr_acq.py b/recOrder/scripts/simulate_zarr_acq.py new file mode 100644 index 00000000..67b0fe09 --- /dev/null +++ b/recOrder/scripts/simulate_zarr_acq.py @@ -0,0 +1,171 @@ +from pathlib import Path +from iohub.convert import TIFFConverter +from iohub.ngff import open_ome_zarr +from recOrder.cli.utils import create_empty_hcs_zarr +from recOrder.cli import jobs_mgmt + +import time, threading, os, shutil, subprocess + +# This script is a demo .zarr acquisition simulation from an acquired .zarr store +# The script copies and writes additional metadata to .zattrs inserting two keys +# The two keys are "FinalDimensions" and "CurrentDimensions". +# The "FinalDimensions" key with (t,p,z,c) needs to be inserted when the dataset is created +# and then should be updated at close to ensure aborted acquisitions represent correct dimensions. +# The "CurrentDimensions" key should have the same (t,p,z,c) information and should be written out +# either with every new image, end of dimension OR at frequent intervals. +# Refer further notes below in the example regarding encountered issues. 
def convert_data(tif_path, latest_out_path, prefix="", data_type_str="ometiff"):
    """Convert an ome-tiff acquisition under ``tif_path`` to a zarr store.

    Parameters: ``tif_path`` source directory, ``latest_out_path`` output
    store path, ``prefix`` optional subdirectory, ``data_type_str`` source
    data format understood by TIFFConverter.
    """
    converter = TIFFConverter(
        os.path.join(tif_path, prefix),
        latest_out_path,
        data_type=data_type_str,
        grid_layout=False,
    )
    converter.run()


def run_convert(ome_tif_path):
    """Convert ``ome_tif_path`` into ``raw_<name>.zarr`` beside it."""
    out_path = os.path.join(
        Path(ome_tif_path).parent.absolute(),
        "raw_" + Path(ome_tif_path).name + ".zarr",
    )
    convert_data(ome_tif_path, out_path)


def run_acq(input_path="", waitBetweenT=30):
    """Simulate a .zarr acquisition by copying ``input_path`` image by image.

    Writes the "FinalDimensions" key once up front, updates the
    "CurrentDimensions" key as images are written, and sleeps
    ``waitBetweenT`` seconds after each timepoint.
    """
    output_store_path = os.path.join(
        Path(input_path).parent.absolute(),
        "acq_sim_" + Path(input_path).name,
    )

    # Start from a clean output store
    if Path(output_store_path).exists():
        shutil.rmtree(output_store_path)
        time.sleep(1)

    input_data = open_ome_zarr(input_path, mode="r")
    channel_names = input_data.channel_names

    position_keys: list[tuple[str]] = []

    # shape/dtype/chunks end up holding the values of the last position;
    # assumes all positions share them — TODO confirm
    for path, pos in input_data.positions():
        shape = pos["0"].shape
        dtype = pos["0"].dtype
        chunks = pos["0"].chunks
        scale = (1, 1, 1, 1, 1)
        position_keys.append(path.split("/"))

    create_empty_hcs_zarr(
        output_store_path,
        position_keys,
        shape,
        chunks,
        scale,
        channel_names,
        dtype,
        {},
    )
    output_dataset = open_ome_zarr(output_store_path, mode="r+")

    if "Summary" in input_data.zattrs.keys():
        output_dataset.zattrs["Summary"] = input_data.zattrs["Summary"]

    # "FinalDimensions" is inserted when the dataset is created and should be
    # updated at close so aborted acquisitions report correct dimensions.
    output_dataset.zattrs.update(
        {
            "FinalDimensions": {
                "channel": shape[1],
                "position": len(position_keys),
                "time": shape[0],
                "z": shape[2],
            }
        }
    )

    total_time = shape[0]
    total_pos = len(position_keys)
    total_z = shape[2]
    total_c = shape[1]
    for t in range(total_time):
        for p in range(total_pos):
            for z in range(total_z):
                for c in range(total_c):
                    position_key_string = "/".join(position_keys[p])
                    img_src = input_data[position_key_string][0][t, c, z]

                    img_data = output_dataset[position_key_string][0]
                    img_data[t, c, z] = img_src

                    # Note: On-The-Fly dataset reconstruction will throw
                    # Permission Denied when being written.
                    # Maybe we can read the zattrs directly in that case as a
                    # file which is less blocking.
                    # If this write/read is a constant issue then the zattrs
                    # 'CurrentDimensions' key should be updated less
                    # frequently, instead of current design of updating with
                    # each image.
                    output_dataset.zattrs.update(
                        {
                            "CurrentDimensions": {
                                "channel": total_c,
                                "position": p + 1,
                                "time": t + 1,
                                "z": z + 1,
                            }
                        }
                    )

        required_order = ["time", "position", "z", "channel"]
        my_dict = output_dataset.zattrs["CurrentDimensions"]
        sorted_dict_acq = {
            k: my_dict[k]
            for k in sorted(my_dict, key=lambda x: required_order.index(x))
        }
        print("Writer thread - Acquisition Dim:", sorted_dict_acq)

        # reconThread = threading.Thread(
        #     target=do_reconstruct, args=(output_store_path, t)
        # )
        # reconThread.start()

        time.sleep(waitBetweenT)  # sleep after every t

    # BUG FIX: the original wrote ``output_dataset.close`` (attribute access,
    # no call), so the store was never actually closed.
    output_dataset.close()
Check terminal output.") + except Exception as exc: + print(exc.args) + +#%% ############################################# +def run_acquire(input_path, waitBetweenT): + runThread1Acq = threading.Thread(target=run_acq, args=(input_path, waitBetweenT)) + runThread1Acq.start() + +#%% ############################################# +# Step 1: +# Convert an existing ome-tif recOrder acquisition, preferably with all dims (t, p, z, c) +# This will convert an existing ome-tif to a .zarr storage + +# ome_tif_path = "/ome-zarr_data/recOrderAcq/test/snap_6D_ometiff_1" +# runConvert(ome_tif_path) + +#%% ############################################# +# Step 2: +# run the test to simulate Acquiring a recOrder .zarr store + +input_path = "/ome-zarr_data/recOrderAcq/test/raw_snap_6D_ometiff_1.zarr" +waitBetweenT = 60 +run_acquire(input_path, waitBetweenT) + + + + diff --git a/recOrder/tests/util_tests/test_overlays.py b/recOrder/tests/util_tests/test_overlays.py index d483a26e..8354bd69 100644 --- a/recOrder/tests/util_tests/test_overlays.py +++ b/recOrder/tests/util_tests/test_overlays.py @@ -20,7 +20,7 @@ def _birefringence(draw): dtype, shape=shape, elements=st.floats( - min_value=0, + min_value=1.0000000168623835e-16, max_value=50, exclude_min=True, width=bit_width, @@ -40,6 +40,7 @@ def _birefringence(draw): ), ) ) + return retardance, orientation diff --git a/setup.cfg b/setup.cfg index f36021c7..9ca61f10 100644 --- a/setup.cfg +++ b/setup.cfg @@ -44,7 +44,7 @@ install_requires = wget>=3.2 psutil submitit - pydantic>=1.10.17 + pydantic==1.10.19 [options.extras_require] dev =