From 3ca0a572183eccbb468fcd3c49995713e943bf20 Mon Sep 17 00:00:00 2001
From: Elizabeth Esswein
Date: Thu, 11 Apr 2024 16:54:04 -0400
Subject: [PATCH] add docs for new examples

---
 doc/bpmn/application.rst      |   5 +-
 doc/bpmn/custom_task_spec.rst |  88 +++++-----
 doc/bpmn/imports.rst          |   2 +-
 doc/bpmn/script_engine.rst    | 313 ++++++++++++++++++++++++++++++++--
 doc/bpmn/workflows.rst        |  86 ++++++----
 doc/conf.py                   |   2 +-
 6 files changed, 401 insertions(+), 95 deletions(-)

diff --git a/doc/bpmn/application.rst b/doc/bpmn/application.rst
index eebf1724..851421e3 100644
--- a/doc/bpmn/application.rst
+++ b/doc/bpmn/application.rst
@@ -123,7 +123,7 @@ We initialize a scripting environment:
 
 .. code-block:: python
 
     script_env = TaskDataEnvironment({'datetime': datetime })
-    >script_engine = PythonScriptEngine(script_env)
+    script_engine = PythonScriptEngine(script_env)
 
 The :code:`PythonScriptEngine` handles execution of script tasks and evaluation of gateway and DMN conditions.
 We'll create the script engine based on it; execution and evaluation will occur in the context of this environment.
 
@@ -159,5 +159,6 @@ We then create our BPMN engine (:app:`engine/engine.py`) using each of these com
 
 .. code-block:: python
 
     from ..engine import BpmnEngine
-    engine = BpmnEngine(parser, serializer, handlers, script_env)
+    engine = BpmnEngine(parser, serializer, script_env)
 
+The handlers are automatically passed to the curses UI by the main runner.

diff --git a/doc/bpmn/custom_task_spec.rst b/doc/bpmn/custom_task_spec.rst
index 69319e8d..5cf72859 100644
--- a/doc/bpmn/custom_task_spec.rst
+++ b/doc/bpmn/custom_task_spec.rst
@@ -7,7 +7,27 @@ starts with a timer, the timer waits until the event occurs; this might be days
 Of course, we can always check that it's waiting and serialize the workflow until that time.  However, we might decide
 that we don't want SpiffWorkflow to manage this at all.  We could do this with a custom task spec.
 
-First we'll create a new class
+The code for this example can be found in :app:`misc/custom_start_event.py`.
+
+There is a very simple diagram, :bpmn:`timer_start.bpmn` (process ID `timer_start`), with a Start Event with a
+Duration Timer of one day that can be used to illustrate how the custom task spec works.  If you run this workflow
+with any of the configurations provided, you'll see a `WAITING` Start Event; if you use the parser and serializer we
+just created, you'll be prompted to complete the User Task immediately.
+
+To run this model with the custom spec:
+
+.. code:: console
+
+    ./runner.py -e spiff_example.misc.custom_start_event add -p timer_start -b bpmn/tutorial/timer_start.bpmn
+    ./runner.py -e spiff_example.misc.custom_start_event
+
+First we'll create a new class.
+
+.. note::
+
+    It might be better to have the class's init method take both the event definition to use *and* the timer event
+    definition.  Unfortunately, our parser is not terribly intuitive or easily extendable, so I've done it this
+    way to make this a little easier to follow.
 
 .. code:: python
 
@@ -27,7 +47,7 @@ First we'll create a new class
             super().__init__(wf_spec, bpmn_id, event_definition, **kwargs)
             self.timer_event = None
 
-When we create our custom event, we'll check to see if we're creating a Start Event with a :code:`TimerEventDefinition`, and
+When we create our custom spec, we'll check to see if we're creating a Start Event with a :code:`TimerEventDefinition`, and
 if so, we'll replace it with a :code:`NoneEventDefinition`.
 There are three different types of Timer Events, so we'll use the base class for all three to make sure we account
 for TimeDate, Duration, and Cycle.
 
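+A sketch of what that check might look like inside the init method (the full version is in
+:app:`misc/custom_start_event.py`):
+
+.. code:: python
+
+    def __init__(self, wf_spec, bpmn_id, event_definition, **kwargs):
+        if isinstance(event_definition, TimerEventDefinition):
+            # Hide the timer from SpiffWorkflow, but keep it around for the application to manage
+            super().__init__(wf_spec, bpmn_id, NoneEventDefinition(), **kwargs)
+            self.timer_event = event_definition
+        else:
+            super().__init__(wf_spec, bpmn_id, event_definition, **kwargs)
+            self.timer_event = None
+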
@@ -47,57 +67,44 @@ Whenever we create a custom task spec, we'll need to create a converter for it s
 
 .. code:: python
 
     from SpiffWorkflow.bpmn.serializer import BpmnWorkflowSerializer
-    from SpiffWorkflow.bpmn.serializer.default import EventConverter
     from SpiffWorkflow.spiff.serializer.task_spec import SpiffBpmnTaskConverter
     from SpiffWorkflow.spiff.serializer import DEFAULT_CONFIG
 
     class CustomStartEventConverter(SpiffBpmnTaskConverter):
 
-        def __init__(self, registry):
-            super().__init__(CustomStartEvent, registry)
-
         def to_dict(self, spec):
             dct = super().to_dict(spec)
-            if spec.timer_event is not None:
-                dct['event_definition'] = self.registry.convert(spec.timer_event)
-            else:
-                dct['event_definition'] = self.registry.convert(spec.event_definition)
+            dct['event_definition'] = self.registry.convert(spec.event_definition)
+            dct['timer_event'] = self.registry.convert(spec.timer_event)
             return dct
-
-    DEFAULT_CONFIG['task_specs'].remove(StartEventConverter)
-    DEFAULT_CONFIG['task_specs'].append(CustomStartEventConverter)
-    registry = BpmnWorkflowSerializer.configure(DEFAULT_CONFIG)
-    serializer = BpmnWorkflowSerializer(registry)
 
+        def from_dict(self, dct):
+            spec = super().from_dict(dct)
+            spec.event_definition = self.registry.restore(dct['event_definition'])
+            spec.timer_event = self.registry.restore(dct['timer_event'])
+            return spec
 
 Our converter will inherit from the :code:`SpiffBpmnTaskConverter`, since that's our base generic BPMN mixin class.
+The parent converter will handle serializing the standard BPMN attributes, as well as attributes added in the
+:code:`spiff` package.  There is a similar base converter in the :code:`bpmn.serializer.helpers` package.
 
-The :code:`SpiffBpmnTaskConverter` itself inherits from
-:code:`SpiffWorkflow.bpmn.serializer.helpers.task_spec.BpmnTaskSpecConverter`. which provides some helper methods for
-extracting standard attributes from tasks; the :code:`SpiffBpmnTaskConverter` does the same for extensions from the
-:code:`spiff` package.
-
-We don't have to do much -- all we do is replace the event definition with the original.  The timer event will be
-moved when the task is restored, and this saves us from having to write a custom parser.
-
-.. note::
-
-    It might be better have the class's init method take both the event definition to use *and* the timer event
-    definition. Unfortunately, our parser is not terribly intuitive or easily extendable, so I've done it this
-    way to make this a little easier to follow.
+A converter needs to implement two methods: :code:`to_dict` (which takes a task spec and returns a JSON-serializable
+dictionary of its attributes) and :code:`from_dict` (which takes the dictionary and returns a task spec of the
+appropriate type).  We call the base method to do most of the work, and then update the result to reflect the changes
+we made, in this case ensuring that both event definitions are handled.  The parent converter also provides :code:`convert`
+and :code:`restore` methods to serialize any object that Spiff's serializer knows how to handle.  For more details about
+the serializer, see :doc:`serialization`.
 
-When we create our serializer, we need to tell it about this task.  We'll remove the converter for the standard Start
-Event and add the one we created to the configuration.  We then get a registry of classes that the serializer knows
-about that includes our custom spec, as well as all the other specs and initialize the serializer with it.
+When we create our serializer, we need to tell it about this task.  The serializer is initialized with a mapping
+of object class to converter class, so we just need to add an entry for this mapping.
 
-.. note::
-
-    The reason there are two steps involved (regurning a registry and *then* passing it to the serializer) rather
-    that using the configuration directly is to allow further customization of the :code:`registry`.  Workflows
-    can contain arbtrary data, we want to provide developers the ability to serialization code for any object.  See
-    :ref:`serializing_custom_objects` for more information about how this works.
+.. code:: python
+
+    SPIFF_CONFIG[CustomStartEvent] = CustomStartEventConverter
+    registry = FileSerializer.configure(SPIFF_CONFIG)
+    serializer = FileSerializer(dirname, registry=registry)
 
-Finally, we have to update our parser:
+We also have to tell the parser to use our class instead of the standard class.
 
 .. code:: python
 
@@ -114,10 +121,3 @@ will use.  This is a bit unintuitive, but that's how it works.
 
 Fortunately, we were able to reuse an existing Task Spec parser, which simplifies the process quite a bit.
 
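+The parser registration itself is elided above; it might look roughly like this (a sketch -- the parser class names
+here are assumptions, so check them against :app:`misc/custom_start_event.py`):
+
+.. code:: python
+
+    from SpiffWorkflow.bpmn.parser.util import full_tag
+    from SpiffWorkflow.bpmn.parser.event_parsers import StartEventParser
+    from SpiffWorkflow.spiff.parser import SpiffBpmnParser
+
+    parser = SpiffBpmnParser()
+    # Reuse the stock Start Event parser, but have it build our CustomStartEvent
+    parser.OVERRIDE_PARSER_CLASSES[full_tag('startEvent')] = (StartEventParser, CustomStartEvent)
+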
-Having created a parser and serializer, we could create a configuration module and instantiate an engine with these
-components.
-
-There is a very simple diagram :bpmn:`timer_start.bpmn` with the process ID `timer_start` with a Start Event
-with a Duration Timer of one day that can be used to illustrate how the custom task works.  If you run this workflow
-with any of the configurations provided, you'll see a `WAITING` Start Event; if you use the parser and serializer we
-just created, you'll be propmted to complete the User Task immediately.

diff --git a/doc/bpmn/imports.rst b/doc/bpmn/imports.rst
index d5fa4ca9..fdce2d4a 100644
--- a/doc/bpmn/imports.rst
+++ b/doc/bpmn/imports.rst
@@ -124,7 +124,7 @@ Examples
 --------
 
 - :doc:`serialization`
-- :doc:`custom_task_specs`
+- :doc:`custom_task_spec`
 
 DMN
 ===

diff --git a/doc/bpmn/script_engine.rst b/doc/bpmn/script_engine.rst
index 23f7c078..e87ad5c0 100644
--- a/doc/bpmn/script_engine.rst
+++ b/doc/bpmn/script_engine.rst
@@ -16,15 +16,14 @@ Restricting the Script Environment
 
 The following example replaces the default global environment with the one provided by
 `RestrictedPython <https://restrictedpython.readthedocs.io/>`_.
 
-We've modified our engine configuration to use the restricted environment in :app:`spiff/restricted.py`
+We've modified our engine configuration to use the restricted environment in :app:`misc/restricted.py`
 
 .. code:: python
 
     from RestrictedPython import safe_globals
-    from SpiffWorkflow.bpmn.PythonScriptEngineEnvironment import TaskDataEnvironment
+    from SpiffWorkflow.bpmn.script_engine import TaskDataEnvironment
 
-    restricted_env = TaskDataEnvironment(safe_globals)
-    restricted_script_engine = PythonScriptEngine(environment=restricted_env)
+    script_env = TaskDataEnvironment(safe_globals)
 
 We've also included a dangerous process in :bpmn:`dangerous.bpmn`
 
@@ -48,7 +47,8 @@ You'll get an error, because imports have been restricted.
 
 .. note::
 
     Since we used exactly the same parser and serializer, we can simply switch back and forth between these
-    two script engines (that is the only difference between the two configurations).
+    two script engines (that is the only difference between the two configurations).  If you've made any
+    serializer or parser customizations, this is not likely to be possible.
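+
+To see the restriction in action outside of a diagram, you can execute a script through the environment directly
+(a sketch, using the environment created above):
+
+.. code:: python
+
+    script_engine = PythonScriptEngine(script_env)
+    # Raises an error, because RestrictedPython's safe_globals does not provide __import__
+    script_engine.environment.execute('import os', {})
+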
 
 Making Custom Classes and Functions Available
 =============================================
 
@@ -74,7 +74,8 @@ We are not going to actually include a database or API and write code for connec
 it, but since we only have 7 products we can model our database with a simple dictionary lookup and just return
 the same static info for shipping for the purposes of the tutorial.
 
-We'll customize our scripting environment in :app:`spiff/custom_object.py`:
+We'll create these "services" along with serialization methods in :app:`spiff/product_info.py` (see
+:ref:`serializing_custom_objects` for more information about serialization):
 
 .. code:: python
 
@@ -97,12 +98,15 @@ We'll customize our scripting environment in :app:`spiff/custom_object.py`:
     def lookup_product_info(product_name):
         return INVENTORY[product_name]
 
     def lookup_shipping_cost(shipping_method):
         return 25.00 if shipping_method == 'Overnight' else 5.00
 
+We'll then make the "services" available to our scripting environment.
+
+.. code:: python
+
     script_env = TaskDataEnvironment({
         'datetime': datetime,
         'lookup_product_info': lookup_product_info,
         'lookup_shipping_cost': lookup_shipping_cost,
     })
-    script_engine = PythonScriptEngine(script_env)
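+
+A Script Task in the diagram can then call these functions directly.  A script like the following (hypothetical, but
+in the spirit of the tutorial's `order_product` process) would leave its results in the task data:
+
+.. code:: python
+
+    product_info = lookup_product_info(product_name)
+    shipping_cost = lookup_shipping_cost(shipping_method)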
 
 .. note::
 
@@ -131,19 +135,17 @@ engine, but through a different method, with the help of some custom extensions
 The advantage of a Service Task is that it makes what is happening a bit more transparent (at least at a conceptual
 level) than function calls embedded in a Script Task.
 
-We implement the :code:`PythonScriptEngine.call_service` method in :app:`spiff/service_task.py`:
+We customize a scripting environment to implement the :code:`call_service` method in :app:`spiff/service_task.py`:
 
 .. code:: python
 
-    service_task_env = TaskDataEnvironment({
-        'product_info_from_dict': product_info_from_dict,
-        'datetime': datetime,
-    })
-
-    class ServiceTaskEngine(PythonScriptEngine):
+    class ServiceTaskEnvironment(TaskDataEnvironment):
 
         def __init__(self):
-            super().__init__(environment=service_task_env)
+            super().__init__({
+                'datetime': datetime,
+                'product_info_from_dict': product_info_from_dict,
+            })
 
         def call_service(self, operation_name, operation_params, task_data):
             if operation_name == 'lookup_product_info':
@@ -155,7 +157,7 @@ We implement the :code:`PythonScriptEngine.call_service` method in :app:`spiff/s
             else:
                 raise Exception("Unknown Service!")
             return json.dumps(result)
 
-    service_task_engine = ServiceTaskEngine()
+    script_env = ServiceTaskEnvironment()
 
 Instead of adding our custom functions to the environment, we'll override :code:`call_service` and call them directly
 according to the `operation_name` that was given.  The :code:`spiff` Service Task also evaluates the parameters
@@ -207,3 +209,282 @@ To run this workflow:
 
     ./runner.py -e spiff_example.spiff.service_task add -p order_product \
         -b bpmn/tutorial/{top_level_service_task,call_activity_service_task}.bpmn
+
+Generating BPMN Events Inside the Scripting Environment
+=======================================================
+
+When calling external services, there is of course a possibility that a failure could occur, and you might want to be
+able to pass that information back into the workflow and define how to handle it there.
+
+In this example, we'll have a service that displays the contents of a file and handles :code:`FileNotFoundError`.  We'll
+use the diagram :bpmn:`event_handler.bpmn` and the code in :app:`misc/event_handler.py`.
+
+As in the previous section, we'll use the :code:`ServiceTask` from the :code:`spiff` package, but we'll need to extend
+it.  This is where we'll handle errors.
+
+We define the following error in our XML (we can do this in our
+`modeler <https://github.com/sartography/bpmn-js-spiffworkflow>`_):
+
+.. code:: xml
+
+    <bpmn:error id="file_not_found" name="file_not_found" errorCode="1">
+      <bpmn:extensionElements>
+        <spiffworkflow:variableName>filename</spiffworkflow:variableName>
+      </bpmn:extensionElements>
+    </bpmn:error>
+
+In our scripting environment, we'll implement a "read_file" service.  This will of course raise an exception if the
+requested file is missing, but will otherwise return the contents.
+
+.. code:: python
+
+    class ServiceTaskEnvironment(TaskDataEnvironment):
+
+        def call_service(self, operation_name, operation_params, context):
+            if operation_name == 'read_file':
+                return open(operation_params['filename']).read()
+            else:
+                raise ValueError('Unknown Service')
+
+And here is the code for our task spec.
+
+.. code:: python
+
+    class EventHandlingServiceTask(ServiceTask):
+
+        def _execute(self, my_task):
+            script_engine = my_task.workflow.script_engine
+            # The param also has a type, but I don't need it
+            params = dict((name, script_engine.evaluate(my_task, p['value'])) for name, p in self.operation_params.items())
+            try:
+                result = script_engine.call_service(self.operation_name, params, my_task.data)
+                my_task.data[self._result_variable(my_task)] = result
+                return True
+            except FileNotFoundError as exc:
+                event_definition = ErrorEventDefinition('file_not_found', code='1')
+                event = BpmnEvent(event_definition, payload=params['filename'])
+                my_task.workflow.top_workflow.catch(event)
+                return False
+            except Exception as exc:
+                raise WorkflowTaskException('Service Task execution error', task=my_task, exception=exc)
+
+If the file was read successfully, we'll set a variable in our task data with the result (the name of the result
+variable is optionally specified in the XML, and the :code:`_result_variable` method returns either the specified name
+or a calculated name).  We return :code:`True` because the operation was a success (see :doc:`../concepts` for more
+information about state transitions).
+
+We'll catch :code:`FileNotFoundError` and construct an event to send it back to the workflow.  What we generate needs
+to match what's in the XML.
+
+.. note::
+
+    If you are building an application, you'll probably need to manage known exceptions in a way that is accessible to
+    both your modeler and your execution engine, but here we'll just build the event in the task spec so that it can
+    be caught in the diagram.
+
+We have to construct an :code:`EventDefinition` that matches what will be generated from the parsed XML (see
+:ref:`events` for a general overview of BPMN event handling).  SpiffWorkflow uses the :code:`EventDefinition` to
+determine whether a particular task handles an event.  The BPMN spec allows certain events, including Error Events, to
+optionally contain a payload.  In this case, we'll set the payload to be the name of the missing file, which can then be
+displayed to the user.
+
+We pass our constructed event to the workflow's :code:`catch` method, which will check to see if there are any tasks
+waiting for this event.  Each task has a reference to its workflow, but this task occurs in a subworkflow.  Event
+handling is done at the outermost level, so we'll use :code:`my_task.workflow.top_workflow` to get access to the top
+level.
+
+We'll return :code:`False`, since the operation was not a success; this will prevent task execution on that branch,
+but will not halt overall workflow execution.  An unhandled exception, as in the last case, will cause the entire
+workflow to halt.
+
+.. note::
+
+    The task spec is not the only place error handling could be implemented.  I kind of like this approach, as the task
+    spec defines the behavior for a particular type of task and this is part of that.  It would also be possible to
+    extend the :code:`PythonScriptEngine` to handle the errors.  The main reason I didn't do that here is that this
+    example application can be made less complex if only a scripting environment is supplied.  The script engine, unlike
+    the script environment, has access to the task and workflow (via the task), and the same thing could be done there
+    as well.
+
+To load this example:
+
+.. code:: console
+
+    ./runner.py -e spiff_example.misc.event_handler add -p read_file -b bpmn/tutorial/event_handler.bpmn
+    ./runner.py -e spiff_example.misc.event_handler
+
+.. note::
+
+    When running this example, it will probably be useful to change the task filter so that all tasks are visible.  Set
+    the state to `ANY_MASK` to see all tasks.
+
+Threaded Service Task
+=====================
+
+Suppose that we have some potentially time-consuming tasks and we want to execute them in threads so that we aren't
+blocking the entire workflow from executing while it runs (the default behavior).  In this section, we'll customize a
+scripting environment that contains a thread pool.
+
+First let's write a "service" that simply waits.
+
+.. code:: python
+
+    def wait(seconds, job_id):
+        time.sleep(seconds)
+        return f'{job_id} slept {seconds} seconds'
+
+We'll make this "service" available in our environment:
+
+.. code:: python
+
+    class ServiceTaskEnvironment(TaskDataEnvironment):
+
+        def __init__(self):
+            super().__init__()
+            self.pool = ThreadPoolExecutor(max_workers=10)
+            self.futures = {}
+
+        def call_service(self, operation_name, operation_params, context):
+            if operation_name == 'wait':
+                seconds = randrange(1, 30)
+                return self.pool.submit(wait, seconds, operation_params['job_id'])
+            else:
+                raise ValueError("Unknown Service!")
+
+Our service will return a future, and we'll manage these futures via a custom task spec.  The parent class is the
+Service Task of the :code:`spiff` package, which provides us with an :code:`operation_name` and
+:code:`operation_parameters`.  Each parameter has a name and a type, but I don't need the type, so I'll just get the
+values.  The values are expressions that we evaluate against the task data.  We'll map the future to the task in the
+script environment.
+
+.. code:: python
+
+    class ThreadedServiceTask(ServiceTask):
+
+        def _execute(self, my_task):
+            script_engine = my_task.workflow.script_engine
+            params = dict((name, script_engine.evaluate(my_task, p['value'])) for name, p in self.operation_params.items())
+            try:
+                future = script_engine.call_service(self.operation_name, params, my_task.data)
+                script_engine.environment.futures[future] = my_task
+            except Exception as exc:
+                raise WorkflowTaskException('Service Task execution error', task=my_task, exception=exc)
+
+Since our :code:`_execute` method returns :code:`None`, our task will transition to a :code:`STARTED` state (see
+:doc:`../concepts` for more information about state transitions).  SpiffWorkflow will ignore this task from this point
+on; this means our engine has to take over.
+
+We'll extend the :code:`Instance` class (defined in :app:`engine/instance.py`) to also check these futures when waiting
+tasks are refreshed.  As jobs complete, we'll call :code:`task.complete` to mark the task :code:`COMPLETED`.  The
+workflow will then be able to continue down that branch.
+
+.. code:: python
+
+    class ThreadInstance(Instance):
+
+        def update_completed_futures(self):
+            futures = self.workflow.script_engine.environment.futures
+            finished = [f for f in futures if f.done()]
+            for future in finished:
+                task = futures.pop(future)
+                result = future.result()
+                task.data[task.task_spec._result_variable(task)] = result
+                task.complete()
+
+        def run_ready_events(self):
+            self.update_completed_futures()
+            super().run_ready_events()
+
+.. note::
+
+    In a real application, you would probably want a separate service keeping track of the jobs and checking the
+    futures rather than polling in the engine, but that can't be easily set up in this example application.
+
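+Such a service could be as small as a background thread watching the same futures dictionary (a sketch, not part of
+the example application; a real version would also need to guard against concurrent updates to the workflow):
+
+.. code:: python
+
+    import threading
+    import time
+
+    def watch_futures(environment, interval=1.0):
+
+        def poll():
+            while True:
+                # Same bookkeeping as ThreadInstance above, but outside the engine loop
+                for future in [f for f in environment.futures if f.done()]:
+                    task = environment.futures.pop(future)
+                    task.data[task.task_spec._result_variable(task)] = future.result()
+                    task.complete()
+                time.sleep(interval)
+
+        threading.Thread(target=poll, daemon=True).start()
+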
+To load and run this example (as in the previous example, it is probably a good idea to update the task filter to show
+all tasks with the `ANY_MASK` state):
+
+.. code:: console
+
+    ./runner.py -e spiff_example.misc.threaded_service_task add -p threaded_service -b bpmn/tutorial/threaded_service_task.bpmn
+    ./runner.py -e spiff_example.misc.threaded_service_task
+
+
+Executing Scripts in a Subprocess
+=================================
+
+In this section, we'll show how you might execute your scripts outside of the workflow execution context.  This is a
+little contrived and there are undoubtedly better ways to accomplish it, but this has the advantage of being very
+simple.
+
+First we'll create an executable that can take a JSON-serialized context and an expression to evaluate or a script to
+execute (see :app:`spiff_example/spiff/subprocess_engine.py`).  This little program simply replicates the behavior of
+the default script engine.
+
+We import our custom functions here rather than our workflow's engine.  We'll also import the registry used by our
+serializer; we need to be able to generate JSON when we write our output, so we might as well reuse what we have.
+
+.. code:: python
+
+    from .custom_exec import (
+        lookup_product_info,
+        lookup_shipping_cost,
+        registry,
+    )
+
+This emulates how the default script engine handles evaluation and execution.
+
+.. code:: python
+
+    local_ctx = registry.restore(json.loads(args.context))
+    global_ctx = globals()
+    global_ctx.update(local_ctx)
+    if args.external is not None:
+        global_ctx.update(registry.restore(json.loads(args.external)))
+    if args.method == 'eval':
+        result = eval(args.expr, global_ctx, local_ctx)
+    elif args.method == 'exec':
+        exec(args.script, global_ctx, local_ctx)
+        result = local_ctx
+    print(json.dumps(registry.convert(result)))
+
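+Run by hand, the helper behaves like this (a hypothetical invocation, mirroring how the :code:`run` method below
+assembles the command line):
+
+.. code:: console
+
+    $ python -m spiff_example.spiff.subprocess_engine eval 'quantity * 2' -c '{"quantity": 3}'
+    6
+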
+Then we'll tell our scripting environment to use the script rather than invoking :code:`eval` and :code:`exec` directly.
+
+.. code:: python
+
+    class SubprocessScriptingEnvironment(BasePythonScriptEngineEnvironment):
+
+        def __init__(self, executable, serializer, **kwargs):
+            super().__init__(**kwargs)
+            self.executable = executable
+            self.serializer = serializer
+
+        def evaluate(self, expression, context, external_context=None):
+            output = self.run(['eval', expression], context, external_context)
+            return self.parse_output(output)
+
+        def execute(self, script, context, external_context=None):
+            output = self.run(['exec', script], context, external_context)
+            DeepMerge.merge(context, self.parse_output(output))
+            return True
+
+        def run(self, args, context, external_context):
+            cmd = ['python', '-m', self.executable] + args + ['-c', json.dumps(registry.convert(context))]
+            if external_context is not None:
+                cmd.extend(['-x', json.dumps(registry.convert(external_context))])
+            return subprocess.run(cmd, capture_output=True)
+
+        def parse_output(self, output):
+            if output.stderr:
+                raise Exception(output.stderr.decode('utf-8'))
+            return registry.restore(json.loads(output.stdout))
+
+    executable = 'spiff_example.spiff.subprocess_engine'
+    script_env = SubprocessScriptingEnvironment(executable, serializer)
+
+To load this example:
+
+.. code:: console
+
+    ./runner.py -e spiff_example.spiff.custom_exec add -p order_product \
+        -b bpmn/tutorial/{top_level_script,call_activity_script}.bpmn
+    ./runner.py -e spiff_example.spiff.custom_exec

diff --git a/doc/bpmn/workflows.rst b/doc/bpmn/workflows.rst
index 18285793..7d5c1ae4 100644
--- a/doc/bpmn/workflows.rst
+++ b/doc/bpmn/workflows.rst
@@ -9,7 +9,7 @@ From the :code:`start_workflow` method of our BPMN engine (:app:`engine/engine.p
 
     spec, sp_specs = self.serializer.get_workflow_spec(spec_id)
     wf = BpmnWorkflow(spec, sp_specs, script_engine=self._script_engine)
     wf_id = self.serializer.create_workflow(wf, spec_id)
-    return wf_id
+    return Instance(wf_id, wf)
 
 We'll use our serializer to recreate the workflow spec based on the id.  As discussed in :ref:`parsing_subprocesses`,
 a process has a top level specification and dictionary of process id -> spec containing any other processes referenced
 
@@ -28,6 +28,8 @@ In the simplest case, running a workflow involves implementing the following loo
 
 until there are no tasks left to complete.
 
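+A minimal version of that loop might look like this (a sketch; the example application's real loop also deals with
+UI concerns):
+
+.. code-block:: python
+
+    from SpiffWorkflow.util.task import TaskState
+
+    workflow.do_engine_steps()
+    while not workflow.is_completed():
+        for task in workflow.get_tasks(state=TaskState.READY, manual=True):
+            # Present the task to a user and collect input here, then:
+            task.run()
+        workflow.refresh_waiting_tasks()
+        workflow.do_engine_steps()
+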
+We'll refer to code from :app:`engine/instance.py` in the next few sections.
+
 Here are our engine methods:
 
 .. code-block:: python
 
@@ -54,7 +56,12 @@ it catches whatever event it is waiting on, at which point it becomes :code:`REA
 :code:`workflow.refresh_waiting_tasks` method iterates over all the waiting tasks and changes the state to
 :code:`READY` if the conditions for doing so have been met.
 
-We'll cover using the `workflow.get_next_task` method and handling Human tasks later in this document.
+We'll cover using the :code:`workflow.get_next_task` method and handling Human tasks later in this document.
+
+.. note::
+
+    The :code:`Instance` class also has a task filter attribute and a list of filtered tasks, which are used
+    by the UI, so we update them in these methods as well.
 
 Tasks
 =====
 
@@ -72,6 +79,14 @@ don't have to pay much attention to most of them.
 A few of the important ones are:
 
 * `description`: we use this attribute to provide a description of the BPMN task type
 * `manual`: :code:`True` if human input is required to complete tasks associated with this Task Spec
 
+The :code:`manual` attribute is particularly important, because SpiffWorkflow does not include built-in
+handling of these tasks, so you'll need to implement this as part of your application.  We'll go over how this is
+handled in this application in the next section.
+
+.. note::
+
+    NoneTasks (BPMN tasks with no more specific type assigned) are treated as Manual Tasks by SpiffWorkflow.
+
 BPMN Task Specs have the following additional attributes.
 
 * `bpmn_id`: the ID of the BPMN Task (this will be :code:`None` if the task is not visible on the diagram)
@@ -80,16 +95,17 @@ BPMN Task Specs have the following additional attributes.
 * `documentation`: the contents of the BPMN `documentation` element for the Task
 
 In the example application, we use the :code:`bpmn_name` (or :code:`name` when a :code:`bpmn_name` isn't specified),
-and :code:`lane` to display information about the tasks in a workflow (see the :code:`update_task_tree` method of
-:app:`curses_ui/workflow_view.py`).
+and :code:`lane` to display information about the tasks in a workflow:
 
-The :code:`manual` attribute is particularly important, because SpiffWorkflow does not include built-in
-handling of these tasks so you'll need to implement this as part of your application.  We'll go over how this is
-handled in this application in the next section.
-
-.. note::
-
-    NoneTasks (BPMN tasks with no more specific type assigned) are treated as Manual Tasks by SpiffWorkflow.
+.. code:: python
 
+    def get_task_display_info(self, task):
+        return {
+            'depth': task.depth,
+            'state': TaskState.get_name(task.state),
+            'name': task.task_spec.bpmn_name or task.task_spec.name,
+            'lane': task.task_spec.lane,
+        }
 
 Instantiated Tasks
 ------------------
 
@@ -125,14 +141,14 @@ Our User and Manual Task handlers render the instructions (this code is from :ap
 
     from jinja2 import Template
 
-    def get_instructions(self):
-        instructions = f'{self.task.task_spec.bpmn_name}\n\n'
-        text = self.task.task_spec.extensions.get('instructionsForEndUser')
+    def set_instructions(self, task):
+        user_input = self.ui._states['user_input']
+        user_input.instructions = f'{task.task_spec.bpmn_name}\n\n'
+        text = task.task_spec.extensions.get('instructionsForEndUser')
         if text is not None:
             template = Template(text)
-            instructions += template.render(self.task.data)
-            instructions += '\n\n'
-            return instructions
+            user_input.instructions += template.render(task.data)
+            user_input.instructions += '\n\n'
 
 We're not going to attempt to handle Markdown in a curses UI, so we'll assume we just have text.  However, we do
 want to be able to incorporate data specific to the workflow in information that is presented to a user; this is
@@ -149,20 +165,19 @@ We won't go into the details about how the form screen works, as it's specific t
 library itself; instead we'll skip to the code that runs the task after it has been presented to the user; any
 application needs to do this.
 
-Simply running the task is sufficient for Manual Tasks.
+When our form is submitted, we ask our :code:`Instance` to update the task data (if applicable, as in the case of a
+form) and run the task.
 
 .. code-block:: python
 
-    def on_complete(self, results):
-        self.task.run()
-
-However, we need to extend this method for User Tasks, to incorporate the user-submitted data into the workflow:
-
-.. code-block:: python
-
-    def on_complete(self, results):
-        self.task.set_data(**results)
-        super().on_complete(results)
+    def run_task(self, task, data=None):
+        if data is not None:
+            task.set_data(**data)
+        task.run()
+        if not self.step:
+            self.run_until_user_input_required()
+        else:
+            self.update_task_filter()
 
 Here we are setting a key for each field in the form.  Other possible options here are to set one key that contains
 all of the form data, or map the schema to a Python class and use that in lieu of a dictionary.  It's up to you to
@@ -174,7 +189,7 @@ simple example next.
 
 We'll refer to the process modeled in :bpmn:`task_types.bpmn`, which contains a simple form that asks a user to input
 a product and quantity, as well as a manual task presenting the order information at the end of the process (the form is
-defined in :form:`select_product_and_quantity.json`
+defined in :form:`select_product_and_quantity.json`)
 
 After the user submits the form, we'll collect the results in the following dictionary:
 
@@ -227,13 +242,15 @@ Filtering Tasks
 
 SpiffWorkflow has two methods for retrieving tasks:
 
-- :code:`workflow.get_tasks`: returns a list of matching tasks, or an empty list
+- :code:`workflow.get_tasks`: returns an iterator over matching tasks
 - :code:`workflow.get_next_task`: returns the first matching task, or None
 
-Both of these methods use the same helper classes and take the same arguments -- the only difference is the return
-type.
+Both of these methods use the same helper classes and take the same arguments -- the only difference is the return type.
 
-These methods return a :code:`TaskIterator`, which in turn uses a :code:`TaskFilter` to determine what tasks match.
+These methods create a :code:`TaskIterator`.  They take an optional first argument, a task to begin the iteration from
+(if it is not provided, iteration begins at the root).  This is useful if you know you want to continue executing a
+workflow from a particular place.  The remainder of the arguments are keyword arguments that are passed directly into
+a :code:`TaskFilter`, which will determine which tasks match.
 
 Tasks can be filtered by:
 
@@ -260,6 +277,9 @@ correspond to which states).
 
     from SpiffWorkflow.util.task import TaskState
 
+We can use this object to translate an integer to a human-readable name using :code:`TaskState.get_name(task.state)`;
+there is also a corresponding :code:`TaskState.get_value` method that goes from name to integer.
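+
+For example (a quick sketch of both helpers):
+
+.. code-block:: python
+
+    from SpiffWorkflow.util.task import TaskState
+
+    TaskState.get_name(task.state)   # e.g. 'READY'
+    TaskState.get_value('READY')     # the corresponding integer flag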
 
 Ready Human Tasks
 ^^^^^^^^^^^^^^^^^
 
@@ -336,6 +356,8 @@ Additionally, the class has a few extra attributes to make it more convenient to
 
 These methods exist on the top level workflow as well, and return :code:`None`.
 
+.. _events:
+
 Events
 ======
 
@@ -373,3 +395,5 @@ of event and might be used to help determine this.
 
 Once you have determined which workflow should receive the event, you can pass it to :code:`workflow.catch` to handle
 it.
+
+In :doc:`script_engine`, there is an example of how to create an event and pass it back to a workflow when executing
+a Service Task; this shows how you might construct a :code:`BpmnEvent` to pass to :code:`workflow.catch`.
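+
+For instance, delivering an external message to a running workflow might look like this (a sketch; the
+:code:`MessageEventDefinition` and :code:`BpmnEvent` import paths are assumptions to check against your version):
+
+.. code-block:: python
+
+    from SpiffWorkflow.bpmn.specs.event_definitions import MessageEventDefinition
+
+    event = BpmnEvent(MessageEventDefinition('order_approved'), payload={'approved': True})
+    workflow.catch(event)
+    workflow.refresh_waiting_tasks()
+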
diff --git a/doc/conf.py b/doc/conf.py index 3221acf1..e93b96c1 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -37,7 +37,7 @@ ] # Configure links to example repo -branch = 'improvement/better-interactive-workflow-runner' +branch = 'improvement/tweaks-and-extra-examples' extlinks = { 'example': (f'https://github.com/sartography/spiff-example-cli/tree/{branch}/' + '%s', '%s'), 'bpmn': (f'https://github.com/sartography/spiff-example-cli/tree/{branch}/bpmn/tutorial/' + '%s', '%s'),