Executor

Base executor for running DAG tasks.

DAG executor with context-based worker pool for async task execution.

Executor

Async DAG executor with context-based worker pool.
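
A minimal usage sketch (hedged: it assumes a DAG named dag has already been built elsewhere, and that the module path is shutils.dag.executor as shown in the source links below):

import asyncio

from shutils.dag.executor import Executor, ExecutorConfig

async def main(dag):
    # Two context workers; task_worker_num=0 removes the per-context task limit.
    executor = Executor(dag, config=ExecutorConfig(context_worker_num=2, task_worker_num=0))
    # With no input, run() creates a single default Context internally.
    return await executor.run()

# outputs = asyncio.run(main(dag))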

Source code in shutils/dag/executor.py
class Executor:
    """Async DAG executor with context-based worker pool."""

    def __init__(
        self,
        dag: DAG,
        runtime: Runtime | None = None,
        config: ExecutorConfig | None = None,
    ):
        """Initialize the executor.

        Args:
            dag: The DAG to execute.
            runtime: Optional runtime for tracking active contexts.
            config: Executor configuration.
        """
        if config is None:
            config = ExecutorConfig()
        if runtime is None:
            self.runtime = Runtime()
        else:
            self.runtime = runtime
        self.dag = dag
        self._config = config
        if self._config.process_pool_worker_num != 0:
            self._process_pool = ProcessPoolExecutor(max_workers=self._config.process_pool_worker_num)
        else:
            self._process_pool = None
        if self._config.thread_pool_worker_num != 0:
            self.__thread_pool = ThreadPoolExecutor(max_workers=self._config.thread_pool_worker_num)
        else:
            self.__thread_pool = None

        self._worker_idle: dict[int, bool] = {idx: False for idx in range(self._config.context_worker_num)}
        self._context_queue = ContextQueue()

    async def run(self, input_context: Context | list[Context] | None = None) -> list[OutputContext]:
        """Execute the DAG with given input contexts and return outputs.

        Args:
            input_context: Single context, list of contexts, or None for a default context.

        Returns:
            List of output contexts from the DAG sink node.
        """
        if input_context is None:
            input_context = [Context(self.runtime)]
        elif isinstance(input_context, Context):
            input_context = [input_context]
        elif isinstance(input_context, list):
            pass
        else:
            raise ValueError("context must be a Context or a list of Context")
        logger.info(f"[Executor.run]: length: {len(input_context)}, input: {input_context}")

        for context in input_context:
            await context.async_context.complete(self.dag.in_task)
            await self._context_queue.async_queue.put(context)
        logger.info("[Executor.run]: put input context to async queue done")

        env = Environment(self.runtime, self._process_pool, self.dag)
        worker_tasks = [
            asyncio.create_task(self._worker_loop(idx, env))
            for idx in range(self._config.context_worker_num)
        ]
        output = await asyncio.gather(*worker_tasks)
        output_context = []
        for output_context_list in output:
            output_context.extend(output_context_list)

        for task in self.dag.tasks.values():
            if isinstance(task, ShutdownTask):
                task.shutdown()
            elif isinstance(task, AsyncShutdownTask):
                await task.shutdown()
        return output_context

    async def _run_task(
        self, idx: int, sub_idx: int, task: TaskBase, in_context: Context, env: Environment
    ) -> tuple[list[Context], bool]:
        if task in in_context.awake_time:
            if in_context.awake_time[task] > time.time():
                logger.debug(f"{in_context} cannot awake now")
                await self._context_queue.async_queue.put(in_context)
                return [], False
            logger.debug(f"{in_context} can awake now")
            in_context.awake_time.pop(task)

        context_list = []
        try:
            logger.debug(f"[Worker{idx}-{sub_idx}]: {in_context} begin running {task}")
            if isinstance(task, ForegroundTask):
                if isinstance(task, SyncTask):
                    context_list = task(in_context, env)
                else:
                    raise ValueError(f"[Worker{idx}-{sub_idx}]: Unknown task type in foreground mode: {type(task)}")
            else:
                if isinstance(task, AsyncTask):
                    context_list = await task(in_context, env)
                elif isinstance(task, SyncTask):
                    if self.__thread_pool:
                        loop = asyncio.get_running_loop()
                        context_list = await loop.run_in_executor(self.__thread_pool, task, in_context, env)
                    else:
                        context_list = await asyncio.to_thread(task, in_context, env)
                else:
                    raise ValueError(f"[Worker{idx}-{sub_idx}]: Unknown task type: {type(task)}")
            logger.debug(f"[Worker{idx}-{sub_idx}]: {in_context} running {task} done, out[{context_list}]")
        except Exception as e:
            if task.config.retry_times > 0 and await in_context.async_task_state.retry(task) <= task.config.retry_times:
                if task.config.retry_interval != 0:
                    if callable(task.config.retry_interval):
                        interval = task.config.retry_interval(in_context)
                    else:
                        interval = task.config.retry_interval
                    in_context._awake_interval(interval, task)
                logger.warning(
                    f"[Worker{idx}-{sub_idx}]: {in_context} running {task} failed, "
                    f"retrying... error: {type(e).__name__}: {e}"
                )
                traceback.print_exc()
                await self._context_queue.async_queue.put(in_context)
                return [], False
            logger.error(f"[Worker{idx}-{sub_idx}]: {in_context} running {task} failed, error: {type(e).__name__}: {e}")
            traceback.print_exc()
            in_context.error_info = ErrorInfo(has_error=True, exception=e, error_node=task.id)
            await in_context.async_context.destory()

        for i, out_context in enumerate(context_list):  # `i`, not `idx`: avoid shadowing the worker index
            if not isinstance(out_context, (LoopContext, RateLimitContext)):
                await out_context.async_context.complete(task)
            if isinstance(out_context, RateLimitContext):
                context_list[i] = out_context.context

        return context_list, True

    @asynccontextmanager
    async def check_get_context(
        self, timeout: float | None = None, use_counter: bool = True
    ) -> AsyncGenerator[Context]:
        """Context manager that gets a context from the queue with optional timeout.

        Args:
            timeout: Seconds to wait for a context.
            use_counter: Whether to check the runtime counter for StopContext.
        """
        counter = self.runtime.counter
        if not use_counter or counter > 0:
            async with asyncio.timeout(timeout):
                async with self._context_queue.async_queue._get_with_context() as context:
                    pass
            yield context
        else:
            yield StopContext()

    @staticmethod
    async def _async_limit(semaphore: asyncio.Semaphore, coro: Coroutine):
        """Run a coroutine under a semaphore for concurrency limiting."""
        async with semaphore:
            return await coro

    async def _worker_loop(self, idx: int, env: Environment) -> list[OutputContext]:
        output_context: list[OutputContext] = []
        worker_storage = {}
        idle_count = 0
        while True:
            try:
                async with self.check_get_context(self._config.context_queue_timeout) as in_context:
                    self._worker_idle[idx] = False
                    logger.debug(f"[Worker{idx}]: get context[{in_context}] from async queue done")
                    if isinstance(in_context, StopContext):
                        logger.info(f"[Worker{idx}]: get StopContext, break")
                        break
                    if in_context.is_destory():
                        logger.error(f"[Worker{idx}]: Context {in_context} is destroyed, skip")
                        continue

                    available_tasks = await in_context.async_task_state.avaliable_task()
                    if not available_tasks:
                        logger.error(f"[Worker{idx}]: Context {in_context} has no available task, will destroy it")
                        await in_context.async_context.destory()
                        continue

                tasks = [
                    self._run_task(idx, sub_idx, task, in_context, env) for sub_idx, task in enumerate(available_tasks)
                ]
                if self._config.task_worker_num > 0:
                    semaphore = asyncio.Semaphore(self._config.task_worker_num)
                    tasks = [self._async_limit(semaphore, task) for task in tasks]
                token = _worker_context_var.set(worker_storage)
                task_results = await asyncio.gather(*tasks)
                _worker_context_var.reset(token)

                context_list = []
                available_task_set = set(available_tasks)
                for task, (task_context_list, succeed) in zip(available_tasks, task_results, strict=True):
                    context_list.extend(task_context_list)
                    if not succeed:
                        available_task_set.discard(task)
                available_tasks = list(available_task_set)
                if len(tasks) > 1:
                    # multiple tasks may emit the same context; deduplicate
                    context_list = list(set(context_list))
                filtered_context_list = []
                for context in context_list:
                    if context.freezing:
                        logger.debug(f"{context} is freezing, skip")
                        continue
                    filtered_context_list.append(context)
                context_list = filtered_context_list
                logger.debug(
                    f"[Worker{idx}]: {in_context} run tasks done, "
                    f"out[{context_list}], tasks[{available_tasks}]"
                )
                if available_tasks:
                    await self._context_postprocess(in_context, context_list, available_tasks)
                for out_context in context_list:
                    if isinstance(out_context, OutputContext):
                        output_context.append(out_context)
                    else:
                        if out_context == in_context:
                            await self._context_queue.async_queue.put(out_context, ContextPriority.LIFO)
                        elif isinstance(out_context, LoopContext):
                            await self._context_queue.async_queue.put(out_context, ContextPriority.FIFO_LOW)
                        else:
                            await self._context_queue.async_queue.put(out_context, ContextPriority.FIFO_HIGH)
            except TimeoutError:
                logger.debug(f"[Worker{idx}]: context queue get timeout, skip")
                self._worker_idle[idx] = True
                if all(self._worker_idle.values()):
                    idle_count += 1
                    if idle_count >= self._config.worker_idle_times:
                        logger.error(
                            f"[Worker{idx}]: all workers idle for {idle_count} times, break! "
                            "Please check whether there are deadlock tasks in the DAG!"
                        )
                        break
                continue
        return output_context

    async def _context_postprocess(
        self, in_context: Iterable[Context] | Context, output_context: Iterable[Context], running_tasks: list[TaskBase]
    ):
        if not self._config.enable_context_gc and not self._config.enable_context_bypass:
            return

        if len(running_tasks) == 1 and running_tasks[0] == self.dag.out_task:
            return

        if isinstance(in_context, Context):
            in_context = [in_context]
        in_context_set = {context for context in in_context if not context.is_destory()}
        out_context_set = {context for context in output_context if not context.is_destory()}

        if self._config.enable_context_gc and in_context_set:
            # collect all output contexts
            for context in output_context:
                # collect parent contexts
                parent_context = await context.async_context.parent_context()
                while parent_context is not None and not parent_context.context.is_destory():
                    out_context_set.add(parent_context.context)
                    parent_context = await parent_context.parent_context()
                # collect child contexts
                child_wrapper = context.async_context
                while await child_wrapper.child_context_num():
                    async for child_ctx in child_wrapper.iter_child_context():
                        if not child_ctx.context.is_destory():
                            out_context_set.add(child_ctx.context)

            # contexts that appear in the input set but not in the output set should be destroyed
            destroy_context_set = in_context_set - out_context_set
            for context in destroy_context_set:
                logger.debug(f"[ContextGC]: {context} is not in output context, destroying")
                await context.async_context.destory()

        if self._config.enable_context_bypass and out_context_set:
            # collect all input contexts
            for context in in_context:
                # collect parent contexts
                parent_context = await context.async_context.parent_context()
                while parent_context is not None and not parent_context.context.is_destory():
                    in_context_set.add(parent_context.context)
                    parent_context = await parent_context.parent_context()
                # collect child contexts
                child_iter = context.async_context
                while await child_iter.child_context_num():
                    async for child_ctx in child_iter.iter_child_context():
                        if not child_ctx.context.is_destory():
                            in_context_set.add(child_ctx.context)

            # contexts that appear in the output set but not in the input set should mask their bypass tasks
            new_context_set = out_context_set - in_context_set
            new_context_set = {item for item in new_context_set if not isinstance(item, LoopContext)}
            running_task_set = set(running_tasks)
            for context in new_context_set:
                current_running_tasks = context._completed_tasks & running_task_set
                bypass_tasks = self.dag._get_bypass_tasks(current_running_tasks)
                logger.debug(f"[ContextBypass]: {context} is not in input context, mask bypass tasks: {bypass_tasks}")
                for bypass_task in bypass_tasks:
                    await context.async_context.complete(bypass_task)
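
Retry behavior, as implemented in _run_task above: on failure, a task is requeued up to config.retry_times, optionally delayed by config.retry_interval, which may be a number of seconds or a callable taking the context. A hedged sketch (flaky_task is a hypothetical task; the config field names are taken from the code above, not from separate documentation):

def backoff(context):
    # hypothetical delay policy: a flat two-second wait before each retry
    return 2.0

flaky_task.config.retry_times = 3           # requeue up to three times on failure
flaky_task.config.retry_interval = backoff  # callable: evaluated per context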

__init__(dag, runtime=None, config=None)

Initialize the executor.

Parameters:

Name      Type                    Description                                      Default
dag       DAG                     The DAG to execute.                              required
runtime   Runtime | None          Optional runtime for tracking active contexts.   None
config    ExecutorConfig | None   Executor configuration.                          None
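
A construction sketch grounded in the code below: omitted arguments fall back to fresh defaults, so the two executors here are configured identically (each ends up with its own Runtime):

executor_a = Executor(dag)
executor_b = Executor(dag, runtime=Runtime(), config=ExecutorConfig())
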
Source code in shutils/dag/executor.py
def __init__(
    self,
    dag: DAG,
    runtime: Runtime | None = None,
    config: ExecutorConfig | None = None,
):
    """Initialize the executor.

    Args:
        dag: The DAG to execute.
        runtime: Optional runtime for tracking active contexts.
        config: Executor configuration.
    """
    if config is None:
        config = ExecutorConfig()
    if runtime is None:
        self.runtime = Runtime()
    else:
        self.runtime = runtime
    self.dag = dag
    self._config = config
    if self._config.process_pool_worker_num != 0:
        self._process_pool = ProcessPoolExecutor(max_workers=self._config.process_pool_worker_num)
    else:
        self._process_pool = None
    if self._config.thread_pool_worker_num != 0:
        self.__thread_pool = ThreadPoolExecutor(max_workers=self._config.thread_pool_worker_num)
    else:
        self.__thread_pool = None

    self._worker_idle: dict[int, bool] = {idx: False for idx in range(self._config.context_worker_num)}
    self._context_queue = ContextQueue()

check_get_context(timeout=None, use_counter=True) async

Context manager that gets a context from the queue with optional timeout.

Parameters:

Name          Type           Description                                             Default
timeout       float | None   Seconds to wait for a context.                          None
use_counter   bool           Whether to check the runtime counter for StopContext.   True
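
A hedged sketch of how the executor's own worker loop consumes this context manager; the StopContext and TimeoutError handling mirror _worker_loop:

try:
    async with executor.check_get_context(timeout=1.0) as ctx:
        if isinstance(ctx, StopContext):
            ...  # runtime counter reached zero: no live contexts remain
        else:
            ...  # process ctx
except TimeoutError:
    ...  # the queue stayed empty for the full timeout
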
Source code in shutils/dag/executor.py
@asynccontextmanager
async def check_get_context(
    self, timeout: float | None = None, use_counter: bool = True
) -> AsyncGenerator[Context]:
    """Context manager that gets a context from the queue with optional timeout.

    Args:
        timeout: Seconds to wait for a context.
        use_counter: Whether to check the runtime counter for StopContext.
    """
    counter = self.runtime.counter
    if not use_counter or counter > 0:
        async with asyncio.timeout(timeout):
            async with self._context_queue.async_queue._get_with_context() as context:
                pass
        yield context
    else:
        yield StopContext()

run(input_context=None) async

Execute the DAG with given input contexts and return outputs.

Parameters:

Name            Type                             Description                                                         Default
input_context   Context | list[Context] | None   Single context, list of contexts, or None for a default context.   None

Returns:

Type                  Description
list[OutputContext]   List of output contexts from the DAG sink node.
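
A sketch of the accepted input shapes, as normalized by the source below (anything other than Context, list, or None raises ValueError):

outputs = await executor.run()                           # None: one default Context is created
outputs = await executor.run(Context(executor.runtime))  # single Context, wrapped in a list
outputs = await executor.run([Context(executor.runtime) for _ in range(3)])  # batch of three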

Source code in shutils/dag/executor.py
async def run(self, input_context: Context | list[Context] | None = None) -> list[OutputContext]:
    """Execute the DAG with given input contexts and return outputs.

    Args:
        input_context: Single context, list of contexts, or None for a default context.

    Returns:
        List of output contexts from the DAG sink node.
    """
    if input_context is None:
        input_context = [Context(self.runtime)]
    elif isinstance(input_context, Context):
        input_context = [input_context]
    elif isinstance(input_context, list):
        pass
    else:
        raise ValueError("context must be a Context or a list of Context")
    logger.info(f"[Executor.run]: length: {len(input_context)}, input: {input_context}")

    for context in input_context:
        await context.async_context.complete(self.dag.in_task)
        await self._context_queue.async_queue.put(context)
    logger.info("[Executor.run]: put input context to async queue done")

    env = Environment(self.runtime, self._process_pool, self.dag)
    worker_tasks = [
        asyncio.create_task(self._worker_loop(idx, env))
        for idx in range(self._config.context_worker_num)
    ]
    output = await asyncio.gather(*worker_tasks)
    output_context = []
    for output_context_list in output:
        output_context.extend(output_context_list)

    for task in self.dag.tasks.values():
        if isinstance(task, ShutdownTask):
            task.shutdown()
        elif isinstance(task, AsyncShutdownTask):
            await task.shutdown()
    return output_context

ExecutorConfig dataclass

Configuration for the DAG executor.

Attributes:

Name                      Type           Description
context_worker_num        int            Number of worker coroutines for processing contexts.
task_worker_num           int            Max concurrent tasks per context (0 means unlimited).
context_queue_timeout     float | None   Seconds to wait for a context before timing out.
thread_pool_worker_num    int | None     Thread pool size for sync tasks (0 disables).
process_pool_worker_num   int | None     Process pool size for process tasks (0 disables).
enable_context_gc         bool           Whether to garbage-collect orphaned contexts.
enable_context_bypass     bool           Whether to auto-skip bypass tasks for new contexts.
worker_idle_times         int            Number of consecutive idle checks before stopping all workers.
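
An illustrative configuration using only the fields defined below (the values are arbitrary):

config = ExecutorConfig(
    context_worker_num=4,       # four concurrent worker coroutines
    task_worker_num=0,          # no per-context task concurrency limit
    context_queue_timeout=0.5,  # poll the context queue every 0.5 s
    thread_pool_worker_num=8,   # run sync tasks in an 8-thread pool
    enable_context_gc=False,    # keep orphaned contexts alive
)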

Source code in shutils/dag/executor.py
@dataclass
class ExecutorConfig:
    """Configuration for the DAG executor.

    Attributes:
        context_worker_num: Number of worker coroutines for processing contexts.
        task_worker_num: Max concurrent tasks per context (0 means unlimited).
        context_queue_timeout: Seconds to wait for a context before timing out.
        thread_pool_worker_num: Thread pool size for sync tasks (0 disables).
        process_pool_worker_num: Process pool size for process tasks (0 disables).
        enable_context_gc: Whether to garbage-collect orphaned contexts.
        enable_context_bypass: Whether to auto-skip bypass tasks for new contexts.
        worker_idle_times: Number of consecutive idle checks before stopping all workers.
    """
    context_worker_num: int = 1
    task_worker_num: int = 1
    context_queue_timeout: float | None = 1
    thread_pool_worker_num: int | None = 0
    process_pool_worker_num: int | None = 0
    enable_context_gc: bool = True
    enable_context_bypass: bool = True
    worker_idle_times: int = 20

WorkerLocalProxy

Thread-local-like proxy backed by a contextvars.ContextVar.
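
A hedged usage sketch: it assumes a module-level WorkerLocalProxy instance (the error message in __setattr__ refers to one as worker_local) and a hypothetical make_client() factory; attribute access only works while the executor has _worker_context_var set, i.e. inside a running task:

worker_local = WorkerLocalProxy()  # assumed module-level instance

def my_task(context, env):
    # Cache one expensive client per worker instead of per call.
    if "client" not in worker_local:         # __contains__ checks the worker storage
        worker_local.client = make_client()  # hypothetical factory; __setattr__ writes to worker storage
    return worker_local.client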

Source code in shutils/dag/executor.py
class WorkerLocalProxy:
    """Thread-local-like proxy backed by a contextvars.ContextVar."""

    def __getattr__(self, name: str):
        try:
            # Get the worker storage from the current context, then read the attribute from it
            return _worker_context_var.get()[name]
        except (LookupError, KeyError):
            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") from None

    def __setattr__(self, name: str, value: Any):
        try:
            # Get the worker storage from the current context, then set the attribute on it
            _worker_context_var.get()[name] = value
        except LookupError:
            raise RuntimeError(
                "Cannot set attribute on worker_local outside of a running worker context."
            ) from None

    def __delattr__(self, name: str):
        try:
            # Delete the attribute from the worker storage in the current context
            del _worker_context_var.get()[name]
        except (LookupError, KeyError):
            raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'") from None

    def __contains__(self, item: str) -> bool:
        try:
            return item in _worker_context_var.get()
        except LookupError:
            return False