
prefect.logging.handlers

APILogHandler

Bases: Handler

A logging handler that sends logs to the Prefect API.

Sends log records to the APILogWorker, which manages sending batches of logs in the background.
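
A minimal usage sketch, assuming the logger name and message are arbitrary examples: attach the handler to a standard library logger and emit records from within a run context.

import logging

from prefect.logging.handlers import APILogHandler

logger = logging.getLogger("my-app")  # arbitrary example name
logger.setLevel(logging.INFO)
logger.addHandler(APILogHandler())

# Inside a flow or task run context, this record is enqueued with the
# APILogWorker and shipped to the API in a background batch.
logger.info("Hello from a flow!")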

Source code in prefect/logging/handlers.py
class APILogHandler(logging.Handler):
    """
    A logging handler that sends logs to the Prefect API.

    Sends log records to the `APILogWorker` which manages sending batches of logs in
    the background.
    """

    @classmethod
    def flush(cls):
        """
        Tell the `APILogWorker` to send any currently enqueued logs and block until
        completion.

        Use `aflush` from async contexts instead.
        """
        loop = get_running_loop()
        if loop:
            if in_global_loop():  # Guard against internal misuse
                raise RuntimeError(
                    "Cannot call `APILogWorker.flush` from the global event loop; it"
                    " would block the event loop and cause a deadlock. Use"
                    " `APILogWorker.aflush` instead."
                )

            # Not ideal, but this method is called by the stdlib and cannot return a
            # coroutine so we just schedule the drain in a new thread and continue
            from_sync.call_soon_in_new_thread(create_call(APILogWorker.drain_all))
            return None
        else:
            # We set a timeout of 5s because we don't want to block forever if the worker
            # is stuck. This can occur when the handler is being shut down and the
            # `logging._lock` is held but the worker is attempting to emit logs resulting
            # in a deadlock.
            return APILogWorker.drain_all(timeout=5)

    @classmethod
    def aflush(cls):
        """
        Tell the `APILogWorker` to send any currently enqueued logs and block until
        completion.

        Raises a `RuntimeError` if called from a synchronous context; use
        `flush` instead.
        """

        if not get_running_loop():
            raise RuntimeError(
                "`aflush` cannot be used from a synchronous context; use `flush`"
                " instead."
            )

        return APILogWorker.drain_all()

    def emit(self, record: logging.LogRecord):
        """
        Send a log to the `APILogWorker`
        """
        try:
            profile = prefect.context.get_settings_context()

            if not PREFECT_LOGGING_TO_API_ENABLED.value_from(profile.settings):
                return  # Respect the global settings toggle
            if not getattr(record, "send_to_api", True):
                return  # Do not send records that have opted out
            if not getattr(record, "send_to_orion", True):
                return  # Backwards compatibility

            log = self.prepare(record)
            APILogWorker.instance().send(log)

        except Exception:
            self.handleError(record)

    def handleError(self, record: logging.LogRecord) -> None:
        _, exc, _ = sys.exc_info()

        if isinstance(exc, MissingContextError):
            log_handling_when_missing_flow = (
                PREFECT_LOGGING_TO_API_WHEN_MISSING_FLOW.value()
            )
            if log_handling_when_missing_flow == "warn":
                # Warn when a logger is used outside of a run context; the stack
                # level here gets us to the user's logging call
                warnings.warn(str(exc), stacklevel=8)
                return
            elif log_handling_when_missing_flow == "ignore":
                return
            else:
                raise exc

        # Display a longer traceback for other errors
        return super().handleError(record)

    def prepare(self, record: logging.LogRecord) -> Dict[str, Any]:
        """
        Convert a `logging.LogRecord` to the API `LogCreate` schema and serialize.

        This infers the linked flow or task run from the log record or the current
        run context.

        If a flow run id cannot be found, the log will be dropped.

        Logs exceeding the maximum size will be dropped.
        """
        flow_run_id = getattr(record, "flow_run_id", None)
        task_run_id = getattr(record, "task_run_id", None)

        if not flow_run_id:
            try:
                context = prefect.context.get_run_context()
            except MissingContextError:
                raise MissingContextError(
                    f"Logger {record.name!r} attempted to send logs to the API without"
                    " a flow run id. The API log handler can only send logs within"
                    " flow run contexts unless the flow run id is manually provided."
                ) from None

            if hasattr(context, "flow_run"):
                flow_run_id = context.flow_run.id
            elif hasattr(context, "task_run"):
                flow_run_id = context.task_run.flow_run_id
                task_run_id = task_run_id or context.task_run.id
            else:
                raise ValueError(
                    "Encountered malformed run context. Does not contain flow or task "
                    "run information."
                )

        # Parsing to a `LogCreate` object here gives us nice parsing error messages
        # from the standard lib `handleError` method if something goes wrong and
        # prevents malformed logs from entering the queue
        try:
            is_uuid_like = isinstance(flow_run_id, uuid.UUID) or (
                isinstance(flow_run_id, str) and uuid.UUID(flow_run_id)
            )
        except ValueError:
            is_uuid_like = False

        log = LogCreate(
            flow_run_id=flow_run_id if is_uuid_like else None,
            task_run_id=task_run_id,
            name=record.name,
            level=record.levelno,
            timestamp=pendulum.from_timestamp(
                getattr(record, "created", None) or time.time()
            ),
            message=self.format(record),
        ).dict(json_compatible=True)

        log_size = log["__payload_size__"] = self._get_payload_size(log)
        if log_size > PREFECT_LOGGING_TO_API_MAX_LOG_SIZE.value():
            raise ValueError(
                f"Log of size {log_size} is greater than the max size of "
                f"{PREFECT_LOGGING_TO_API_MAX_LOG_SIZE.value()}"
            )

        return log

    def _get_payload_size(self, log: Dict[str, Any]) -> int:
        return len(json.dumps(log).encode())

aflush classmethod

Tell the APILogWorker to send any currently enqueued logs and block until completion.

Raises a RuntimeError if called from a synchronous context; use flush instead.
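
A sketch of draining the queue from async code, where flush is unavailable; this assumes, per the async design of this method, that the drain it returns is awaitable.

import asyncio

from prefect.logging.handlers import APILogHandler

async def main():
    # ... emit logs via a logger configured with APILogHandler ...
    # Wait for the APILogWorker to send everything before the loop closes.
    await APILogHandler.aflush()

asyncio.run(main())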

Source code in prefect/logging/handlers.py
@classmethod
def aflush(cls):
    """
    Tell the `APILogWorker` to send any currently enqueued logs and block until
    completion.

    Raises a `RuntimeError` if called from a synchronous context; use
    `flush` instead.
    """

    if not get_running_loop():
        raise RuntimeError(
            "`aflush` cannot be used from a synchronous context; use `flush`"
            " instead."
        )

    return APILogWorker.drain_all()

emit

Send a log to the APILogWorker.
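
Because emit skips any record whose send_to_api attribute is false, an individual log call can opt out of API shipping via extra. A sketch inside a run context:

from prefect import flow, get_run_logger

@flow
def noisy_flow():
    logger = get_run_logger()
    # emit() reads send_to_api off the record and skips this one, so the
    # message goes to local handlers only.
    logger.info("local only", extra={"send_to_api": False})

noisy_flow()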

Source code in prefect/logging/handlers.py
def emit(self, record: logging.LogRecord):
    """
    Send a log to the `APILogWorker`
    """
    try:
        profile = prefect.context.get_settings_context()

        if not PREFECT_LOGGING_TO_API_ENABLED.value_from(profile.settings):
            return  # Respect the global settings toggle
        if not getattr(record, "send_to_api", True):
            return  # Do not send records that have opted out
        if not getattr(record, "send_to_orion", True):
            return  # Backwards compatibility

        log = self.prepare(record)
        APILogWorker.instance().send(log)

    except Exception:
        self.handleError(record)

flush classmethod

Tell the APILogWorker to send any currently enqueued logs and block until completion.

Use aflush from async contexts instead.
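
A sketch of draining the queue at the end of a synchronous script; per the source below, the call blocks for at most 5 seconds if the worker is stuck.

from prefect.logging.handlers import APILogHandler

# ... run flows, emit logs ...

# Block until all enqueued logs have been sent (up to the 5s timeout).
APILogHandler.flush()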

Source code in prefect/logging/handlers.py
@classmethod
def flush(cls):
    """
    Tell the `APILogWorker` to send any currently enqueued logs and block until
    completion.

    Use `aflush` from async contexts instead.
    """
    loop = get_running_loop()
    if loop:
        if in_global_loop():  # Guard against internal misuse
            raise RuntimeError(
                "Cannot call `APILogWorker.flush` from the global event loop; it"
                " would block the event loop and cause a deadlock. Use"
                " `APILogWorker.aflush` instead."
            )

        # Not ideal, but this method is called by the stdlib and cannot return a
        # coroutine so we just schedule the drain in a new thread and continue
        from_sync.call_soon_in_new_thread(create_call(APILogWorker.drain_all))
        return None
    else:
        # We set a timeout of 5s because we don't want to block forever if the worker
        # is stuck. This can occur when the handler is being shut down and the
        # `logging._lock` is held but the worker is attempting to emit logs resulting
        # in a deadlock.
        return APILogWorker.drain_all(timeout=5)

prepare

Convert a logging.LogRecord to the API LogCreate schema and serialize.

This infers the linked flow or task run from the log record or the current run context.

If a flow run id cannot be found, the log will be dropped.

Logs exceeding the maximum size will be dropped.
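
Since prepare reads flow_run_id and task_run_id off the record before consulting the run context, a log can be linked to a run from outside any run context by passing the id via extra. The id below is a placeholder, not a real run:

import logging
from uuid import UUID

from prefect.logging.handlers import APILogHandler

logger = logging.getLogger("external")
logger.addHandler(APILogHandler())

# prepare() accepts a UUID (or a UUID-like string); anything else is
# replaced with None before the LogCreate schema is built.
run_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder id
logger.warning("reporting from outside the flow", extra={"flow_run_id": run_id})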

Source code in prefect/logging/handlers.py
def prepare(self, record: logging.LogRecord) -> Dict[str, Any]:
    """
    Convert a `logging.LogRecord` to the API `LogCreate` schema and serialize.

    This infers the linked flow or task run from the log record or the current
    run context.

    If a flow run id cannot be found, the log will be dropped.

    Logs exceeding the maximum size will be dropped.
    """
    flow_run_id = getattr(record, "flow_run_id", None)
    task_run_id = getattr(record, "task_run_id", None)

    if not flow_run_id:
        try:
            context = prefect.context.get_run_context()
        except MissingContextError:
            raise MissingContextError(
                f"Logger {record.name!r} attempted to send logs to the API without"
                " a flow run id. The API log handler can only send logs within"
                " flow run contexts unless the flow run id is manually provided."
            ) from None

        if hasattr(context, "flow_run"):
            flow_run_id = context.flow_run.id
        elif hasattr(context, "task_run"):
            flow_run_id = context.task_run.flow_run_id
            task_run_id = task_run_id or context.task_run.id
        else:
            raise ValueError(
                "Encountered malformed run context. Does not contain flow or task "
                "run information."
            )

    # Parsing to a `LogCreate` object here gives us nice parsing error messages
    # from the standard lib `handleError` method if something goes wrong and
    # prevents malformed logs from entering the queue
    try:
        is_uuid_like = isinstance(flow_run_id, uuid.UUID) or (
            isinstance(flow_run_id, str) and uuid.UUID(flow_run_id)
        )
    except ValueError:
        is_uuid_like = False

    log = LogCreate(
        flow_run_id=flow_run_id if is_uuid_like else None,
        task_run_id=task_run_id,
        name=record.name,
        level=record.levelno,
        timestamp=pendulum.from_timestamp(
            getattr(record, "created", None) or time.time()
        ),
        message=self.format(record),
    ).dict(json_compatible=True)

    log_size = log["__payload_size__"] = self._get_payload_size(log)
    if log_size > PREFECT_LOGGING_TO_API_MAX_LOG_SIZE.value():
        raise ValueError(
            f"Log of size {log_size} is greater than the max size of "
            f"{PREFECT_LOGGING_TO_API_MAX_LOG_SIZE.value()}"
        )

    return log

PrefectConsoleHandler

Bases: StreamHandler

Source code in prefect/logging/handlers.py
class PrefectConsoleHandler(logging.StreamHandler):
    def __init__(
        self,
        stream=None,
        highlighter: Highlighter = PrefectConsoleHighlighter,
        styles: Dict[str, str] = None,
        level: Union[int, str] = logging.NOTSET,
    ):
        """
        The default console handler for Prefect, which highlights log levels,
        web and file URLs, flow and task (run) names, and state types in the
        local console (terminal).

        Highlighting can be toggled on/off with the PREFECT_LOGGING_COLORS setting.
        For finer control, use logging.yml to add or remove styles, and/or
        adjust colors.
        """
        super().__init__(stream=stream)

        styled_console = PREFECT_LOGGING_COLORS.value()
        markup_console = PREFECT_LOGGING_MARKUP.value()
        if styled_console:
            highlighter = highlighter()
            theme = Theme(styles, inherit=False)
        else:
            highlighter = NullHighlighter()
            theme = Theme(inherit=False)

        self.level = level
        self.console = Console(
            highlighter=highlighter,
            theme=theme,
            file=self.stream,
            markup=markup_console,
        )

    def emit(self, record: logging.LogRecord):
        try:
            message = self.format(record)
            self.console.print(message, soft_wrap=True)
        except RecursionError:
            # This was copied over from logging.StreamHandler().emit()
            # https://bugs.python.org/issue36272
            raise
        except Exception:
            self.handleError(record)
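
The PREFECT_LOGGING_COLORS and PREFECT_LOGGING_MARKUP settings are read once, in __init__, so they must be in effect before the handler is constructed. A sketch using temporary_settings to build a plain-text handler:

from prefect.logging.handlers import PrefectConsoleHandler
from prefect.settings import PREFECT_LOGGING_COLORS, temporary_settings

# Settings are captured at construction time, so apply them first.
with temporary_settings({PREFECT_LOGGING_COLORS: False}):
    handler = PrefectConsoleHandler()  # falls back to NullHighlighter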

"""