mirror of https://github.com/python/cpython
Merged logging cookbook update from 3.2.
This commit is contained in:
commit 452806c8e3
@@ -1343,3 +1343,231 @@ These are not "true" .gz files, as they are bare compressed data, with no
"container" such as you’d find in an actual gzip file. This snippet is just
for illustration purposes.

A more elaborate multiprocessing example
----------------------------------------

The following working example shows how logging can be used with multiprocessing
using logging configuration dictionaries. The configurations are fairly simple,
but serve to illustrate how more complex ones could be implemented in a real
multiprocessing scenario.

In the example, the main process spawns a listener process and some worker
processes. The main process, the listener and the workers have three separate
configurations (the workers all share the same configuration). We can see
logging in the main process, how the workers log to a QueueHandler and how the
listener implements a QueueListener and a more complex logging configuration,
and arranges to dispatch events received via the queue to the handlers
specified in the configuration. Note that these configurations are purely
illustrative, but you should be able to adapt this example to your own
scenario.
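
To make the moving parts easier to see before reading the whole script, here is
a minimal, self-contained sketch of the queue-based pattern it relies on. This
sketch is not part of the original example: it collapses both sides into a
single process purely to show the data flow, and the handler choice is
illustrative::

    import logging
    import logging.handlers
    from multiprocessing import Queue

    q = Queue()

    # Worker side: a QueueHandler on the root logger puts every record
    # onto the shared queue.
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(logging.handlers.QueueHandler(q))

    # Listener side: a QueueListener drains the queue and hands each record
    # to an ordinary handler - here, a StreamHandler writing to the console.
    listener = logging.handlers.QueueListener(q, logging.StreamHandler())
    listener.start()
    root.info('This message travels via the queue to the StreamHandler')
    listener.stop()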

Here's the full script - the docstrings and the comments hopefully explain how
it works::

    import logging
    import logging.config
    import logging.handlers
    from multiprocessing import Process, Queue, Event, current_process
    import os
    import random
    import time

    class MyHandler(object):
        """
        A simple handler for logging events. It runs in the listener process and
        dispatches events to loggers based on the name in the received record,
        which then get dispatched, by the logging system, to the handlers
        configured for those loggers.
        """
        def handle(self, record):
            logger = logging.getLogger(record.name)
            # The process name is transformed just to show that it's the listener
            # doing the logging to files and console
            record.processName = '%s (for %s)' % (current_process().name, record.processName)
            logger.handle(record)

    def listener_process(q, stop_event, config):
        """
        This could be done in the main process, but is just done in a separate
        process for illustrative purposes.

        This initialises logging according to the specified configuration,
        starts the listener and waits for the main process to signal completion
        via the event. The listener is then stopped, and the process exits.
        """
        logging.config.dictConfig(config)
        listener = logging.handlers.QueueListener(q, MyHandler())
        listener.start()
        if os.name == 'posix':
            # On POSIX, the setup logger will have been configured in the
            # parent process, but should have been disabled following the
            # dictConfig call.
            # On Windows, since fork isn't used, the setup logger won't
            # exist in the child, so it would be created and the message
            # would appear - hence the "if posix" clause.
            logger = logging.getLogger('setup')
            logger.critical('Should not appear, because of disabled logger ...')
        stop_event.wait()
        listener.stop()

    def worker_process(config):
        """
        A number of these are spawned for the purpose of illustration. In
        practice, they could be a heterogeneous bunch of processes rather than
        ones which are identical to each other.

        This initialises logging according to the specified configuration,
        and logs a hundred messages with random levels to randomly selected
        loggers.

        A small sleep is added to allow other processes a chance to run. This
        is not strictly needed, but it mixes the output from the different
        processes a bit more than if it's left out.
        """
        logging.config.dictConfig(config)
        levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR,
                  logging.CRITICAL]
        loggers = ['foo', 'foo.bar', 'foo.bar.baz',
                   'spam', 'spam.ham', 'spam.ham.eggs']
        if os.name == 'posix':
            # On POSIX, the setup logger will have been configured in the
            # parent process, but should have been disabled following the
            # dictConfig call.
            # On Windows, since fork isn't used, the setup logger won't
            # exist in the child, so it would be created and the message
            # would appear - hence the "if posix" clause.
            logger = logging.getLogger('setup')
            logger.critical('Should not appear, because of disabled logger ...')
        for i in range(100):
            lvl = random.choice(levels)
            logger = logging.getLogger(random.choice(loggers))
            logger.log(lvl, 'Message no. %d', i)
            time.sleep(0.01)

    def main():
        q = Queue()
        # The main process gets a simple configuration which prints to the console.
        config_initial = {
            'version': 1,
            'formatters': {
                'detailed': {
                    'class': 'logging.Formatter',
                    'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
                }
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'level': 'INFO',
                },
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['console']
            },
        }
        # The worker process configuration is just a QueueHandler attached to the
        # root logger, which allows all messages to be sent to the queue.
        # We disable existing loggers to disable the "setup" logger used in the
        # parent process. This is needed on POSIX because the logger will
        # be there in the child following a fork().
        config_worker = {
            'version': 1,
            'disable_existing_loggers': True,
            'handlers': {
                'queue': {
                    'class': 'logging.handlers.QueueHandler',
                    'queue': q,
                },
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['queue']
            },
        }
        # The listener process configuration shows that the full flexibility of
        # logging configuration is available to dispatch events to handlers however
        # you want.
        # We disable existing loggers to disable the "setup" logger used in the
        # parent process. This is needed on POSIX because the logger will
        # be there in the child following a fork().
        config_listener = {
            'version': 1,
            'disable_existing_loggers': True,
            'formatters': {
                'detailed': {
                    'class': 'logging.Formatter',
                    'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
                },
                'simple': {
                    'class': 'logging.Formatter',
                    'format': '%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
                }
            },
            'handlers': {
                'console': {
                    'class': 'logging.StreamHandler',
                    'level': 'INFO',
                    'formatter': 'simple',
                },
                'file': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog.log',
                    'mode': 'w',
                    'formatter': 'detailed',
                },
                'foofile': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog-foo.log',
                    'mode': 'w',
                    'formatter': 'detailed',
                },
                'errors': {
                    'class': 'logging.FileHandler',
                    'filename': 'mplog-errors.log',
                    'mode': 'w',
                    'level': 'ERROR',
                    'formatter': 'detailed',
                },
            },
            'loggers': {
                'foo': {
                    'handlers': ['foofile']
                }
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['console', 'file', 'errors']
            },
        }
        # Log some initial events, just to show that logging in the parent works
        # normally.
        logging.config.dictConfig(config_initial)
        logger = logging.getLogger('setup')
        logger.info('About to create workers ...')
        workers = []
        for i in range(5):
            wp = Process(target=worker_process, name='worker %d' % (i + 1),
                         args=(config_worker,))
            workers.append(wp)
            wp.start()
            logger.info('Started worker: %s', wp.name)
        logger.info('About to create listener ...')
        stop_event = Event()
        lp = Process(target=listener_process, name='listener',
                     args=(q, stop_event, config_listener))
        lp.start()
        logger.info('Started listener')
        # We now hang around for the workers to finish their work.
        for wp in workers:
            wp.join()
        # Workers all done, the listener can now stop.
        # Logging in the parent still works normally.
        logger.info('Telling listener to stop ...')
        stop_event.set()
        lp.join()
        logger.info('All done.')

    if __name__ == '__main__':
        main()
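
One design point worth noting (an observation about the example above, not
something the script itself states): MyHandler re-dispatches each queued record
via logging.getLogger(record.name), so it is the listener's own configuration
that decides which handlers finally see the record. If that indirection isn't
needed, QueueListener can instead be given ordinary handlers directly, and it
will pass every record taken off the queue to each of them. A minimal sketch of
that simpler arrangement, with an illustrative file name::

    import logging
    import logging.handlers
    from multiprocessing import Queue

    q = Queue()
    # Give the listener concrete handlers instead of a dispatching MyHandler;
    # every record pulled off the queue is passed to both handlers below.
    listener = logging.handlers.QueueListener(
        q, logging.StreamHandler(),
        logging.FileHandler('mplog-simple.log', mode='w'))  # hypothetical file name
    listener.start()
    # ... workers would put records on q via QueueHandler, as in the script above ...
    listener.stop()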