Improved logging cookbook for logging with multiprocessing.
This commit is contained in:
parent
0d4bcf4c71
commit
e6f1e435d4
|
@ -807,6 +807,106 @@ the basis for code meeting your own specific requirements::
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
main()
|
main()
|
||||||
|
|
||||||
|
A variant of the above script keeps the logging in the main process, in a
|
||||||
|
separate thread::
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import logging.config
|
||||||
|
import logging.handlers
|
||||||
|
from multiprocessing import Process, Queue
|
||||||
|
import random
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
|
||||||
|
def logger_thread(q):
    """Run in the main process: drain *q* and dispatch each record locally.

    Each queued ``LogRecord`` is handed to the logger named in the record,
    so the main process's handler configuration is applied. A ``None``
    sentinel on the queue terminates the loop.
    """
    # iter() with a sentinel keeps pulling records until None arrives.
    for record in iter(q.get, None):
        logging.getLogger(record.name).handle(record)
|
||||||
|
|
||||||
|
|
||||||
|
def worker_process(q):
    """Run in a worker process: route all logging through *q*.

    Attaches a ``QueueHandler`` to the root logger (level DEBUG) so every
    record is shipped to the main process, then emits 100 messages at
    random levels from a handful of hierarchically named loggers.
    """
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(logging.handlers.QueueHandler(q))
    levels = [logging.DEBUG, logging.INFO, logging.WARNING,
              logging.ERROR, logging.CRITICAL]
    names = ['foo', 'foo.bar', 'foo.bar.baz',
             'spam', 'spam.ham', 'spam.ham.eggs']
    for i in range(100):
        # Pick the level first, then the logger, matching the original
        # order of random draws.
        lvl = random.choice(levels)
        logging.getLogger(random.choice(names)).log(lvl, 'Message no. %d', i)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    q = Queue()
    # All handler/formatter configuration lives in the main process only;
    # workers just forward raw records here through the shared queue.
    config = {
        'version': 1,
        'formatters': {
            'detailed': {
                'class': 'logging.Formatter',
                'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'INFO',
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': 'mplog.log',
                'mode': 'w',
                'formatter': 'detailed',
            },
            'foofile': {
                'class': 'logging.FileHandler',
                'filename': 'mplog-foo.log',
                'mode': 'w',
                'formatter': 'detailed',
            },
            'errors': {
                'class': 'logging.FileHandler',
                'filename': 'mplog-errors.log',
                'mode': 'w',
                'level': 'ERROR',
                'formatter': 'detailed',
            },
        },
        'loggers': {
            'foo': {
                'handlers': ['foofile']
            }
        },
        'root': {
            'level': 'DEBUG',
            'handlers': ['console', 'file', 'errors']
        },
    }
    workers = []
    for i in range(5):
        worker = Process(target=worker_process,
                         name='worker %d' % (i + 1), args=(q,))
        workers.append(worker)
        worker.start()
    logging.config.dictConfig(config)
    listener = threading.Thread(target=logger_thread, args=(q,))
    listener.start()
    # At this point, the main process could do some useful work of its own
    # Once it's done that, it can wait for the workers to terminate...
    for worker in workers:
        worker.join()
    # And now tell the logging thread to finish up, too
    q.put(None)
    listener.join()
|
||||||
|
|
||||||
|
This variant shows how you can apply configuration for particular loggers
|
||||||
|
- e.g. the ``foo`` logger has a special handler which stores all events in the
|
||||||
|
``foo`` subsystem in a file ``mplog-foo.log``. This will be used by the logging
|
||||||
|
machinery in the main process (even though the logging events are generated in
|
||||||
|
the worker processes) to direct the messages to the appropriate destinations.
|
||||||
|
|
||||||
Using file rotation
|
Using file rotation
|
||||||
-------------------
|
-------------------
|
||||||
|
|
Loading…
Reference in New Issue