xref: /aosp_15_r20/prebuilts/build-tools/common/py3-stdlib/logging/handlers.py (revision cda5da8d549138a6648c5ee6d7a49cf8f4a657be)
1# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved.
2#
3# Permission to use, copy, modify, and distribute this software and its
4# documentation for any purpose and without fee is hereby granted,
5# provided that the above copyright notice appear in all copies and that
6# both that copyright notice and this permission notice appear in
7# supporting documentation, and that the name of Vinay Sajip
8# not be used in advertising or publicity pertaining to distribution
9# of the software without specific, written prior permission.
10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
17"""
18Additional handlers for the logging package for Python. The core package is
19based on PEP 282 and comments thereto in comp.lang.python.
20
21Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved.
22
23To use, simply 'import logging.handlers' and log away!
24"""
25
26import io, logging, socket, os, pickle, struct, time, re
27from stat import ST_DEV, ST_INO, ST_MTIME
28import queue
29import threading
30import copy
31
#
# Some constants...
#

# Default port numbers for the corresponding network logging handlers;
# callers normally pass an explicit port, these are just conventional
# defaults for this module.
DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
# Conventional syslog port (same number for UDP and TCP transports).
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
44
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    # User-overridable hooks: a callable 'namer' customises rotated file
    # names; a callable 'rotator' performs the rotation itself.
    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Use the specified filename for streamed logging.
        """
        super().__init__(filename, mode=mode, encoding=encoding,
                         delay=delay, errors=errors)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, rolling the file over first if
        shouldRollover() (supplied by the subclass) says so.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            super().emit(record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        Delegates to the 'namer' attribute when it is callable, passing it
        the default name; otherwise the default name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        return namer(default_name) if callable(namer) else default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        Delegates to the 'rotator' attribute when it is callable, passing it
        the source and dest arguments; otherwise the source is simply renamed
        to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
        elif os.path.exists(source):
            # Issue 18940: with delay=True the file may never have been
            # created, in which case there is nothing to rename.
            os.rename(source, dest)
118
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        if "b" not in mode:
            encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.(i) -> app.log.(i+1), newest-first, discarding
            # whatever already occupies the destination slot.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        # See bpo-45401: Never rollover anything other than regular files
        if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
            return False
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            pos = self.stream.tell()
            if not pos:
                # See gh-116263: an empty file must never roll over.
                # Otherwise a single record longer than maxBytes would
                # trigger a rollover on every emit, producing an endless
                # stream of empty backup files.
                return False
            msg = "%s\n" % self.format(record)
            if pos + len(msg) >= self.maxBytes:
                return True
        return False
201
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        """
        Open the specified file in append mode and set up timed rollover.

        :param when:        unit for 'interval' - one of 'S', 'M', 'H', 'D',
                            'MIDNIGHT', or 'W0'-'W6' (case-insensitive).
        :param interval:    number of 'when' units between rollovers (only
                            meaningful for 'S'/'M'/'H'/'D').
        :param backupCount: if > 0, keep at most this many rotated files.
        :param utc:         use UTC instead of local time for computations.
        :param atTime:      a datetime.time giving the time of day at which
                            a midnight/weekly rollover occurs (the default,
                            None, means midnight itself).
        :raises ValueError: if 'when' is not one of the supported values.
        """
        encoding = io.text_encoding(encoding)
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        #
        # 'extMatch' is the regex used by getFilesToDelete() to recognise a
        # filename component that looks like the date/time suffix 'suffix'
        # produces via time.strftime (optionally followed by an extension).
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        # Seed the schedule from the existing file's mtime so that restarting
        # the application does not reset the rollover clock.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        Returns an epoch timestamp (seconds) for the next rollover after
        'currentTime'.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                # seconds-since-midnight of the configured rollover time
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Compensate for a DST transition between now and the
                        # computed rollover, so the wall-clock time stays put.
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            # See #89564: Never rollover anything other than regular files
            if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
                # The file is not a regular file, so do not rollover, but do
                # set the next rollover time to avoid repeated checks.
                self.rolloverAt = self.computeRollover(t)
                return False

            return True
        return False

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        Returns the (sorted) list of full paths of rotated files beyond
        backupCount, oldest first.
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # See bpo-44753: Don't use the extension when computing the prefix.
        n, e = os.path.splitext(baseName)
        prefix = n + '.'
        plen = len(prefix)
        for fileName in fileNames:
            if self.namer is None:
                # Our files will always start with baseName
                if not fileName.startswith(baseName):
                    continue
            else:
                # Our files could be just about anything after custom naming, but
                # likely candidates are of the form
                # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log
                if (not fileName.startswith(baseName) and fileName.endswith(e) and
                    len(fileName) > (plen + 1) and not fileName[plen+1].isdigit()):
                    continue

            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # See bpo-45628: The date/time suffix could be anywhere in the
                # filename
                parts = suffix.split('.')
                for part in parts:
                    if self.extMatch.match(part):
                        result.append(os.path.join(dirName, fileName))
                        break
        if len(result) < self.backupCount:
            result = []
        else:
            # Lexicographic sort works because the suffix is zero-padded
            # year-first; keep only the excess, oldest entries.
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST changed during the interval, shift so the suffix reflects
            # the wall-clock time at the start of the interval.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        # Catch up if we somehow missed one or more whole intervals.
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
454
455class WatchedFileHandler(logging.FileHandler):
456    """
457    A handler for logging to a file, which watches the file
458    to see if it has changed while in use. This can happen because of
459    usage of programs such as newsyslog and logrotate which perform
460    log file rotation. This handler, intended for use under Unix,
461    watches the file to see if it has changed since the last emit.
462    (A file has changed if its device or inode have changed.)
463    If it has changed, the old file stream is closed, and the file
464    opened to get a new stream.
465
466    This handler is not appropriate for use under Windows, because
467    under Windows open files cannot be moved or renamed - logging
468    opens the files with exclusive locks - and so there is no need
469    for such a handler. Furthermore, ST_INO is not supported under
470    Windows; stat always returns zero for this value.
471
472    This handler is based on a suggestion and patch by Chad J.
473    Schroeder.
474    """
475    def __init__(self, filename, mode='a', encoding=None, delay=False,
476                 errors=None):
477        if "b" not in mode:
478            encoding = io.text_encoding(encoding)
479        logging.FileHandler.__init__(self, filename, mode=mode,
480                                     encoding=encoding, delay=delay,
481                                     errors=errors)
482        self.dev, self.ino = -1, -1
483        self._statstream()
484
485    def _statstream(self):
486        if self.stream:
487            sres = os.fstat(self.stream.fileno())
488            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
489
490    def reopenIfNeeded(self):
491        """
492        Reopen log file if needed.
493
494        Checks if the underlying file has changed, and if it
495        has, close the old stream and reopen the file to get the
496        current stream.
497        """
498        # Reduce the chance of race conditions by stat'ing by path only
499        # once and then fstat'ing our new fd if we opened a new log stream.
500        # See issue #14632: Thanks to John Mulligan for the problem report
501        # and patch.
502        try:
503            # stat the file by path, checking for existence
504            sres = os.stat(self.baseFilename)
505        except FileNotFoundError:
506            sres = None
507        # compare file system stat with that of our stream file handle
508        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
509            if self.stream is not None:
510                # we have an open file handle, clean it up
511                self.stream.flush()
512                self.stream.close()
513                self.stream = None  # See Issue #21742: _open () might fail.
514                # open a new file handle and get new stat info from that fd
515                self.stream = self._open()
516                self._statstream()
517
518    def emit(self, record):
519        """
520        Emit a record.
521
522        If underlying file has changed, reopen the file before emitting the
523        record to it.
524        """
525        self.reopenIfNeeded()
526        logging.FileHandler.emit(self, record)
527
528
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # A port of None means *host* is a Unix domain socket path.
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters for reconnection attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            # Unix domain stream socket.
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            try:
                sock.connect(self.address)
            except OSError:
                sock.close()  # Issue 19182
                raise
            return sock
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None (first call after a disconnect) or we
        # have waited long enough since the last failed attempt.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # next time, no delay before trying
        except OSError:
            # Creation failed: schedule the next attempt, doubling the wait
            # up to retryMax.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock can still be None: either the retry time has not yet
        # arrived, or the reconnection attempt itself failed.
        if not self.sock:
            return
        try:
            self.sock.sendall(s)
        except OSError:  # pragma: no cover
            self.sock.close()
            self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        if record.exc_info:
            # Formatting has the side effect of caching the traceback text
            # in record.exc_text, which *is* picklable.
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
695
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        # port=None selects a Unix domain datagram socket instead of UDP.
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
737
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon
    LOG_NTP       = 12      #  NTP subsystem
    LOG_SECURITY  = 13      #  Log audit
    LOG_CONSOLE   = 14      #  Log alert
    LOG_SOLCRON   = 15      #  Scheduling daemon (Solaris)

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    # String aliases accepted by encodePriority() for the priority part.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    # String aliases accepted by encodePriority() for the facility part.
    facility_names = {
        "auth":         LOG_AUTH,
        "authpriv":     LOG_AUTHPRIV,
        "console":      LOG_CONSOLE,
        "cron":         LOG_CRON,
        "daemon":       LOG_DAEMON,
        "ftp":          LOG_FTP,
        "kern":         LOG_KERN,
        "lpr":          LOG_LPR,
        "mail":         LOG_MAIL,
        "news":         LOG_NEWS,
        "ntp":          LOG_NTP,
        "security":     LOG_SECURITY,
        "solaris-cron": LOG_SOLCRON,
        "syslog":       LOG_SYSLOG,
        "user":         LOG_USER,
        "uucp":         LOG_UUCP,
        "local0":       LOG_LOCAL0,
        "local1":       LOG_LOCAL1,
        "local2":       LOG_LOCAL2,
        "local3":       LOG_LOCAL3,
        "local4":       LOG_LOCAL4,
        "local5":       LOG_LOCAL5,
        "local6":       LOG_LOCAL6,
        "local7":       LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype
        self.socket = None
        # Eagerly attempt a connection; for Unix sockets a connection
        # failure is swallowed in createSocket() and retried on first emit.
        self.createSocket()

    def _connect_unixsocket(self, address):
        """
        Connect self.socket to *address* as a Unix-domain socket.

        Uses self.socktype when set; otherwise tries SOCK_DGRAM first and
        falls back to SOCK_STREAM on failure.  Whichever type succeeded is
        written back to self.socktype.  Raises OSError if no connection
        could be made.
        """
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def createSocket(self):
        """
        Try to create a socket and, if it's not a datagram socket, connect it
        to the other end. This method is called during handler initialization,
        but it's not regarded as an error if the other end isn't listening yet
        --- the method will be called again when emitting an event,
        if there is no socket at that point.
        """
        address = self.address
        socktype = self.socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it's not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            # Try each resolved address in turn; keep the first socket that
            # can be created (and, for stream sockets, connected).
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            # If the last attempt failed, re-raise its error.
            if err is not None:
                raise err
            self.socket = sock
            self.socktype = socktype

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Facility occupies the high bits, priority the bottom 3 bits.
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock = self.socket
            if sock:
                self.socket = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg

            # Socket may be absent if the initial connect failed (Unix
            # sockets) or close() was called; (re)create it lazily.
            if not self.socket:
                self.createSocket()

            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # Connection may have been dropped (e.g. syslogd
                    # restarted): reconnect once and retry the send.
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
1018
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # Keep the attribute pair consistent: emit() only checks
            # username, but password should exist (as None) too so that
            # subclasses and introspection never hit AttributeError.
            self.username = None
            self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            # Allow a single recipient to be passed as a plain string.
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        Any failure (connection, auth, send) is passed to handleError().
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                # Secure the channel before sending credentials, if asked to.
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
1098
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located two directories up
                # from the win32evtlogutil module.
                head = os.path.split(self._welu.__file__)
                head = os.path.split(head[0])
                dllname = os.path.join(head[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            # Administrative privileges are required to add a source to the
            # registry.  A user who only appends to an existing source may
            # lack them - tolerate that specific case.
            try:
                self._welu.AddSourceToRegistry(appname, dllname, logtype)
            except Exception as e:
                # Probably a pywintypes.error.  Re-raise anything that is
                # not "access denied".
                if getattr(e, 'winerror', None) != 5:  # not access denied
                    raise
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This
        version returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        This version maps the record's level number through the handler's
        typemap attribute (set up in __init__() for DEBUG, INFO, WARNING,
        ERROR and CRITICAL), falling back to self.deftype.  Override this
        method, or install your own typemap, if you use custom levels.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then log
        the formatted message in the NT event log.  A no-op when the Win32
        extensions were unavailable at construction time.
        """
        if self._welu:
            try:
                msg_id = self.getMessageID(record)
                category = self.getEventCategory(record)
                event_type = self.getEventType(record)
                message = self.format(record)
                self._welu.ReportEvent(self.appname, msg_id, category,
                                       event_type, [message])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
1205
class HTTPHandler(logging.Handler):
    """
    A handler which ships each logging record to a web server, using
    either GET or POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the
        method ("GET" or "POST").  Raises ValueError for any other method,
        or when an SSL context is supplied without secure=True.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def getConnection(self, host, secure):
        """
        Get a HTTP[S]Connection.

        Override when a custom connection is required, for example if
        there is a proxy.
        """
        import http.client
        if secure:
            return http.client.HTTPSConnection(host, context=self.context)
        return http.client.HTTPConnection(host)

    def emit(self, record):
        """
        Emit a record.

        Send the record to the web server as a percent-encoded dictionary.
        Failures of any kind are routed to handleError().
        """
        try:
            import urllib.parse
            host = self.host
            conn = self.getConnection(host, self.secure)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the payload as a query string, respecting any
                # query already present in the URL.
                sep = '&' if '?' in url else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            # See issue #30904: putrequest call above already adds this header
            # on Python 3.x.
            # conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                userpass = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(userpass).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
1295
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory.  Each time a
    record is added, shouldFlush() is consulted; when it answers true,
    flush() is called to dispose of the buffer.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size (capacity).
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true once the buffer has reached capacity.  Override to
        implement a custom flushing strategy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Append the record to the buffer, flushing afterwards if
        shouldFlush() says so.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version simply empties the buffer, under the handler lock.
        """
        self.acquire()
        try:
            self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        Flushes any buffered records, then chains to the parent close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
1352
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    forwarding them to a target handler.  The buffer is flushed when it
    fills up, or when a record of at least flushLevel severity arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the severity at which
        flushing should occur, and an optional target handler.

        Without a target - set here or later via setTarget() - flushed
        records go nowhere, so a MemoryHandler is of no use to anyone!

        ``flushOnClose`` defaults to ``True`` for backward compatibility:
        closing the handler flushes the buffer even if neither the flush
        level nor the capacity was reached.  Pass ``False`` to suppress
        that final flush.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Flush when the buffer is full, or when the record's level reaches
        flushLevel.
        """
        if record.levelno >= self.flushLevel:
            return True
        return len(self.buffer) >= self.capacity

    def setTarget(self, target):
        """
        Set the target handler for this handler (thread-safely).
        """
        self.acquire()
        try:
            self.target = target
        finally:
            self.release()

    def flush(self):
        """
        For a MemoryHandler, flushing means sending the buffered records to
        the target, if there is one, and clearing the buffer.  With no
        target set, the buffer is left intact.  Override for different
        behaviour.
        """
        self.acquire()
        try:
            target = self.target
            if target:
                for buffered in self.buffer:
                    target.handle(buffered)
                self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush (if flushOnClose is set), drop the target reference, and
        close via the base class.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
1428
1429
class QueueHandler(logging.Handler):
    """
    A handler that forwards logging events to a queue.  It is typically
    used with a multiprocessing Queue to centralise logging to file in one
    process of a multi-process application, avoiding write contention
    between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses put_nowait; override to use blocking
        puts, timeouts, or a custom queue implementation.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepare a record for queuing; the returned object is what gets
        enqueued.

        The base implementation merges the message and arguments via the
        handler's `format` method, writing the result into the `msg` and
        `message` attributes of a *copy* of the record, and nulls out the
        `args`, `exc_info`, `exc_text` and `stack_info` attributes, since
        those are often unpickleable and no longer needed once formatted.

        Override to convert the record to a dict or JSON string, or to
        send a differently modified copy while leaving the original intact.
        """
        # Formatting folds any traceback text into exc_text and yields the
        # fully merged message, so msg+args (possibly unpickleable) can be
        # replaced wholesale.
        merged = self.format(record)
        # bpo-35726: work on a copy so other handlers in the chain still
        # see the untouched record.
        clone = copy.copy(record)
        clone.message = merged
        clone.msg = merged
        for attr in ('args', 'exc_info', 'exc_text', 'stack_info'):
            setattr(clone, attr, None)
        return clone

    def emit(self, record):
        """
        Emit a record.

        Writes the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
1501
1502
class QueueListener(object):
    """
    This class implements an internal threaded listener which watches for
    LogRecords being added to a queue, removes them and passes them to a
    list of handlers for processing.
    """
    # Placed on the queue by stop() to tell the monitor thread to exit.
    _sentinel = None

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and
        handlers.

        If respect_handler_level is true, a record is only offered to a
        handler whose level it meets; otherwise every record goes to every
        handler.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Dequeue a record and return it, optionally blocking.

        The base implementation uses get. You may want to override this method
        if you want to use timeouts or work with custom queue implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener.

        This starts up a background thread to monitor the queue for
        LogRecords to process.
        """
        self._thread = t = threading.Thread(target=self._monitor)
        # Daemonized so a forgotten stop() cannot keep the process alive.
        t.daemon = True
        t.start()

    def prepare(self, record):
        """
        Prepare a record for handling.

        This method just returns the passed-in record. You may want to
        override this method if you need to do any custom marshalling or
        manipulation of the record before passing it to the handlers.
        """
        return record

    def handle(self, record):
        """
        Handle a record.

        This just loops through the handlers offering them the record
        to handle.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if not self.respect_handler_level:
                process = True
            else:
                process = record.levelno >= handler.level
            if process:
                handler.handle(record)

    def _monitor(self):
        """
        Monitor the queue for records, and ask the handler
        to deal with them.

        This method runs on a separate, internal thread.
        The thread will terminate if it sees a sentinel object in the queue.
        """
        q = self.queue
        # task_done() is optional on custom queue implementations.
        has_task_done = hasattr(q, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    if has_task_done:
                        q.task_done()
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        This is used to enqueue the sentinel record.

        The base implementation uses put_nowait. You may want to override this
        method if you want to use timeouts or work with custom queue
        implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener.

        This asks the thread to terminate, and then waits for it to do so.
        Note that if you don't call this before your application exits, there
        may be some records still left on the queue, which won't be processed.

        Calling stop() on a listener that was never started, or that has
        already been stopped, is a harmless no-op. (Previously this raised
        AttributeError because self._thread was None; fixed upstream in
        CPython as gh-114706.)
        """
        if self._thread:  # guard: no-op if never started / already stopped
            self.enqueue_sentinel()
            self._thread.join()
            self._thread = None
1611