1# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
2#
3# Permission to use, copy, modify, and distribute this software and its
4# documentation for any purpose and without fee is hereby granted,
5# provided that the above copyright notice appear in all copies and that
6# both that copyright notice and this permission notice appear in
7# supporting documentation, and that the name of Vinay Sajip
8# not be used in advertising or publicity pertaining to distribution
9# of the software without specific, written prior permission.
10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
17"""
18Additional handlers for the logging package for Python. The core package is
19based on PEP 282 and comments thereto in comp.lang.python.
20
21Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
22
23To use, simply 'import logging.handlers' and log away!
24"""
25
26import logging, socket, os, pickle, struct, time, re
27from stat import ST_DEV, ST_INO, ST_MTIME
28import queue
29import threading
30
31#
32# Some constants...
33#
34
# Default ports used by the handlers in this module that log over the
# network (the HTTP/SOAP values are presumably for handlers defined
# further down in the file — confirm against the full module).
DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
# 514 is the traditional syslog port, for both UDP and TCP transports.
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day
43
class BaseRotatingHandler(logging.FileHandler):
    """
    Shared machinery for handlers that roll their log file over at some
    trigger point.  Not meant to be instantiated directly; use
    RotatingFileHandler or TimedRotatingFileHandler instead.
    """
    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional user-supplied callables customising rotation; both
        # default to None, meaning "use the built-in behaviour".
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Write the record to the file, first performing a rollover if
        shouldRollover() says one is due (see doRollover()).
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Return the filename to use for a rotated log file.

        If a callable 'namer' attribute has been set, it is given the
        chance to transform the name; otherwise (the default of None),
        *default_name* is returned unchanged.

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        return namer(default_name) if callable(namer) else default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        A callable 'rotator' attribute, if set, performs the rotation;
        the fallback (when the attribute is None, the default) simply
        renames the source to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        rotator = self.rotator
        if callable(rotator):
            rotator(source, dest)
        elif os.path.exists(source):
            # Issue 18940: with delay=True the file may never have been
            # created, in which case there is nothing to rename.
            os.rename(source, dest)
113
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift existing backups up by one (.1 -> .2, .2 -> .3, ...),
            # working from the oldest down so nothing is overwritten.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # NOTE(review): len(msg) counts characters, not encoded bytes,
            # so multi-byte encodings may slightly overshoot maxBytes.
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return True
        return False
189
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        """
        Open the specified file and use it as the stream for logging,
        rolling the file over at timed intervals.

        'when' selects the unit of the rollover interval: 'S', 'M', 'H',
        'D' (seconds/minutes/hours/days), 'MIDNIGHT', or 'W0'-'W6'
        (weekly; 0 is Monday); case is not significant.  'interval' is
        the number of those units between rollovers.  If backupCount > 0,
        only the backupCount most recent rotated files are kept.  If utc
        is true, rollover times are computed with time.gmtime() rather
        than time.localtime().  atTime, if not None, is a datetime.time
        giving the time of day at which 'MIDNIGHT' and 'W*' rollovers
        occur (it is not consulted for the other 'when' values).
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch is used by getFilesToDelete() to recognise the
        # date/time suffixes of previously rotated files.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        # Base the first rollover on the file's last-modification time if
        # it already exists, so restarting the application does not reset
        # the rollover schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.

        currentTime is an epoch time; the return value is the epoch time
        at which the next rollover is due.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Adjust for a DST transition between now and the
                        # computed rollover time (local time only).
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # Only files whose suffix matches the rotation pattern are
                # candidates; unrelated files sharing the prefix are kept.
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        # Keep the backupCount newest files (the date suffixes sort
        # chronologically); if there are no more than backupCount
        # matches, nothing is deleted.
        if len(result) < self.backupCount:
            result = []
        else:
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST state differed at the start of the interval, shift by
            # an hour so the suffix reflects the interval's wall-clock start.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        # Make sure the next rollover time lies strictly in the future.
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
412
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        # Sentinel values meaning "no stream stat'ed yet" (e.g. delay=True).
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Remember the device/inode of the currently open stream, if any,
        # so later emits can detect an external rename/rotation.
        if not self.stream:
            return
        sres = os.fstat(self.stream.fileno())
        self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # The file has "changed" if it vanished or if its identity
        # (device, inode) no longer matches our open stream's.
        changed = (not sres or
                   sres[ST_DEV] != self.dev or
                   sres[ST_INO] != self.ino)
        if changed and self.stream is not None:
            # we have an open file handle, clean it up
            self.stream.flush()
            self.stream.close()
            self.stream = None  # See Issue #21742: _open () might fail.
            # open a new file handle and get new stat info from that fd
            self.stream = self._open()
            self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If underlying file has changed, reopen the file before emitting the
        record to it.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
480
481
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # A None port means a Unix-domain socket path rather than (host, port).
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters for reconnection attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            # Unix-domain stream socket.
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
            return result
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Attempt when retryTime is None (first time back after a
        # disconnect) or once we've waited long enough.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # next time, no delay before trying
        except OSError:
            # Creation failed: grow the retry period exponentially,
            # clamped at retryMax, and schedule the next attempt.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock may still be None: either we haven't reached the
        # retry time yet, or the reconnection attempt itself failed.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time


    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        if record.exc_info:
            # format() caches the traceback text in record.exc_text,
            # which pickles cleanly; the formatted result is discarded.
            self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
648
class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        Override SocketHandler's factory to produce a datagram socket
        (SOCK_DGRAM) instead of a stream socket.
        """
        family = socket.AF_INET if self.port is not None else socket.AF_UNIX
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        Unlike the stream version, no partial-send handling is attempted:
        UDP guarantees neither delivery nor ordering, so each record is
        dispatched as a single datagram.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
690
class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    # Maps syslog priority keyword strings to their numeric codes; used by
    # encodePriority() so callers may pass either form.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    # Maps syslog facility keyword strings to their numeric codes; used by
    # encodePriority() so callers may pass either form.
    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it is no
            # worse to ignore them here too.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            # Resolve the host and try each returned address in turn; the
            # first one that yields a usable socket wins.
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        # Only stream sockets need an explicit connect;
                        # datagram sends carry the address per-packet.
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                # Every candidate address failed; re-raise the last error.
                raise err
            self.socket = sock
            self.socktype = socktype

    def _connect_unixsocket(self, address):
        """
        Connect a UNIX-domain socket to *address*.

        Tries the configured socktype (defaulting to SOCK_DGRAM); if the user
        left socktype as None, a failed datagram connect falls back to
        SOCK_STREAM. On success self.socket and self.socktype are set; on
        failure the socket is closed and OSError propagates.
        """
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, str):
            facility = self.facility_names[facility]
        if isinstance(priority, str):
            priority = self.priority_names[priority]
        # Syslog packs both values into one integer: facility in the high
        # bits, priority in the low 3 bits.
        return (facility << 3) | priority

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            self.socket.close()
            logging.Handler.close(self)
        finally:
            self.release()

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).

        Unknown level names map to "warning".
        """
        return self.priority_map.get(levelName, "warning")

    ident = ''          # prepended to all messages
    append_nul = True   # some old syslog daemons expect a NUL terminator

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        try:
            msg = self.format(record)
            if self.ident:
                msg = self.ident + msg
            if self.append_nul:
                msg += '\000'

            # We need to convert record level to lowercase, maybe this will
            # change in the future.
            prio = '<%d>' % self.encodePriority(self.facility,
                                                self.mapPriority(record.levelname))
            prio = prio.encode('utf-8')
            # Message is a string. Convert to bytes as required by RFC 5424
            msg = msg.encode('utf-8')
            msg = prio + msg
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except OSError:
                    # The local syslogd may have been restarted; reconnect
                    # once and retry (a second failure propagates to the
                    # outer handler below).
                    self.socket.close()
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except Exception:
            self.handleError(record)
944
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        mailhost may be a plain host name or a (host, port) tuple when a
        non-standard SMTP port is needed.  toaddrs may be a single address
        string or a list of addresses.  credentials, when given, is a
        (username, password) tuple used to authenticate.  secure, when
        given alongside credentials, requests TLS: it is a tuple passed
        straight to `starttls` - empty, (keyfile,), or
        (keyfile, certfile).  timeout is the SMTP connection timeout in
        seconds (default 5.0).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost = mailhost
            self.mailport = None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # NOTE: self.password is deliberately left unset here; emit()
            # only reads it when self.username is truthy.
            self.username = None
        self.fromaddr = fromaddr
        self.toaddrs = [toaddrs] if isinstance(toaddrs, str) else toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            # Imported lazily so the module loads even where these are slow
            # or unneeded.
            import smtplib
            import email.utils
            from email.message import EmailMessage

            port = self.mailport or smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # STARTTLS needs an EHLO both before and after the
                    # upgrade.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
1024
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default DLL: win32service.pyd located in the parent
                # directory of the win32evtlogutil module.
                pkg_dir = os.path.dirname(os.path.dirname(self._welu.__file__))
                dllname = os.path.join(pkg_dir, 'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Map stdlib levels onto the three NT event types.
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # pywin32 is absent: degrade to a no-op handler.
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if not self._welu:
            return
        try:
            msg_id = self.getMessageID(record)
            category = self.getEventCategory(record)
            ev_type = self.getEventType(record)
            text = self.format(record)
            self._welu.ReportEvent(self.appname, msg_id, category, ev_type,
                                   [text])
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
1122
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST").  secure=True selects HTTPS, in which case an
        optional ssl context may be supplied.  credentials, when given, is a
        (username, password) pair sent as HTTP Basic authentication.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ("GET", "POST"):
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                conn = http.client.HTTPSConnection(host, context=self.context)
            else:
                conn = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the form data to the query string.
                sep = '&' if '?' in url else '?'
                url = url + sep + data
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            # See issue #30904: putrequest call above already adds this header
            # on Python 3.x.
            # conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                raw = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(raw).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
1201
class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer
    should be flushed. If it should, then flush() is expected to do what's
    needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to
        process the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.acquire()
        try:
            # Rebind (rather than clear in place) so subclasses/observers
            # holding the old list are unaffected.
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
1258
class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!

        The ``flushOnClose`` argument is ``True`` for backward compatibility
        reasons - the old behaviour is that when the handler is closed, the
        buffer is flushed, even if the flush level hasn't been exceeded nor
        the capacity exceeded. To prevent this, set ``flushOnClose`` to
        ``False``.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.

        The record buffer is also cleared by this operation.
        """
        self.acquire()
        try:
            # Without a target the buffer is intentionally retained.
            if self.target:
                for buffered in self.buffer:
                    self.target.handle(buffered)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, if appropriately configured, set the target to None and lose
        the buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
1330
1331
class QueueHandler(logging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used
    together with a multiprocessing Queue to centralise logging to file in
    one process (in a multi-process application), so as to avoid file write
    contention between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses put_nowait. You may want to override
        this method if you want to use blocking, timeouts or custom queue
        implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepares a record for queuing. The object returned by this method is
        enqueued.

        The base implementation formats the record to merge the message
        and arguments, and removes unpickleable items from the record
        in-place.

        You might want to override this method if you want to convert
        the record to a dict or JSON string, or send a modified copy
        of the record while leaving the original intact.
        """
        # Formatting fills in record.exc_text (if there is exception data)
        # and merges msg with args.  Storing the merged text back lets us
        # drop args/exc_info/exc_text, all of which may be unpickleable.
        record.message = record.msg = self.format(record)
        record.args = None
        record.exc_info = None
        record.exc_text = None
        return record

    def emit(self, record):
        """
        Emit a record.

        Writes the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
1397
1398
class QueueListener(object):
    """
    This class implements an internal threaded listener which watches for
    LogRecords being added to a queue, removes them and passes them to a
    list of handlers for processing.
    """
    # Placed on the queue by stop() to tell the worker thread to exit.
    _sentinel = None

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and
        handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Dequeue a record and return it, optionally blocking.

        The base implementation uses get. You may want to override this
        method if you want to use timeouts or work with custom queue
        implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener.

        This starts up a background thread to monitor the queue for
        LogRecords to process.
        """
        worker = threading.Thread(target=self._monitor)
        worker.daemon = True
        self._thread = worker
        worker.start()

    def prepare(self, record):
        """
        Prepare a record for handling.

        This method just returns the passed-in record. You may want to
        override this method if you need to do any custom marshalling or
        manipulation of the record before passing it to the handlers.
        """
        return record

    def handle(self, record):
        """
        Handle a record.

        This just loops through the handlers offering them the record
        to handle.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            # When respect_handler_level is set, honour each handler's own
            # level; otherwise every handler sees every record.
            if (not self.respect_handler_level
                    or record.levelno >= handler.level):
                handler.handle(record)

    def _monitor(self):
        """
        Monitor the queue for records, and ask the handler
        to deal with them.

        This method runs on a separate, internal thread.
        The thread will terminate if it sees a sentinel object in the queue.
        """
        q = self.queue
        has_task_done = hasattr(q, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    # Note: no task_done() for the sentinel itself.
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        This is used to enqueue the sentinel record.

        The base implementation uses put_nowait. You may want to override
        this method if you want to use timeouts or work with custom queue
        implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener.

        This asks the thread to terminate, and then waits for it to do so.
        Note that if you don't call this before your application exits, there
        may be some records still left on the queue, which won't be
        processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
1505