# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python, and influenced by
Apache's log4j system.

Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""

import logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME

try:
    import codecs
except ImportError:
    codecs = None
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False

#
# Some constants...
#

DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging.
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    #print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(self.baseFilename, dfn)
            #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
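
# Usage sketch for RotatingFileHandler (illustrative only; the file name
# 'app.log' and the size/backup limits below are assumptions chosen for the
# example, not defaults of this module):
#
#   import logging
#   from logging.handlers import RotatingFileHandler
#
#   logger = logging.getLogger('example')
#   handler = RotatingFileHandler('app.log', maxBytes=1024 * 1024, backupCount=5)
#   handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
#   logger.addHandler(handler)
#   logger.error('goes to app.log; rolls to app.log.1 ... app.log.5 near 1 MB')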

class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                newRolloverAt = newRolloverAt - 3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                newRolloverAt = newRolloverAt + 3600
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.mode = 'w'
        self.stream = self._open()
        currentTime = int(time.time())
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstNow = time.localtime(currentTime)[-1]
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    newRolloverAt = newRolloverAt - 3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    newRolloverAt = newRolloverAt + 3600
        self.rolloverAt = newRolloverAt
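
# Usage sketch for TimedRotatingFileHandler (illustrative only; the file name
# 'timed.log' and the rotation schedule are assumptions chosen for the example):
#
#   import logging
#   from logging.handlers import TimedRotatingFileHandler
#
#   logger = logging.getLogger('example')
#   # Roll over at midnight and keep the last 7 daily files, each named
#   # timed.log.YYYY-MM-DD for the day it covers.
#   handler = TimedRotatingFileHandler('timed.log', when='midnight', backupCount=7)
#   logger.addHandler(handler)
#   logger.warning('written to timed.log until the next midnight rollover')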

class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        if not os.path.exists(self.baseFilename):
            self.dev, self.ino = -1, -1
        else:
            stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        if not os.path.exists(self.baseFilename):
            stat = None
            changed = 1
        else:
            stat = os.stat(self.baseFilename)
            changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
        if changed and self.stream is not None:
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if stat is None:
                stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
        logging.FileHandler.emit(self, record)
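
# Usage sketch for WatchedFileHandler (illustrative only; '/var/log/myapp.log'
# is an assumed path, and external rotation by e.g. logrotate is assumed to be
# configured separately):
#
#   import logging
#   from logging.handlers import WatchedFileHandler
#
#   logger = logging.getLogger('example')
#   logger.addHandler(WatchedFileHandler('/var/log/myapp.log'))
#   # If logrotate renames the file between calls, the handler notices the
#   # changed device/inode and reopens '/var/log/myapp.log' before writing.
#   logger.warning('safe to log across external rotations')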

class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' defaults to 0. If it is set to a true
        value, then when a socket error occurs the socket is silently closed
        and then reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            dummy = self.format(record) # just to get traceback text into record.exc_text
            record.exc_info = None  # to avoid Unpickleable error
        s = cPickle.dumps(record.__dict__, 1)
        if ei:
            record.exc_info = ei  # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        if self.sock:
            self.sock.close()
            self.sock = None
        logging.Handler.close(self)
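
# Receiving-end sketch for SocketHandler traffic (illustrative only; the port
# reuses DEFAULT_TCP_LOGGING_PORT and 'conn' is an accepted TCP connection).
# As makePickle() above shows, each event is a 4-byte big-endian length prefix
# followed by a pickle of the LogRecord's __dict__, so the receiver only needs
# struct, cPickle and logging.makeLogRecord:
#
#   import cPickle, logging, socket, struct
#   from logging.handlers import DEFAULT_TCP_LOGGING_PORT
#
#   listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   listener.bind(('localhost', DEFAULT_TCP_LOGGING_PORT))
#   listener.listen(1)
#   conn, addr = listener.accept()
#   while True:
#       header = conn.recv(4)
#       if len(header) < 4:
#           break
#       slen = struct.unpack(">L", header)[0]
#       data = conn.recv(slen)
#       while len(data) < slen:
#           data = data + conn.recv(slen - len(data))
#       record = logging.makeLogRecord(cPickle.loads(data))
#       logging.getLogger(record.name).handle(record)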

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
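
# Usage sketch for DatagramHandler (illustrative only; 'loghost' and the port
# choice are assumptions). Each record is sent as a single UDP datagram
# carrying the same length-prefixed pickle produced by makePickle():
#
#   import logging
#   from logging.handlers import DatagramHandler, DEFAULT_UDP_LOGGING_PORT
#
#   logger = logging.getLogger('example')
#   logger.addHandler(DatagramHandler('loghost', DEFAULT_UDP_LOGGING_PORT))
#   logger.warning('sent as one UDP datagram; delivery is not guaranteed')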

class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=socket.SOCK_DGRAM):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, SysLogHandler(address="/dev/log") can be used.
        If facility is not specified, LOG_USER is used.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
        self.formatter = None

    def _connect_unixsocket(self, address):
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        # syslog may require either DGRAM or STREAM sockets
        try:
            self.socket.connect(address)
        except socket.error:
            self.socket.close()
            self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.socket.connect(address)

    # curious: when talking to the unix-domain '/dev/log' socket, a
    #   zero-terminator seems to be required.  this string is placed
    #   into a class variable so that it can be overridden if
    #   necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        return (facility << 3) | priority
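
    # Worked example of the encoding above, using the constants defined in
    # this class: encodePriority(LOG_USER, LOG_INFO) computes (1 << 3) | 6,
    # i.e. 14, so a user-facility informational message is sent with the
    # syslog priority field "<14>".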

    def close(self):
        """
        Closes the socket.
        """
        if self.unixsocket:
            self.socket.close()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record) + '\000'
        # We need to convert record level to lowercase; maybe this will
        # change in the future.
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        # Message is a string. Convert to bytes as required by RFC 5424
        if type(msg) is unicode:
            msg = msg.encode('utf-8')
            if codecs:
                msg = codecs.BOM_UTF8 + msg
        msg = prio + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
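
# Usage sketch for SysLogHandler (illustrative only; the address and facility
# below are assumptions - '/dev/log' targets a local syslogd on many Linux
# systems, and a (host, SYSLOG_UDP_PORT) tuple would target a remote server):
#
#   import logging
#   from logging.handlers import SysLogHandler
#
#   logger = logging.getLogger('example')
#   handler = SysLogHandler(address='/dev/log', facility=SysLogHandler.LOG_LOCAL0)
#   logger.addHandler(handler)
#   logger.warning('delivered to syslog with priority local0.warning')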

class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
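
# Usage sketch for SMTPHandler (illustrative only; the mail host, addresses
# and credentials are placeholders, not values this module provides):
#
#   import logging
#   from logging.handlers import SMTPHandler
#
#   logger = logging.getLogger('example')
#   handler = SMTPHandler(mailhost=('smtp.example.com', 587),
#                         fromaddr='app@example.com',
#                         toaddrs=['ops@example.com'],
#                         subject='Application error',
#                         credentials=('user', 'password'),
#                         secure=())
#   handler.setLevel(logging.ERROR)
#   logger.addHandler(handler)
#   logger.error('each record at ERROR or above is sent as one email')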

class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
                        "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
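
# Usage sketch for NTEventLogHandler (illustrative only; requires the Python
# Win32 extensions on Windows, and 'MyApp' is an assumed application name):
#
#   import logging
#   from logging.handlers import NTEventLogHandler
#
#   logger = logging.getLogger('example')
#   logger.addHandler(NTEventLogHandler('MyApp'))
#   logger.error('appears in the Application event log under source MyApp')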

class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            h.endheaders(data if self.method == "POST" else None)
            h.getreply()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
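
# Usage sketch for HTTPHandler (illustrative only; the host and URL are
# placeholders for a server that accepts the record's attributes as
# urlencoded form data):
#
#   import logging
#   from logging.handlers import HTTPHandler
#
#   logger = logging.getLogger('example')
#   logger.addHandler(HTTPHandler('logserver.example.com:8080', '/log', method='POST'))
#   logger.warning('POSTed to http://logserver.example.com:8080/log as form fields')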

class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return (len(self.buffer) >= self.capacity)

    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to process
        the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.buffer = []

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        self.flush()
        logging.Handler.close(self)

class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        return (len(self.buffer) >= self.capacity) or \
                (record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.
        """
        if self.target:
            for record in self.buffer:
                self.target.handle(record)
            self.buffer = []

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        self.flush()
        self.target = None
        BufferingHandler.close(self)
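
# Usage sketch for MemoryHandler (illustrative only; the capacity, flush level
# and target file 'buffered.log' are assumptions chosen for the example):
#
#   import logging
#   from logging.handlers import MemoryHandler
#
#   logger = logging.getLogger('example')
#   logger.setLevel(logging.DEBUG)
#   target = logging.FileHandler('buffered.log')
#   # Buffer up to 100 records; flush them all to the target as soon as the
#   # buffer fills or a record at ERROR or above arrives.
#   memory = MemoryHandler(100, flushLevel=logging.ERROR, target=target)
#   logger.addHandler(memory)
#   logger.debug('held in memory')
#   logger.error('triggers a flush of everything buffered so far')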