paul@139 | 1 | # Copyright (C) 2002-2006 Python Software Foundation |
paul@139 | 2 | # Author: Ben Gertzfield, Barry Warsaw |
paul@139 | 3 | # Contact: email-sig@python.org |
paul@139 | 4 | |
paul@139 | 5 | """Header encoding and decoding functionality.""" |
paul@139 | 6 | |
# Public API of this module.
__all__ = [
    'Header',
    'decode_header',
    'make_header',
    ]
paul@139 | 12 | |
paul@139 | 13 | import re |
paul@139 | 14 | import binascii |
paul@139 | 15 | |
paul@139 | 16 | import email.quoprimime |
paul@139 | 17 | import email.base64mime |
paul@139 | 18 | |
paul@139 | 19 | from email.errors import HeaderParseError |
paul@139 | 20 | from email.charset import Charset |
paul@139 | 21 | |
# Default line separator used by Header.encode().
NL = '\n'
SPACE = ' '
USPACE = u' '
# A hard tab expands to 8 columns when measuring continuation whitespace.
SPACE8 = ' ' * 8
UEMPTYSTRING = u''

# RFC 2822 recommended maximum header line length.
MAXLINELEN = 76

# Stock charsets: us-ascii is the default/fallback, utf-8 the last resort
# when encoding unicode input (see Header.append()).
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')

# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
  =\?                   # literal =?
  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
  \?                    # literal ?
  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
  \?                    # literal ?
  (?P<encoded>.*?)      # non-greedy up to the next ?= is the encoded string
  \?=                   # literal ?=
  (?=[ \t]|$)           # whitespace or the end of the string
  ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)

# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822.  Character range is from exclamation mark (\041) to
# tilde (\176), i.e. all printable US-ASCII except space.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')

# Find a header embedded in a putative header value.  Used to check for
# header injection attack.  (The name is misspelled, but kept as-is since
# other code may reference it.)
_embeded_header = re.compile(r'\n[^ \t]+:')



# Helpers
# Alias quoprimime's length-aware chunk appender for local use.
_max_append = email.quoprimime._max_append
paul@139 | 58 | |
paul@139 | 59 | |
paul@139 | 60 | |
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (decoded_string, charset) pairs containing each of the
    decoded parts of the header.  Charset is None for non-encoded parts of the
    header, otherwise a lower-case string containing the name of the character
    set specified in the encoded string.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    header = str(header)
    # Fast path: nothing in the value looks like an RFC 2047 encoded-word.
    if not ecre.search(header):
        return [(header, None)]
    decoded = []
    for line in header.splitlines():
        # A given physical line may still contain no encoded-words.
        if not ecre.search(line):
            decoded.append((line, None))
            continue
        # ecre.split yields: plain-text, charset, encoding, encoded-text,
        # plain-text, charset, ... -- consume it in groups of four.
        parts = ecre.split(line)
        while parts:
            plain = parts.pop(0).strip()
            if plain:
                # Merge consecutive unencoded runs with a single space.
                if decoded and decoded[-1][1] is None:
                    previous = decoded[-1][0]
                    decoded[-1] = (previous + SPACE + plain, None)
                else:
                    decoded.append((plain, None))
            if parts:
                charset = parts[0].lower()
                encoding = parts[1].lower()
                text = parts[2]
                del parts[0:3]
                if encoding == 'q':
                    word = email.quoprimime.header_decode(text)
                elif encoding == 'b':
                    # Postel's law: tolerate missing base64 padding.
                    leftover = len(text) % 4
                    if leftover:
                        text += '==='[:4 - leftover]
                    try:
                        word = email.base64mime.decode(text)
                    except binascii.Error:
                        # Turn this into a higher level exception.  BAW: Right
                        # now we throw the lower level exception away but
                        # when/if we get exception chaining, we'll preserve it.
                        raise HeaderParseError
                else:
                    # Unknown encoding token; pass the text through untouched.
                    word = text
                # Coalesce adjacent words that share a charset.
                if decoded and decoded[-1][1] == charset:
                    decoded[-1] = (decoded[-1][0] + word, decoded[-1][1])
                else:
                    decoded.append((word, charset))
    return decoded
paul@139 | 118 | |
paul@139 | 119 | |
paul@139 | 120 | |
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header()

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the string
    name of the character set.

    This function takes one of those sequence of pairs and returns a Header
    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
    the Header constructor.
    """
    hdr = Header(maxlinelen=maxlinelen, header_name=header_name,
                 continuation_ws=continuation_ws)
    for text, charset in decoded_seq:
        # A charset of None means us-ascii; Header.append() handles that
        # directly.  Wrap bare charset names in a Charset instance.
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        hdr.append(text, charset)
    return hdr
paul@139 | 141 | |
paul@139 | 142 | |
paul@139 | 143 | |
class Header:
    """An RFC 2822 header value that may mix several character sets.

    Text is accumulated as (byte_string, Charset) chunks via append();
    encode() (or str()) renders the chunks as a single folded header string,
    RFC 2047-encoding any chunk whose charset requires it.
    """

    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicitly via maxlinelen.
        For splitting the first line to a shorter value (to account for the
        field header which isn't included in s, e.g. `Subject') pass in the
        name of the field in header_name.  The default maxlinelen is 76.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        if not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        # A tab counts as SPACE8 (8 columns) when figuring how much room the
        # continuation whitespace occupies on folded lines.
        cws_expanded_len = len(continuation_ws.replace('\t', SPACE8))
        # BAW: I believe `chunks' and `maxlinelen' should be non-public.
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        if header_name is None:
            # We don't know anything about the field header so the first line
            # is the same length as subsequent lines.
            self._firstlinelen = maxlinelen
        else:
            # The first line should be shorter to take into account the field
            # header.  Also subtract off 2 extra for the colon and space.
            self._firstlinelen = maxlinelen - len(header_name) - 2
        # Second and subsequent lines should subtract off the length in
        # columns of the continuation whitespace prefix.
        self._maxlinelen = maxlinelen - cws_expanded_len

    def __str__(self):
        """A synonym for self.encode()."""
        return self.encode()

    def __unicode__(self):
        """Helper for the built-in unicode function."""
        uchunks = []
        lastcs = None
        for s, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            nextcs = charset
            if uchunks:
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii'):
                        uchunks.append(USPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii'):
                    uchunks.append(USPACE)
            lastcs = nextcs
            uchunks.append(unicode(s, str(charset)))
        return UEMPTYSTRING.join(uchunks)

    # Rich comparison operators for equality only.  BAW: does it make sense to
    # have or explicitly disable <, <=, >, >= operators?
    def __eq__(self, other):
        """Equality against another Header or a plain string."""
        # other may be a Header or a string.  Both are fine so coerce
        # ourselves to a string, swap the args and do another comparison.
        return other == self.encode()

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other

    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is true), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In this case, when producing an RFC 2822 compliant header
        using RFC 2047 rules, the Unicode string will be encoded using the
        following charsets in order: us-ascii, the charset hint, utf-8.  The
        first character set not to provoke a UnicodeError is used.

        Optional `errors' is passed as the third argument to any unicode() or
        ustr.encode() call.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        # If the charset is our faux 8bit charset, leave the string unchanged
        if charset != '8bit':
            # We need to test that the string can be converted to unicode and
            # back to a byte string, given the input and output codecs of the
            # charset.
            if isinstance(s, str):
                # Possibly raise UnicodeError if the byte string can't be
                # converted to a unicode with the input codec of the charset.
                incodec = charset.input_codec or 'us-ascii'
                ustr = unicode(s, incodec, errors)
                # Now make sure that the unicode could be converted back to a
                # byte string with the output codec, which may be different
                # than the input codec.  Still, use the original byte string.
                outcodec = charset.output_codec or 'us-ascii'
                ustr.encode(outcodec, errors)
            elif isinstance(s, unicode):
                # Now we have to be sure the unicode string can be converted
                # to a byte string with a reasonable output codec.  We want to
                # use the byte string in the chunk.
                # NOTE: the loop deliberately rebinds `charset' to whichever
                # candidate succeeds, so the chunk records the charset the
                # string was actually encoded with.
                for charset in USASCII, charset, UTF8:
                    try:
                        outcodec = charset.output_codec or 'us-ascii'
                        s = s.encode(outcodec, errors)
                        break
                    except UnicodeError:
                        pass
                else:
                    # utf-8 can encode any unicode string, so reaching here
                    # indicates a broken Charset/codec configuration.
                    assert False, 'utf-8 conversion failed'
        self._chunks.append((s, charset))

    def _split(self, s, charset, maxlinelen, splitchars):
        """Split s into a list of (encoded_string, charset) pairs, each of
        whose encoded form fits in maxlinelen columns (where splittable)."""
        # Split up a header safely for use with encode_chunks.
        splittable = charset.to_splittable(s)
        encoded = charset.from_splittable(splittable, True)
        elen = charset.encoded_header_len(encoded)
        # If the line's encoded length fits, just return it
        if elen <= maxlinelen:
            return [(encoded, charset)]
        # If we have undetermined raw 8bit characters sitting in a byte
        # string, we really don't know what the right thing to do is.  We
        # can't really split it because it might be multibyte data which we
        # could break if we split it between pairs.  The least harm seems to
        # be to not split the header at all, but that means they could go out
        # longer than maxlinelen.
        if charset == '8bit':
            return [(s, charset)]
        # BAW: I'm not sure what the right test here is.  What we're trying to
        # do is be faithful to RFC 2822's recommendation that ($2.2.3):
        #
        # "Note: Though structured field bodies are defined in such a way that
        #  folding can take place between many of the lexical tokens (and even
        #  within some of the lexical tokens), folding SHOULD be limited to
        #  placing the CRLF at higher-level syntactic breaks."
        #
        # For now, I can only imagine doing this when the charset is us-ascii,
        # although it's possible that other charsets may also benefit from the
        # higher-level syntactic breaks.
        elif charset == 'us-ascii':
            return self._split_ascii(s, charset, maxlinelen, splitchars)
        # BAW: should we use encoded?
        elif elen == len(s):
            # We can split on _maxlinelen boundaries because we know that the
            # encoding won't change the size of the string
            splitpnt = maxlinelen
            first = charset.from_splittable(splittable[:splitpnt], False)
            last = charset.from_splittable(splittable[splitpnt:], False)
        else:
            # Binary search for split point
            first, last = _binsplit(splittable, charset, maxlinelen)
        # first is of the proper length so just wrap it in the appropriate
        # chrome.  last must be recursively split.
        fsplittable = charset.to_splittable(first)
        fencoded = charset.from_splittable(fsplittable, True)
        chunk = [(fencoded, charset)]
        return chunk + self._split(last, charset, self._maxlinelen, splitchars)

    def _split_ascii(self, s, charset, firstlen, splitchars):
        """Split a us-ascii value at RFC 2822 higher-level syntactic breaks.

        NOTE(review): relies on self._NL, which is only assigned at the top
        of encode() -- this helper must not run before encode() does.
        """
        chunks = _split_ascii(s, firstlen, self._maxlinelen,
                              self._continuation_ws, splitchars, self._NL)
        return zip(chunks, [charset]*len(chunks))

    def _encode_chunks(self, newchunks, maxlinelen):
        """Render (string, charset) pairs into one folded header string."""
        # MIME-encode a header with many different charsets and/or encodings.
        #
        # Given a list of pairs (string, charset), return a MIME-encoded
        # string suitable for use in a header field.  Each pair may have
        # different charsets and/or encodings, and the resulting header will
        # accurately reflect each setting.
        #
        # Each encoding can be email.utils.QP (quoted-printable, for
        # ASCII-like character sets like iso-8859-1), email.utils.BASE64
        # (Base64, for non-ASCII like character sets like KOI8-R and
        # iso-2022-jp), or None (no encoding).
        #
        # Each pair will be represented on a separate line; the resulting
        # string will be in the format:
        #
        # =?charset1?q?Mar=EDa_Gonz=E1lez_Alonso?=\n
        #  =?charset2?b?SvxyZ2VuIEL2aW5n?="
        chunks = []
        for header, charset in newchunks:
            if not header:
                continue
            if charset is None or charset.header_encoding is None:
                s = header
            else:
                s = charset.header_encode(header)
            # Don't add more folding whitespace than necessary
            if chunks and chunks[-1].endswith(' '):
                extra = ''
            else:
                extra = ' '
            _max_append(chunks, s, maxlinelen, extra)
        # self._NL is set by encode() before this helper is called.
        joiner = self._NL + self._continuation_ws
        return joiner.join(chunks)

    def encode(self, splitchars=';, ', linesep=NL):
        """Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        This method will do its best to convert the string to the correct
        character set used in email, and encode and line wrap it safely with
        the appropriate scheme for that character set.

        If the given charset is not known or an error occurs during
        conversion, this function will return the header untouched.

        Optional splitchars is a string containing characters to split long
        ASCII lines on, in rough support of RFC 2822's `highest level
        syntactic breaks'.  This doesn't affect RFC 2047 encoded lines.

        Optional linesep is the separator placed between folded lines; it is
        stashed on self._NL for use by the folding helpers.
        """
        self._NL = linesep
        newchunks = []
        maxlinelen = self._firstlinelen
        lastlen = 0
        for s, charset in self._chunks:
            # The first bit of the next chunk should be just long enough to
            # fill the next line.  Don't forget the space separating the
            # encoded words.
            targetlen = maxlinelen - lastlen - 1
            if targetlen < charset.encoded_header_len(''):
                # Stick it on the next line
                targetlen = maxlinelen
            newchunks += self._split(s, charset, targetlen, splitchars)
            lastchunk, lastcharset = newchunks[-1]
            lastlen = lastcharset.encoded_header_len(lastchunk)
        value = self._encode_chunks(newchunks, maxlinelen)
        # Guard against header injection: a newline followed by a field name
        # means the value would smuggle in a second header when emitted.
        if _embeded_header.search(value):
            raise HeaderParseError("header value appears to contain "
                                   "an embedded header: {!r}".format(value))
        return value
paul@139 | 416 | |
paul@139 | 417 | |
paul@139 | 418 | |
paul@139 | 419 | def _split_ascii(s, firstlen, restlen, continuation_ws, splitchars, linesep): |
paul@139 | 420 | lines = [] |
paul@139 | 421 | maxlen = firstlen |
paul@139 | 422 | for line in s.split(linesep): |
paul@139 | 423 | # Ignore any leading whitespace (i.e. continuation whitespace) already |
paul@139 | 424 | # on the line, since we'll be adding our own. |
paul@139 | 425 | line = line.lstrip() |
paul@139 | 426 | if len(line) < maxlen: |
paul@139 | 427 | lines.append(line) |
paul@139 | 428 | maxlen = restlen |
paul@139 | 429 | continue |
paul@139 | 430 | # Attempt to split the line at the highest-level syntactic break |
paul@139 | 431 | # possible. Note that we don't have a lot of smarts about field |
paul@139 | 432 | # syntax; we just try to break on semi-colons, then commas, then |
paul@139 | 433 | # whitespace. |
paul@139 | 434 | for ch in splitchars: |
paul@139 | 435 | if ch in line: |
paul@139 | 436 | break |
paul@139 | 437 | else: |
paul@139 | 438 | # There's nothing useful to split the line on, not even spaces, so |
paul@139 | 439 | # just append this line unchanged |
paul@139 | 440 | lines.append(line) |
paul@139 | 441 | maxlen = restlen |
paul@139 | 442 | continue |
paul@139 | 443 | # Now split the line on the character plus trailing whitespace |
paul@139 | 444 | cre = re.compile(r'%s\s*' % ch) |
paul@139 | 445 | if ch in ';,': |
paul@139 | 446 | eol = ch |
paul@139 | 447 | else: |
paul@139 | 448 | eol = '' |
paul@139 | 449 | joiner = eol + ' ' |
paul@139 | 450 | joinlen = len(joiner) |
paul@139 | 451 | wslen = len(continuation_ws.replace('\t', SPACE8)) |
paul@139 | 452 | this = [] |
paul@139 | 453 | linelen = 0 |
paul@139 | 454 | for part in cre.split(line): |
paul@139 | 455 | curlen = linelen + max(0, len(this)-1) * joinlen |
paul@139 | 456 | partlen = len(part) |
paul@139 | 457 | onfirstline = not lines |
paul@139 | 458 | # We don't want to split after the field name, if we're on the |
paul@139 | 459 | # first line and the field name is present in the header string. |
paul@139 | 460 | if ch == ' ' and onfirstline and \ |
paul@139 | 461 | len(this) == 1 and fcre.match(this[0]): |
paul@139 | 462 | this.append(part) |
paul@139 | 463 | linelen += partlen |
paul@139 | 464 | elif curlen + partlen > maxlen: |
paul@139 | 465 | if this: |
paul@139 | 466 | lines.append(joiner.join(this) + eol) |
paul@139 | 467 | # If this part is longer than maxlen and we aren't already |
paul@139 | 468 | # splitting on whitespace, try to recursively split this line |
paul@139 | 469 | # on whitespace. |
paul@139 | 470 | if partlen > maxlen and ch != ' ': |
paul@139 | 471 | subl = _split_ascii(part, maxlen, restlen, |
paul@139 | 472 | continuation_ws, ' ', self._NL) |
paul@139 | 473 | lines.extend(subl[:-1]) |
paul@139 | 474 | this = [subl[-1]] |
paul@139 | 475 | else: |
paul@139 | 476 | this = [part] |
paul@139 | 477 | linelen = wslen + len(this[-1]) |
paul@139 | 478 | maxlen = restlen |
paul@139 | 479 | else: |
paul@139 | 480 | this.append(part) |
paul@139 | 481 | linelen += partlen |
paul@139 | 482 | # Put any left over parts on a line by themselves |
paul@139 | 483 | if this: |
paul@139 | 484 | lines.append(joiner.join(this)) |
paul@139 | 485 | return lines |
paul@139 | 486 | |
paul@139 | 487 | |
paul@139 | 488 | |
paul@139 | 489 | def _binsplit(splittable, charset, maxlinelen): |
paul@139 | 490 | i = 0 |
paul@139 | 491 | j = len(splittable) |
paul@139 | 492 | while i < j: |
paul@139 | 493 | # Invariants: |
paul@139 | 494 | # 1. splittable[:k] fits for all k <= i (note that we *assume*, |
paul@139 | 495 | # at the start, that splittable[:0] fits). |
paul@139 | 496 | # 2. splittable[:k] does not fit for any k > j (at the start, |
paul@139 | 497 | # this means we shouldn't look at any k > len(splittable)). |
paul@139 | 498 | # 3. We don't know about splittable[:k] for k in i+1..j. |
paul@139 | 499 | # 4. We want to set i to the largest k that fits, with i <= k <= j. |
paul@139 | 500 | # |
paul@139 | 501 | m = (i+j+1) >> 1 # ceiling((i+j)/2); i < m <= j |
paul@139 | 502 | chunk = charset.from_splittable(splittable[:m], True) |
paul@139 | 503 | chunklen = charset.encoded_header_len(chunk) |
paul@139 | 504 | if chunklen <= maxlinelen: |
paul@139 | 505 | # m is acceptable, so is a new lower bound. |
paul@139 | 506 | i = m |
paul@139 | 507 | else: |
paul@139 | 508 | # m is not acceptable, so final i must be < m. |
paul@139 | 509 | j = m - 1 |
paul@139 | 510 | # i == j. Invariant #1 implies that splittable[:i] fits, and |
paul@139 | 511 | # invariant #2 implies that splittable[:i+1] does not fit, so i |
paul@139 | 512 | # is what we're looking for. |
paul@139 | 513 | first = charset.from_splittable(splittable[:i], False) |
paul@139 | 514 | last = charset.from_splittable(splittable[i:], False) |
paul@139 | 515 | return first, last |