# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

    @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk>
    @copyright: 2000-2004 Juergen Hermann <jh@web.de>,
                2005-2008 MoinMoin:ThomasWaldmann.
    @license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin.util import lock
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time
import os

# Moin 1.9 request parameters.

try:
    from MoinMoin.support.werkzeug.datastructures import MultiDict
except ImportError:
    pass

__version__ = "0.2"

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

category_regexp = None

# Simple content parsing.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):
    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages
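# Illustrative sketch (not part of the library API): the category helpers
# above can be combined to build an overview of category membership. This
# assumes a live MoinMoin 'request' object with a working search backend;
# the helper name is hypothetical.

def _example_category_overview(request):

    "Return a list of (category name, member page names) tuples."

    overview = []
    for name, pagename in getCategoryMapping(getCategories(request), request):
        members = [page.page_name for page in getCategoryPages(pagename, request)]
        overview.append((name, members))
    return overview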
def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions
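# Illustrative sketch (not part of the library API): getRegions splits a page
# body on {{{ and }}} markers. With include_non_regions set, the text outside
# explicitly declared regions is preserved as separate list entries. The
# sample page body shown is hypothetical.

def _example_get_regions():

    "Show how a small page body is split into regions."

    body = u"Intro text.\n{{{#!wiki note\nA note.\n}}}\nTrailing text.\n"
    return getRegions(body, include_non_regions=True)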
def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings
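# Illustrative sketch (not part of the library API): getFragments combines
# getRegions and getFragmentFromRegion, yielding the parser format, its
# attributes and the body text of each region, while getHeadings locates
# section titles. The sample page body shown is hypothetical.

def _example_fragments_and_headings():

    "Return the fragments and headings found in a small page body."

    body = u"= Overview =\n{{{#!wiki caution\nTake care.\n}}}\n"
    return getFragments(body, include_non_regions=True), getHeadings(body)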
# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False
    lex = shlex(f)
    lex.wordchars += "-"

    for token in lex:

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token
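# Illustrative sketch (not part of the library API): parseAttributes accepts
# the argument string that follows a parser/format declaration. With 'escape'
# set, values are quoted for the formatter API; without it, quoting is
# stripped. The hyphen added to the shlex word characters keeps names such as
# "background-color" intact. The argument string shown is hypothetical.

def _example_parse_attributes():

    "Parse a typical region argument string in both modes."

    arguments = 'width="50%" background-color=yellow plain'
    return parseAttributes(arguments), parseAttributes(arguments, False)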
# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, request):
        self.request = request
        self.form = request.values

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __setitem__(self, name, value):
        try:
            self.form.setlist(name, value)
        except TypeError:
            self._write_enable()
            self.form.setlist(name, value)

    def __delitem__(self, name):
        try:
            del self.form[name]
        except TypeError:
            self._write_enable()
            del self.form[name]

    def _write_enable(self):
        self.form = self.request.values = MultiDict(self.form)

    def keys(self):
        return self.form.keys()

    def items(self):
        return self.form.lists()

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):
    return wikiutil.escape(s, 1)

def getPathInfo(request):
    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)
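# Illustrative sketch (not part of the library API): an action might use
# writeHeaders to emit a plain text response. The metadata dictionary may
# carry a "last-modified" timestamp as produced by getMetadata below; here an
# empty dictionary is used, and the response text is hypothetical.

def _example_write_response(request, text):

    "Send 'text' as a plain text response using the 'request'."

    writeHeaders(request, "text/plain", {})
    request.write(text)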
# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text
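# Illustrative sketch (not part of the library API): formatText renders a
# snippet of wiki markup using a formatter, typically the formatter already
# associated with the request's page (falling back to the request's HTML
# formatter, following the pattern used by linkToResource below). The markup
# passed in is assumed to use the current page's format.

def _example_format_text(request, text):

    "Return the rendering of the wiki markup 'text' for the 'request'."

    fmt = request.page and getattr(request.page, "formatter", None) or request.html_formatter
    return formatText(text, request, fmt)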
# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    return title.replace("_", " ").replace("/", u" » ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title
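# Illustrative sketch (not part of the library API): qualified parameters let
# several instances of a macro or action coexist on one page. Here a
# hypothetical "filter" prefix qualifies a "category" parameter, and the
# chosen value is presented as a link to a hypothetical relative URL.

def _example_qualified_parameter_link(request):

    "Return a link reflecting the selected category, if any."

    category = getQualifiedParameter(request, "filter", "category", "CategoryExample")
    return linkToResource("CategoryOverview", request, category, {"category" : category})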
# Content storage support.

class ItemStore:

    "A page-specific item store."

    def __init__(self, page, item_dir_name="items", lock_dir_name="item-locks"):

        "Initialise an item store for the given 'page'."

        self.page = page
        self.path = page.getPagePath(item_dir_name)
        self.next_path = os.path.join(self.path, "next")
        lock_dir = page.getPagePath(lock_dir_name)
        self.lock = lock.WriteLock(lock_dir)

    def get_next(self):

        "Return the next item number."

        next = self.read_next()
        if not next:
            next = self.deduce_next()
            self.write_next(next)
        return next

    def deduce_next(self):

        "Deduce the next item number from the existing item files."

        return max([int(filename) for filename in os.listdir(self.path) if filename.isdigit()] or [0]) + 1

    def read_next(self):

        "Read the next item number from a special file."

        if not os.path.exists(self.next_path):
            return 0

        f = open(self.next_path)
        try:
            try:
                return int(f.read())
            except ValueError:
                return 0
        finally:
            f.close()

    def write_next(self, next):

        "Write the 'next' item number to a special file."

        f = open(self.next_path, "w")
        try:
            f.write(str(next))
        finally:
            f.close()

    def write_item(self, item, next):

        "Write the given 'item' to a file with the given 'next' item number."

        f = open(os.path.join(self.path, str(next)), "w")
        try:
            f.write(item)
        finally:
            f.close()

    def append(self, item):

        "Append the given 'item' to the store."

        if not self.can_write():
            return

        self.lock.acquire()
        try:
            next = self.get_next()
            self.write_item(item, next)
            self.write_next(next + 1)
        finally:
            self.lock.release()

    def can_write(self):

        """
        Return whether the user associated with the request can write to the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.write(self.page.page_name)

# vim: tabstop=4 expandtab shiftwidth=4