# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

    @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk>
    @copyright: 2000-2004 Juergen Hermann <jh@web.de>,
                2004 by Florian Festi,
                2006 by Mikko Virkkilä,
                2005-2008 MoinMoin:ThomasWaldmann,
                2007 MoinMoin:ReimarBauer.
    @license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from ItemSupport import ItemDirectoryStore
from MoinMoin.Page import Page
from MoinMoin.util import lock
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time
import os

# Moin 1.9 request parameters.

try:
    from MoinMoin.support.werkzeug.datastructures import MultiDict
except ImportError:
    pass

__version__ = "0.3"

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

category_regexp = None

# Simple content parsing.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):
    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages
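
# An illustrative sketch (not part of the original module): inside a macro or
# action with access to the usual MoinMoin 'request' object, the category
# helpers above might be combined as follows. The variable names are chosen
# purely for the example.
#
#   category_pagenames = getCategories(request)
#   for category_name, category_pagename in getCategoryMapping(category_pagenames, request):
#       pages_in_category = getCategoryPages(category_pagename, request)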

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions
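
# An illustrative example (not part of the original module): getRegions is a
# plain string function and can be exercised directly. The input text and the
# indicated results are given for demonstration only.
#
#   text = u"Intro\n{{{#!python\nprint 'x'\n}}}\nOutro"
#   getRegions(text)
#   # -> [u"{{{#!python\nprint 'x'\n}}}"]
#   getRegions(text, include_non_regions=True)
#   # -> [u"Intro\n", u"{{{#!python\nprint 'x'\n}}}", u"\nOutro"]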

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))
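
# An illustrative example (not part of the original module) of fragment
# extraction; the exact dictionary contents shown are indicative only.
#
#   getFragments(u"{{{#!python numbering=on\nprint 'x'\n}}}")
#   # -> [("python", {"python" : True, "numbering" : "on"}, u"print 'x'\n")]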

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False
    lex = shlex(f)
    lex.wordchars += "-"

    for token in lex:

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Macro argument parsing.

def parseMacroArguments(args):

    """
    Interpret the arguments. To support commas in labels, the label argument
    should be quoted. For example:

    "label=No, thanks!"
    """

    try:
        parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or []
    except AttributeError:
        parsed_args = args.split(",")

    pairs = []
    for arg in parsed_args:
        if arg:
            pair = arg.split("=", 1)
            if len(pair) < 2:
                pairs.append((None, arg))
            else:
                pairs.append(tuple(pair))

    return pairs
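
# Illustrative examples (not part of the original module) for the parsing
# helpers above; the results shown are indicative:
#
#   getHeadings(u"== Events ==")
#   # -> [(2, u" Events ", (0, 12))]
#   parseAttributes(u'numbering=on start="2"', False)
#   # -> {"numbering" : "on", "start" : "2"}
#   parseMacroArguments(u'"label=No, thanks!",mode=compact')
#   # -> [(u"label", u"No, thanks!"), (u"mode", u"compact")]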

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, request):
        self.request = request
        self.form = request.values

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __setitem__(self, name, value):
        try:
            self.form.setlist(name, value)
        except TypeError:
            self._write_enable()
            self.form.setlist(name, value)

    def __delitem__(self, name):
        try:
            del self.form[name]
        except TypeError:
            self._write_enable()
            del self.form[name]

    def _write_enable(self):
        self.form = self.request.values = MultiDict(self.form)

    def keys(self):
        return self.form.keys()

    def items(self):
        return self.form.lists()

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):
    return wikiutil.escape(s, 1)

def getPathInfo(request):
    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)
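
# An illustrative sketch (not part of the original module): get_form yields a
# MoinMoin 1.8-style form object whose values are lists, so individual values
# are obtained by indexing. The parameter name "category" is only an example.
#
#   form = get_form(request)
#   categories = form.get("category", [])
#   first_category = form.get("category", [None])[0]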

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary describing the given 'page', containing the page's
    "created" time, "last-modified" time, "sequence" (or revision number) and
    the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text
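
# An illustrative sketch (not part of the original module): wiki text might be
# rendered to HTML within a macro as follows. The choice of the "wiki" parser
# and of request.html_formatter are assumptions typical of macro code rather
# than requirements of this library.
#
#   parser_cls = getParserClass(request, "wiki")
#   html = formatText(u"Some ''formatted'' text", request,
#                     request.html_formatter, parser_cls=parser_cls)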

# Finding components for content types.

def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
    """

    if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"):
        available = {}

        for name in wikiutil.getPlugins("parser", cfg):

            # Import each parser in order to inspect supported content types.

            try:
                parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser")
            except wikiutil.PluginMissingError:
                continue

            # Attempt to determine supported content types.
            # NOTE: Extensions and /etc/mime.types (or equivalent) could also be
            # NOTE: used.

            if hasattr(parser_cls, "input_mimetypes"):
                for input_mimetype in parser_cls.input_mimetypes:
                    if not available.has_key(input_mimetype):
                        available[input_mimetype] = []
                    available[input_mimetype].append(parser_cls)

            # Support some basic parsers.

            elif name == "text_moin_wiki":
                available["text/moin-wiki"] = [parser_cls]
                available["text/moin"] = [parser_cls]

        cfg.cache.MIMETYPE_TO_PARSER = available

    return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, [])

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    return title.replace("_", " ").replace("/", u" » ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title
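
# Illustrative examples (not part of the original module) for the simpler
# helpers above; the results shown are indicative:
#
#   getSimpleWikiText(u"See `code` and !WikiName")
#   # -> u"See code and WikiName"
#   getQualifiedParameterName("event", "start")
#   # -> "event-start"
#   getFullPageName("EventsCalendar/", "2013-06")
#   # -> "EventsCalendar/2013-06"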

# Content storage support.

class ItemStore(ItemDirectoryStore):

    "A page-specific item store."

    def __init__(self, page, item_dir="items", lock_dir="item_locks"):

        "Initialise an item store for the given 'page'."

        item_dir_path = tuple(item_dir.split("/"))
        lock_dir_path = tuple(lock_dir.split("/"))
        ItemDirectoryStore.__init__(self, page.getPagePath(*item_dir_path), page.getPagePath(*lock_dir_path))
        self.page = page

    def can_write(self):

        """
        Return whether the user associated with the request can write to the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.write(self.page.page_name)

    def can_read(self):

        """
        Return whether the user associated with the request can read from the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.read(self.page.page_name)

    # High-level methods.

    def append(self, item):

        "Append the given 'item' to the store."

        if not self.can_write():
            return

        ItemDirectoryStore.append(self, item)

    def __len__(self):

        """
        Return the number of the next item (which should also be the number of
        items).
        """

        if not self.can_read():
            return 0

        return ItemDirectoryStore.__len__(self)

    def __getitem__(self, number):

        "Return the item with the given 'number'."

        if not self.can_read():
            raise IndexError, number

        return ItemDirectoryStore.__getitem__(self, number)

# vim: tabstop=4 expandtab shiftwidth=4