1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de> 7 2004 by Florian Festi 8 2006 by Mikko Virkkil 9 2005-2008 MoinMoin:ThomasWaldmann 10 2007 MoinMoin:ReimarBauer 11 2008 MoinMoin:FlorianKrupicka (redirectedOutput code) 12 @license: GNU GPL (v2 or later), see COPYING.txt for details. 13 """ 14 15 from DateSupport import * 16 from ItemSupport import ItemDirectoryStore, ItemStoreBase 17 from MoinMoin.parser import text_moin_wiki 18 from MoinMoin.Page import Page 19 from MoinMoin.PageEditor import PageEditor 20 from MoinMoin.util import lock 21 from MoinMoin import config, search, wikiutil 22 from shlex import shlex 23 import re 24 import time 25 import os 26 import codecs 27 28 try: 29 from cStringIO import StringIO 30 except ImportError: 31 from StringIO import StringIO 32 33 # Moin 1.9 request parameters. 34 35 try: 36 from MoinMoin.support.werkzeug.datastructures import MultiDict 37 except ImportError: 38 pass 39 40 __version__ = "0.4.1" 41 42 # Extraction of shared fragments. 43 44 marker_regexp_str = r"([{]{3,}|[}]{3,})" 45 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 46 47 # Extraction of headings. 48 49 heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE) 50 51 # Category extraction from pages. 52 53 category_regexp = None 54 55 # Simple content parsing. 56 57 verbatim_regexp = re.compile(ur'(?:' 58 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 59 ur'|' 60 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 61 ur'|' 62 ur'!(?P<verbatim3>.*?)(\s|$)?' 63 ur'|' 64 ur'`(?P<monospace>.*?)`' 65 ur'|' 66 ur'{{{(?P<preformatted>.*?)}}}' 67 ur')', re.UNICODE) 68 69 # Category discovery. 

def getCategoryPattern(request):

    """
    Return a compiled pattern matching category page names, preferring any
    pattern made available by the wiki configuration via the given 'request'.
    """

    global category_regexp

    try:
        # Newer Moin configurations cache a compiled category pattern.

        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")

        # Exclude the bare "Category" page itself.

        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []

    # Keep only hits whose names are not themselves category pages.

    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request, superuser=False):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'. If the optional 'superuser' is specified as a true value, no read
    access check will be made.
    """

    if pagename and Page(request, pagename).exists() and (superuser or request.user.may.read(pagename)):

        # Some Moin versions expose a dict method; others expose a mapping.

        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None       # the opening {{{... marker of the current section
    is_block = True     # whether the next split element is text (not a marker)

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                # Only a closing marker of the same length ends the section.

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:

        # Discard the header and closing marker for each region.

        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        # The marker length is the number of leading braces removed.

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:

                # A single-line region consists only of the declaration.

                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).
346 347 If 'include_non_regions' is specified as a true value, fragments will be 348 included for text between explicitly declared regions. 349 """ 350 351 return getFragmentsFromRegions(getRegions(s, include_non_regions)) 352 353 # Heading extraction. 354 355 def getHeadings(s): 356 357 """ 358 Return tuples of the form (level, title, span) for headings found within the 359 given string 's'. The span is itself a (start, end) tuple indicating the 360 matching region of 's' for a heading declaration. 361 """ 362 363 headings = [] 364 365 for match in heading_regexp.finditer(s): 366 headings.append( 367 (len(match.group("level")), match.group("heading"), match.span()) 368 ) 369 370 return headings 371 372 # Region/section attribute parsing. 373 374 def parseAttributes(s, escape=True): 375 376 """ 377 Parse the section attributes string 's', returning a mapping of names to 378 values. If 'escape' is set to a true value, the attributes will be suitable 379 for use with the formatter API. If 'escape' is set to a false value, the 380 attributes will have any quoting removed. 381 382 Because Unicode was probably not around when shlex, used here to tokenise 383 the attributes, was introduced, and since StringIO is not Unicode-capable, 384 any non-ASCII characters should be quoted in attributes. 385 """ 386 387 attrs = {} 388 f = StringIO(s.encode("utf-8")) 389 name = None 390 need_value = False 391 lex = shlex(f) 392 lex.wordchars += "-" 393 394 for token in lex: 395 token = unicode(token, "utf-8") 396 397 # Capture the name if needed. 398 399 if name is None: 400 name = escape and wikiutil.escape(token) or strip_token(token) 401 402 # Detect either an equals sign or another name. 403 404 elif not need_value: 405 if token == "=": 406 need_value = True 407 else: 408 attrs[name.lower()] = escape and "true" or True 409 name = wikiutil.escape(token) 410 411 # Otherwise, capture a value. 412 413 else: 414 # Quoting of attributes done similarly to wikiutil.parseAttributes. 

            if token:
                if escape:

                    # Already-quoted values are escaped as-is; unquoted values
                    # are quoted with their quote characters escaped too.

                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Macro argument parsing.

def parseMacroArguments(args):

    """
    Interpret the arguments. To support commas in labels, the label argument
    should be quoted. For example:

    "label=No, thanks!"
    """

    try:
        # Not all Moin versions provide parse_quoted_separated.

        parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or []
    except AttributeError:
        parsed_args = args.split(",")

    # Convert each argument to a (name, value) pair, with None as the name of
    # unnamed arguments.

    pairs = []
    for arg in parsed_args:
        if arg:
            pair = arg.split("=", 1)
            if len(pair) < 2:
                pairs.append((None, arg))
            else:
                pairs.append(tuple(pair))

    return pairs

def parseDictEntry(entry, unqualified=None):

    """
    Return the parameters specified by the given dict 'entry' string. The
    optional 'unqualified' parameter can be used to indicate parameters that
    need not be specified together with a keyword and can therefore be populated
    in the given order as such unqualified parameters are encountered.

    NOTE: This is similar to parseMacroArguments but employs space as a
    NOTE: separator and attempts to assign unqualified parameters.
    """

    parameters = {}
    unqualified = unqualified or ()

    try:
        # Not all Moin versions provide parse_quoted_separated.

        parsed_args = entry and wikiutil.parse_quoted_separated(entry, separator=None, name_value=False) or []
    except AttributeError:
        parsed_args = entry.split()

    for arg in parsed_args:
        try:
            argname, argvalue = arg.split("=", 1)

            # Detect unlikely parameter names.

            if not argname.isalpha():
                raise ValueError

            parameters[argname] = argvalue

        # Unqualified parameters are assumed to be one of a recognised set.

        except ValueError:

            # Assign the argument to the first vacant unqualified parameter.

            for argname in unqualified:
                if not parameters.has_key(argname):
                    parameters[argname] = arg
                    break

    return parameters

# Macro argument quoting.

def quoteMacroArguments(args):

    """
    Quote the given 'args' - a collection of (name, value) tuples - returning a
    string containing the comma-separated, quoted arguments.
    """

    quoted = []

    for name, value in args:
        quoted.append(quoteMacroArgument(name, value))

    return ",".join(quoted)

def quoteMacroArgument(name, value):

    """
    Quote the argument with the given 'name' (or None indicating an unnamed
    argument) and 'value' so that it can be used with a macro.
    """

    # Double quotes within the value are doubled to escape them.

    value = unicode(value).replace('"', '""')
    if name is None:
        return '"%s"' % value
    else:
        return '"%s=%s"' % (name, value)

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, request):

        # 'request.values' holds the combined request parameters.

        self.request = request
        self.form = request.values

    def has_key(self, name):

        # True only when at least one value exists for 'name'.

        return not not self.form.getlist(name)

    def get(self, name, default=None):

        # Return the list of values for 'name', or 'default' if absent.

        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __setitem__(self, name, value):

        # The underlying structure may be immutable, raising TypeError; replace
        # it with a writable copy and retry.

        try:
            self.form.setlist(name, value)
        except TypeError:
            self._write_enable()
            self.form.setlist(name, value)

    def __delitem__(self, name):
        try:
            del self.form[name]
        except TypeError:
            self._write_enable()
            del self.form[name]

    def _write_enable(self):

        # Replace the request values with a mutable MultiDict copy.

        self.form = self.request.values = MultiDict(self.form)

    def keys(self):
        return self.form.keys()

    def items(self):

        # Yields (name, list of values) pairs.

        return self.form.lists()

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):

        "Return a form object for this action's request."

        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    # Requests with a "values" attribute get the 1.8-style wrapper.

    if hasattr(request, "values"):
        return Form(request)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):

        # Split each "Name: value" string, preserving colons in the value.

        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Escape 's' for use within an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path information for the given 'request'."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    elif hasattr(request, "env"):

        # CGI-style environment variables use an upper-case, prefixed form.

        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())
    else:
        return None

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    # The last entry in the revision list is the earliest revision.

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Give the time zone as UTC.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getOutputTypes(request, format):

    """
    Using the 'request' and the 'format' of a fragment, return the media types
    available for the fragment.
    """

    return getParserOutputTypes(getParserClass(request, format))

def getParserOutputTypes(parser):

    "Return the media types supported by the given 'parser'."

    # This uses an extended parser API method if available.

    if parser and hasattr(parser, "getOutputTypes"):
        return parser.getOutputTypes()
    else:
        return ["text/html"]

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install the given formatter on the request, restoring the
    # original formatter afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        # Only the wiki parser supports the inhibit_p argument.

        if isinstance(parser, text_moin_wiki.Parser):
            return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
        else:
            return redirectedOutput(request, parser, fmt)
    finally:
        request.formatter = old_fmt

def formatTextForOutputType(text, request, parser_cls, output_type):

    """
    Format the given 'text' using the specified 'request' and parser class
    'parser_cls', producing output of the given 'output_type'.
    """

    parser = parser_cls(text, request)
    buf = codecs.getwriter("utf-8")(StringIO())
    try:
        parser.formatForOutputType(output_type, buf.write)
        return unicode(buf.getvalue(), "utf-8")
    finally:
        buf.close()

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    # Capture the formatted output in a UTF-8-encoding buffer.

    buf = codecs.getwriter("utf-8")(StringIO())
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return unicode(text, "utf-8")

# Finding components for content types.

def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
    """

    if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"):
        available = {}

        for name in wikiutil.getPlugins("parser", cfg):

            # Import each parser in order to inspect supported content types.

            try:
                parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser")
            except wikiutil.PluginMissingError:
                continue

            # Attempt to determine supported content types.
            # NOTE: Extensions and /etc/mime.types (or equivalent) could also be
            # NOTE: used.

            if hasattr(parser_cls, "input_mimetypes"):
                for input_mimetype in parser_cls.input_mimetypes:
                    if not available.has_key(input_mimetype):
                        available[input_mimetype] = []
                    available[input_mimetype].append(parser_cls)

            # Support some basic parsers.

            elif name == "text_moin_wiki":
                available["text/moin-wiki"] = [parser_cls]
                available["text/moin"] = [parser_cls]
            elif name == "text_html":
                available["text/html"] = [parser_cls]
                available["application/xhtml+xml"] = [parser_cls]

        cfg.cache.MIMETYPE_TO_PARSER = available

    return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, [])

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    l = []
    last = 0

    for m in verbatim_regexp.finditer(text):
        start, end = m.span()
        l.append(text[last:start])

        # Process the verbatim macro arguments.

        args = m.group("verbatim") or m.group("verbatim2")
        if args:
            l += [v for (n, v) in parseMacroArguments(args)]

        # Or just add the match groups.

        else:
            l += [s for s in m.groups() if s]

        last = end

    l.append(text[last:])
    return "".join(l)

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % quoteMacroArgument(None, text)

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the "?" in the separator below may be a transit-mangled
    # NOTE(review): non-ASCII character - confirm against the original source.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Use the current page's formatter where available, falling back on the
    # request's HTML formatter.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# Content storage support.

class ItemStore:

    "A page-specific item store."

    def __init__(self, page, item_dir="items", lock_dir="item_locks", store=None):

        "Initialise an item store for the given 'page'."

        # The directories are given as "/"-separated paths relative to the page.

        item_dir_path = tuple(item_dir.split("/"))
        lock_dir_path = tuple(lock_dir.split("/"))
        self.page = page
        self.store = store or ItemDirectoryStore(page.getPagePath(*item_dir_path), page.getPagePath(*lock_dir_path))

    def can_write(self):

        """
        Return whether the user associated with the request can write to the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.write(self.page.page_name)

    def can_read(self):

        """
        Return whether the user associated with the request can read from the
        page owning this store.
        """

        user = self.page.request.user
        return user and user.may.read(self.page.page_name)

    def can_delete(self):

        """
        Return whether the user associated with the request can delete the
        page owning this store.
1134 """ 1135 1136 user = self.page.request.user 1137 return user and user.may.delete(self.page.page_name) 1138 1139 # Store-specific methods. 1140 1141 def mtime(self): 1142 return self.store.mtime() 1143 1144 # High-level methods. 1145 1146 def keys(self): 1147 1148 "Return a list of keys for items in the store." 1149 1150 if not self.can_read(): 1151 return 0 1152 1153 return self.store.keys() 1154 1155 def append(self, item): 1156 1157 "Append the given 'item' to the store." 1158 1159 if not self.can_write(): 1160 return 1161 1162 self.store.append(item) 1163 1164 def __len__(self): 1165 1166 "Return the number of items in the store." 1167 1168 if not self.can_read(): 1169 return 0 1170 1171 return len(self.store) 1172 1173 def __getitem__(self, number): 1174 1175 "Return the item with the given 'number'." 1176 1177 if not self.can_read(): 1178 raise IndexError, number 1179 1180 return self.store.__getitem__(number) 1181 1182 def __delitem__(self, number): 1183 1184 "Remove the item with the given 'number'." 1185 1186 if not self.can_delete(): 1187 return 1188 1189 return self.store.__delitem__(number) 1190 1191 def __iter__(self): 1192 return self.store.__iter__() 1193 1194 def next(self): 1195 return self.store.next() 1196 1197 class ItemSubpageStore(ItemStoreBase): 1198 1199 "A subpage-based item store." 1200 1201 def __init__(self, page, lock_dir="item_locks"): 1202 1203 "Initialise an item store for subpages under the given 'page'." 1204 1205 lock_dir_path = tuple(lock_dir.split("/")) 1206 ItemStoreBase.__init__(self, page.getPagePath(*lock_dir_path)) 1207 self.page = page 1208 1209 def mtime(self): 1210 1211 "Return the last modified time of the item store." 
1212 1213 keys = self.get_keys() 1214 if not keys: 1215 page = self.page 1216 else: 1217 page = Page(self.page.request, self.get_item_path(max(keys))) 1218 1219 return wikiutil.version2timestamp( 1220 getMetadata(page)["last-modified"] 1221 ) 1222 1223 def get_next(self): 1224 1225 "Return the next item number." 1226 1227 return self.deduce_next() 1228 1229 def get_keys(self): 1230 1231 "Return the item keys." 1232 1233 is_subpage = re.compile(u"^%s/" % re.escape(self.page.page_name), re.UNICODE).match 1234 1235 # Collect the strict subpages of the parent page. 1236 1237 leafnames = [] 1238 parentname = self.page.page_name 1239 1240 for pagename in RootPage(self.page.request).getPageList(filter=is_subpage): 1241 parts = pagename[len(parentname)+1:].split("/") 1242 1243 # Only collect numbered pages immediately below the parent. 1244 1245 if len(parts) == 1 and parts[0].isdigit(): 1246 leafnames.append(int(parts[0])) 1247 1248 return leafnames 1249 1250 def write_item(self, item, next): 1251 1252 "Write the given 'item' to a file with the given 'next' item number." 1253 1254 page = PageEditor(self.page.request, self.get_item_path(next)) 1255 page.saveText(item, 0) 1256 1257 def read_item(self, number): 1258 1259 "Read the item with the given item 'number'." 1260 1261 page = Page(self.page.request, self.get_item_path(number)) 1262 return page.get_raw_body() 1263 1264 def remove_item(self, number): 1265 1266 "Remove the item with the given item 'number'." 1267 1268 page = PageEditor(self.page.request, self.get_item_path(number)) 1269 page.deletePage() 1270 1271 def get_item_path(self, number): 1272 1273 "Get the path for the given item 'number'." 1274 1275 return "%s/%s" % (self.page.page_name, number) 1276 1277 # High-level methods. 1278 1279 def append(self, item): 1280 1281 "Append the given 'item' to the store." 

        # Hold the write lock (presumably provided by ItemStoreBase - confirm)
        # while choosing the next item number and writing the item.

        self.writelock.acquire()
        try:
            next = self.get_next()
            self.write_item(item, next)
        finally:
            self.writelock.release()

# vim: tabstop=4 expandtab shiftwidth=4