1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012, 2013 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de> 7 2004 by Florian Festi 8 2006 by Mikko Virkkil 9 2005-2008 MoinMoin:ThomasWaldmann 10 2007 MoinMoin:ReimarBauer 11 2008 MoinMoin:FlorianKrupicka (redirectedOutput code) 12 @license: GNU GPL (v2 or later), see COPYING.txt for details. 13 """ 14 15 from DateSupport import * 16 from ItemSupport import ItemDirectoryStore 17 from MoinMoin.parser import text_moin_wiki 18 from MoinMoin.Page import Page 19 from MoinMoin.util import lock 20 from MoinMoin import config, search, wikiutil 21 from shlex import shlex 22 import re 23 import time 24 import os 25 import codecs 26 27 try: 28 from cStringIO import StringIO 29 except ImportError: 30 from StringIO import StringIO 31 32 # Moin 1.9 request parameters. 33 34 try: 35 from MoinMoin.support.werkzeug.datastructures import MultiDict 36 except ImportError: 37 pass 38 39 __version__ = "0.4" 40 41 # Extraction of shared fragments. 42 43 marker_regexp_str = r"([{]{3,}|[}]{3,})" 44 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 45 46 # Extraction of headings. 47 48 heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE) 49 50 # Category extraction from pages. 51 52 category_regexp = None 53 54 # Simple content parsing. 55 56 verbatim_regexp = re.compile(ur'(?:' 57 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 58 ur'|' 59 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 60 ur'|' 61 ur'!(?P<verbatim3>.*?)(\s|$)?' 62 ur'|' 63 ur'`(?P<monospace>.*?)`' 64 ur'|' 65 ur'{{{(?P<preformatted>.*?)}}}' 66 ur')', re.UNICODE) 67 68 # Category discovery. 
69 70 def getCategoryPattern(request): 71 global category_regexp 72 73 try: 74 return request.cfg.cache.page_category_regexact 75 except AttributeError: 76 77 # Use regular expression from MoinMoin 1.7.1 otherwise. 78 79 if category_regexp is None: 80 category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE) 81 return category_regexp 82 83 def getCategories(request): 84 85 """ 86 From the AdvancedSearch macro, return a list of category page names using 87 the given 'request'. 88 """ 89 90 # This will return all pages with "Category" in the title. 91 92 cat_filter = getCategoryPattern(request).search 93 return request.rootpage.getPageList(filter=cat_filter) 94 95 def getCategoryMapping(category_pagenames, request): 96 97 """ 98 For the given 'category_pagenames' return a list of tuples of the form 99 (category name, category page name) using the given 'request'. 100 """ 101 102 cat_pattern = getCategoryPattern(request) 103 mapping = [] 104 for pagename in category_pagenames: 105 name = cat_pattern.match(pagename).group("key") 106 if name != "Category": 107 mapping.append((name, pagename)) 108 mapping.sort() 109 return mapping 110 111 def getCategoryPages(pagename, request): 112 113 """ 114 Return the pages associated with the given category 'pagename' using the 115 'request'. 116 """ 117 118 query = search.QueryParser().parse_query('category:%s' % pagename) 119 results = search.searchPages(request, query, "page_name") 120 return filterCategoryPages(results, request) 121 122 def filterCategoryPages(results, request): 123 124 "Filter category pages from the given 'results' using the 'request'." 
125 126 cat_pattern = getCategoryPattern(request) 127 pages = [] 128 for page in results.hits: 129 if not cat_pattern.match(page.page_name): 130 pages.append(page) 131 return pages 132 133 def getAllCategoryPages(category_names, request): 134 135 """ 136 Return all pages belonging to the categories having the given 137 'category_names', using the given 'request'. 138 """ 139 140 pages = [] 141 pagenames = set() 142 143 for category_name in category_names: 144 145 # Get the pages and page names in the category. 146 147 pages_in_category = getCategoryPages(category_name, request) 148 149 # Visit each page in the category. 150 151 for page_in_category in pages_in_category: 152 pagename = page_in_category.page_name 153 154 # Only process each page once. 155 156 if pagename in pagenames: 157 continue 158 else: 159 pagenames.add(pagename) 160 161 pages.append(page_in_category) 162 163 return pages 164 165 def getPagesForSearch(search_pattern, request): 166 167 """ 168 Return result pages for a search employing the given 'search_pattern' and 169 using the given 'request'. 170 """ 171 172 query = search.QueryParser().parse_query(search_pattern) 173 results = search.searchPages(request, query, "page_name") 174 return filterCategoryPages(results, request) 175 176 # WikiDict functions. 177 178 def getWikiDict(pagename, request, superuser=False): 179 180 """ 181 Return the WikiDict provided by the given 'pagename' using the given 182 'request'. If the optional 'superuser' is specified as a true value, no read 183 access check will be made. 184 """ 185 186 if pagename and Page(request, pagename).exists() and (superuser or request.user.may.read(pagename)): 187 if hasattr(request.dicts, "dict"): 188 return request.dicts.dict(pagename) 189 else: 190 return request.dicts[pagename] 191 else: 192 return None 193 194 # Searching-related functions. 195 196 def getPagesFromResults(result_pages, request): 197 198 "Return genuine pages for the given 'result_pages' using the 'request'." 
199 200 return [Page(request, page.page_name) for page in result_pages] 201 202 # Region/section parsing. 203 204 def getRegions(s, include_non_regions=False): 205 206 """ 207 Parse the string 's', returning a list of explicitly declared regions. 208 209 If 'include_non_regions' is specified as a true value, fragments will be 210 included for text between explicitly declared regions. 211 """ 212 213 regions = [] 214 marker = None 215 is_block = True 216 217 # Start a region for exposed text, if appropriate. 218 219 if include_non_regions: 220 regions.append("") 221 222 for match_text in marker_regexp.split(s): 223 224 # Capture section text. 225 226 if is_block: 227 if marker or include_non_regions: 228 regions[-1] += match_text 229 230 # Handle section markers. 231 232 else: 233 234 # Close any open sections, returning to exposed text regions. 235 236 if marker: 237 238 # Add any marker to the current region, regardless of whether it 239 # successfully closes a section. 240 241 regions[-1] += match_text 242 243 if match_text.startswith("}") and len(marker) == len(match_text): 244 marker = None 245 246 # Start a region for exposed text, if appropriate. 247 248 if include_non_regions: 249 regions.append("") 250 251 # Without a current marker, start a new section. 252 253 else: 254 marker = match_text 255 regions.append("") 256 257 # Add the marker to the new region. 258 259 regions[-1] += match_text 260 261 # The match text alternates between text between markers and the markers 262 # themselves. 263 264 is_block = not is_block 265 266 return regions 267 268 def getFragmentsFromRegions(regions): 269 270 """ 271 Return fragments from the given 'regions', each having the form 272 (format, attributes, body text). 
273 """ 274 275 fragments = [] 276 277 for region in regions: 278 format, attributes, body, header, close = getFragmentFromRegion(region) 279 fragments.append((format, attributes, body)) 280 281 return fragments 282 283 def getFragmentFromRegion(region): 284 285 """ 286 Return a fragment for the given 'region' having the form (format, 287 attributes, body text, header, close), where the 'header' is the original 288 declaration of the 'region' or None if no explicit region is defined, and 289 'close' is the closing marker of the 'region' or None if no explicit region 290 is defined. 291 """ 292 293 if region.startswith("{{{"): 294 295 body = region.lstrip("{") 296 level = len(region) - len(body) 297 body = body.rstrip("}").lstrip() 298 299 # Remove any prelude and process metadata. 300 301 if body.startswith("#!"): 302 303 try: 304 declaration, body = body.split("\n", 1) 305 except ValueError: 306 declaration = body 307 body = "" 308 309 arguments = declaration[2:] 310 311 # Get any parser/format declaration. 312 313 if arguments and not arguments[0].isspace(): 314 details = arguments.split(None, 1) 315 if len(details) == 2: 316 format, arguments = details 317 else: 318 format = details[0] 319 arguments = "" 320 else: 321 format = None 322 323 # Get the attributes/arguments for the region. 324 325 attributes = parseAttributes(arguments, False) 326 327 # Add an entry for the format in the attribute dictionary. 328 329 if format and not attributes.has_key(format): 330 attributes[format] = True 331 332 return format, attributes, body, level * "{" + declaration + "\n", level * "}" 333 334 else: 335 return None, {}, body, level * "{" + "\n", level * "}" 336 337 else: 338 return None, {}, region, None, None 339 340 def getFragments(s, include_non_regions=False): 341 342 """ 343 Return fragments for the given string 's', each having the form 344 (format, arguments, body text). 
345 346 If 'include_non_regions' is specified as a true value, fragments will be 347 included for text between explicitly declared regions. 348 """ 349 350 return getFragmentsFromRegions(getRegions(s, include_non_regions)) 351 352 # Heading extraction. 353 354 def getHeadings(s): 355 356 """ 357 Return tuples of the form (level, title, span) for headings found within the 358 given string 's'. The span is itself a (start, end) tuple indicating the 359 matching region of 's' for a heading declaration. 360 """ 361 362 headings = [] 363 364 for match in heading_regexp.finditer(s): 365 headings.append( 366 (len(match.group("level")), match.group("heading"), match.span()) 367 ) 368 369 return headings 370 371 # Region/section attribute parsing. 372 373 def parseAttributes(s, escape=True): 374 375 """ 376 Parse the section attributes string 's', returning a mapping of names to 377 values. If 'escape' is set to a true value, the attributes will be suitable 378 for use with the formatter API. If 'escape' is set to a false value, the 379 attributes will have any quoting removed. 380 381 Because Unicode was probably not around when shlex, used here to tokenise 382 the attributes, was introduced, and since StringIO is not Unicode-capable, 383 any non-ASCII characters should be quoted in attributes. 384 """ 385 386 attrs = {} 387 f = StringIO(s.encode("utf-8")) 388 name = None 389 need_value = False 390 lex = shlex(f) 391 lex.wordchars += "-" 392 393 for token in lex: 394 token = unicode(token, "utf-8") 395 396 # Capture the name if needed. 397 398 if name is None: 399 name = escape and wikiutil.escape(token) or strip_token(token) 400 401 # Detect either an equals sign or another name. 402 403 elif not need_value: 404 if token == "=": 405 need_value = True 406 else: 407 attrs[name.lower()] = escape and "true" or True 408 name = wikiutil.escape(token) 409 410 # Otherwise, capture a value. 411 412 else: 413 # Quoting of attributes done similarly to wikiutil.parseAttributes. 
414 415 if token: 416 if escape: 417 if token[0] in ("'", '"'): 418 token = wikiutil.escape(token) 419 else: 420 token = '"%s"' % wikiutil.escape(token, 1) 421 else: 422 token = strip_token(token) 423 424 attrs[name.lower()] = token 425 name = None 426 need_value = False 427 428 # Handle any name-only attributes at the end of the collection. 429 430 if name and not need_value: 431 attrs[name.lower()] = escape and "true" or True 432 433 return attrs 434 435 def strip_token(token): 436 437 "Return the given 'token' stripped of quoting." 438 439 if token[0] in ("'", '"') and token[-1] == token[0]: 440 return token[1:-1] 441 else: 442 return token 443 444 # Macro argument parsing. 445 446 def parseMacroArguments(args): 447 448 """ 449 Interpret the arguments. To support commas in labels, the label argument 450 should be quoted. For example: 451 452 "label=No, thanks!" 453 """ 454 455 try: 456 parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or [] 457 except AttributeError: 458 parsed_args = args.split(",") 459 460 pairs = [] 461 for arg in parsed_args: 462 if arg: 463 pair = arg.split("=", 1) 464 if len(pair) < 2: 465 pairs.append((None, arg)) 466 else: 467 pairs.append(tuple(pair)) 468 469 return pairs 470 471 def parseDictEntry(entry, unqualified=None): 472 473 """ 474 Return the parameters specified by the given dict 'entry' string. The 475 optional 'unqualified' parameter can be used to indicate parameters that 476 need not be specified together with a keyword and can therefore be populated 477 in the given order as such unqualified parameters are encountered. 478 479 NOTE: This is similar to parseMacroArguments but employs space as a 480 NOTE: separator and attempts to assign unqualified parameters. 
481 """ 482 483 parameters = {} 484 unqualified = unqualified or () 485 486 try: 487 parsed_args = entry and wikiutil.parse_quoted_separated(entry, separator=None, name_value=False) or [] 488 except AttributeError: 489 parsed_args = entry.split() 490 491 for arg in parsed_args: 492 try: 493 argname, argvalue = arg.split("=", 1) 494 495 # Detect unlikely parameter names. 496 497 if not argname.isalpha(): 498 raise ValueError 499 500 parameters[argname] = argvalue 501 502 # Unqualified parameters are assumed to be one of a recognised set. 503 504 except ValueError: 505 for argname in unqualified: 506 if not parameters.has_key(argname): 507 parameters[argname] = arg 508 break 509 510 return parameters 511 512 # Macro argument quoting. 513 514 def quoteMacroArguments(args): 515 516 """ 517 Quote the given 'args' - a collection of (name, value) tuples - returning a 518 string containing the comma-separated, quoted arguments. 519 """ 520 521 quoted = [] 522 523 for name, value in args: 524 quoted.append(quoteMacroArgument(name, value)) 525 526 return ",".join(quoted) 527 528 def quoteMacroArgument(name, value): 529 530 """ 531 Quote the argument with the given 'name' (or None indicating an unnamed 532 argument) and 'value' so that it can be used with a macro. 533 """ 534 535 value = unicode(value).replace('"', '""') 536 if name is None: 537 return '"%s"' % value 538 else: 539 return '"%s=%s"' % (name, value) 540 541 # Request-related classes and associated functions. 542 543 class Form: 544 545 """ 546 A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x 547 environment. 
548 """ 549 550 def __init__(self, request): 551 self.request = request 552 self.form = request.values 553 554 def has_key(self, name): 555 return not not self.form.getlist(name) 556 557 def get(self, name, default=None): 558 values = self.form.getlist(name) 559 if not values: 560 return default 561 else: 562 return values 563 564 def __getitem__(self, name): 565 return self.form.getlist(name) 566 567 def __setitem__(self, name, value): 568 try: 569 self.form.setlist(name, value) 570 except TypeError: 571 self._write_enable() 572 self.form.setlist(name, value) 573 574 def __delitem__(self, name): 575 try: 576 del self.form[name] 577 except TypeError: 578 self._write_enable() 579 del self.form[name] 580 581 def _write_enable(self): 582 self.form = self.request.values = MultiDict(self.form) 583 584 def keys(self): 585 return self.form.keys() 586 587 def items(self): 588 return self.form.lists() 589 590 class ActionSupport: 591 592 """ 593 Work around disruptive MoinMoin changes in 1.9, and also provide useful 594 convenience methods. 595 """ 596 597 def get_form(self): 598 return get_form(self.request) 599 600 def _get_selected(self, value, input_value): 601 602 """ 603 Return the HTML attribute text indicating selection of an option (or 604 otherwise) if 'value' matches 'input_value'. 605 """ 606 607 return input_value is not None and value == input_value and 'selected="selected"' or '' 608 609 def _get_selected_for_list(self, value, input_values): 610 611 """ 612 Return the HTML attribute text indicating selection of an option (or 613 otherwise) if 'value' matches one of the 'input_values'. 614 """ 615 616 return value in input_values and 'selected="selected"' or '' 617 618 def get_option_list(self, value, values): 619 620 """ 621 Return a list of HTML element definitions for options describing the 622 given 'values', selecting the option with the specified 'value' if 623 present. 
624 """ 625 626 options = [] 627 for available_value in values: 628 selected = self._get_selected(available_value, value) 629 options.append('<option value="%s" %s>%s</option>' % ( 630 escattr(available_value), selected, wikiutil.escape(available_value))) 631 return options 632 633 def _get_input(self, form, name, default=None): 634 635 """ 636 Return the input from 'form' having the given 'name', returning either 637 the input converted to an integer or the given 'default' (optional, None 638 if not specified). 639 """ 640 641 value = form.get(name, [None])[0] 642 if not value: # true if 0 obtained 643 return default 644 else: 645 return int(value) 646 647 def get_form(request): 648 649 "Work around disruptive MoinMoin changes in 1.9." 650 651 if hasattr(request, "values"): 652 return Form(request) 653 else: 654 return request.form 655 656 class send_headers_cls: 657 658 """ 659 A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a 660 1.9.x environment. 661 """ 662 663 def __init__(self, request): 664 self.request = request 665 666 def __call__(self, headers): 667 for header in headers: 668 parts = header.split(":") 669 self.request.headers.add(parts[0], ":".join(parts[1:])) 670 671 def get_send_headers(request): 672 673 "Return a function that can send response headers." 674 675 if hasattr(request, "http_headers"): 676 return request.http_headers 677 elif hasattr(request, "emit_http_headers"): 678 return request.emit_http_headers 679 else: 680 return send_headers_cls(request) 681 682 def escattr(s): 683 return wikiutil.escape(s, 1) 684 685 def getPathInfo(request): 686 if hasattr(request, "getPathinfo"): 687 return request.getPathinfo() 688 else: 689 return request.path 690 691 def getHeader(request, header_name, prefix=None): 692 693 """ 694 Using the 'request', return the value of the header with the given 695 'header_name', using the optional 'prefix' to obtain protocol-specific 696 headers if necessary. 
697 698 If no value is found for the given 'header_name', None is returned. 699 """ 700 701 if hasattr(request, "getHeader"): 702 return request.getHeader(header_name) 703 elif hasattr(request, "headers"): 704 return request.headers.get(header_name) 705 elif hasattr(request, "env"): 706 return request.env.get((prefix and prefix + "_" or "") + header_name.upper()) 707 else: 708 return None 709 710 def writeHeaders(request, mimetype, metadata, status=None): 711 712 """ 713 Using the 'request', write resource headers using the given 'mimetype', 714 based on the given 'metadata'. If the optional 'status' is specified, set 715 the status header to the given value. 716 """ 717 718 send_headers = get_send_headers(request) 719 720 # Define headers. 721 722 headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)] 723 724 # Define the last modified time. 725 # NOTE: Consider using request.httpDate. 726 727 latest_timestamp = metadata.get("last-modified") 728 if latest_timestamp: 729 headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string()) 730 731 if status: 732 headers.append("Status: %s" % status) 733 734 send_headers(headers) 735 736 # Page access functions. 737 738 def getPageURL(page): 739 740 "Return the URL of the given 'page'." 741 742 request = page.request 743 return request.getQualifiedURL(page.url(request, relative=0)) 744 745 def getFormat(page): 746 747 "Get the format used on the given 'page'." 748 749 return page.pi["format"] 750 751 def getMetadata(page): 752 753 """ 754 Return a dictionary containing items describing for the given 'page' the 755 page's "created" time, "last-modified" time, "sequence" (or revision number) 756 and the "last-comment" made about the last edit. 757 """ 758 759 request = page.request 760 761 # Get the initial revision of the page. 
762 763 revisions = page.getRevList() 764 765 if not revisions: 766 return {} 767 768 event_page_initial = Page(request, page.page_name, rev=revisions[-1]) 769 770 # Get the created and last modified times. 771 772 initial_revision = getPageRevision(event_page_initial) 773 774 metadata = {} 775 metadata["created"] = initial_revision["timestamp"] 776 latest_revision = getPageRevision(page) 777 metadata["last-modified"] = latest_revision["timestamp"] 778 metadata["sequence"] = len(revisions) - 1 779 metadata["last-comment"] = latest_revision["comment"] 780 781 return metadata 782 783 def getPageRevision(page): 784 785 "Return the revision details dictionary for the given 'page'." 786 787 # From Page.edit_info... 788 789 if hasattr(page, "editlog_entry"): 790 line = page.editlog_entry() 791 else: 792 line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x 793 794 # Similar to Page.mtime_usecs behaviour... 795 796 if line: 797 timestamp = line.ed_time_usecs 798 mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x 799 comment = line.comment 800 else: 801 mtime = 0 802 comment = "" 803 804 # Give the time zone as UTC. 805 806 return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment} 807 808 # Page parsing and formatting of embedded content. 809 810 def getOutputTypes(request, format): 811 812 """ 813 Using the 'request' and the 'format' of a fragment, return the media types 814 available for the fragment. 815 """ 816 817 return getParserOutputTypes(getParserClass(request, format)) 818 819 def getParserOutputTypes(parser): 820 821 "Return the media types supported by the given 'parser'." 822 823 # This uses an extended parser API method if available. 824 825 if parser and hasattr(parser, "getOutputTypes"): 826 return parser.getOutputTypes() 827 else: 828 return ["text/html"] 829 830 def getPageParserClass(request): 831 832 "Using 'request', return a parser class for the current page's format." 
833 834 return getParserClass(request, getFormat(request.page)) 835 836 def getParserClass(request, format): 837 838 """ 839 Return a parser class using the 'request' for the given 'format', returning 840 a plain text parser if no parser can be found for the specified 'format'. 841 """ 842 843 try: 844 return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain") 845 except wikiutil.PluginMissingError: 846 return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain") 847 848 def getFormatterClass(request, format): 849 850 """ 851 Return a formatter class using the 'request' for the given output 'format', 852 returning a plain text formatter if no formatter can be found for the 853 specified 'format'. 854 """ 855 856 try: 857 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain") 858 except wikiutil.PluginMissingError: 859 return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain") 860 861 def formatText(text, request, fmt, inhibit_p=True, parser_cls=None): 862 863 """ 864 Format the given 'text' using the specified 'request' and formatter 'fmt'. 865 Suppress line anchors in the output, and fix lists by indicating that a 866 paragraph has already been started. 867 """ 868 869 if not parser_cls: 870 parser_cls = getPageParserClass(request) 871 parser = parser_cls(text, request, line_anchors=False) 872 873 old_fmt = request.formatter 874 request.formatter = fmt 875 try: 876 if isinstance(parser, text_moin_wiki.Parser): 877 return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p) 878 else: 879 return redirectedOutput(request, parser, fmt) 880 finally: 881 request.formatter = old_fmt 882 883 def formatTextForOutputType(text, request, parser_cls, output_type): 884 885 """ 886 Format the given 'text' using the specified 'request' and parser class 887 'parser_cls', producing output of the given 'output_type'. 
888 """ 889 890 parser = parser_cls(text, request) 891 buf = codecs.getwriter("utf-8")(StringIO()) 892 try: 893 parser.formatForOutputType(output_type, buf.write) 894 return unicode(buf.getvalue(), "utf-8") 895 finally: 896 buf.close() 897 898 def redirectedOutput(request, parser, fmt, **kw): 899 900 "A fixed version of the request method of the same name." 901 902 buf = codecs.getwriter("utf-8")(StringIO()) 903 request.redirect(buf) 904 try: 905 parser.format(fmt, **kw) 906 if hasattr(fmt, "flush"): 907 buf.write(fmt.flush(True)) 908 finally: 909 request.redirect() 910 text = buf.getvalue() 911 buf.close() 912 return unicode(text, "utf-8") 913 914 # Finding components for content types. 915 916 def getParsersForContentType(cfg, mimetype): 917 918 """ 919 Find parsers that support the given 'mimetype', constructing a dictionary 920 mapping content types to lists of parsers that is then cached in the 'cfg' 921 object. A list of suitable parsers is returned for 'mimetype'. 922 """ 923 924 if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"): 925 available = {} 926 927 for name in wikiutil.getPlugins("parser", cfg): 928 929 # Import each parser in order to inspect supported content types. 930 931 try: 932 parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser") 933 except wikiutil.PluginMissingError: 934 continue 935 936 # Attempt to determine supported content types. 937 # NOTE: Extensions and /etc/mime.types (or equivalent) could also be 938 # NOTE: used. 939 940 if hasattr(parser_cls, "input_mimetypes"): 941 for input_mimetype in parser_cls.input_mimetypes: 942 if not available.has_key(input_mimetype): 943 available[input_mimetype] = [] 944 available[input_mimetype].append(parser_cls) 945 946 # Support some basic parsers. 
947 948 elif name == "text_moin_wiki": 949 available["text/moin-wiki"] = [parser_cls] 950 available["text/moin"] = [parser_cls] 951 elif name == "text_html": 952 available["text/html"] = [parser_cls] 953 available["application/xhtml+xml"] = [parser_cls] 954 955 cfg.cache.MIMETYPE_TO_PARSER = available 956 957 return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, []) 958 959 # Textual representations. 960 961 def getSimpleWikiText(text): 962 963 """ 964 Return the plain text representation of the given 'text' which may employ 965 certain Wiki syntax features, such as those providing verbatim or monospaced 966 text. 967 """ 968 969 # NOTE: Re-implementing support for verbatim text and linking avoidance. 970 971 return "".join([s for s in verbatim_regexp.split(text) if s is not None]) 972 973 def getEncodedWikiText(text): 974 975 "Encode the given 'text' in a verbatim representation." 976 977 return "<<Verbatim(%s)>>" % text 978 979 def getPrettyTitle(title): 980 981 "Return a nicely formatted version of the given 'title'." 982 983 return title.replace("_", " ").replace("/", u" ? ") 984 985 # User interface functions. 986 987 def getParameter(request, name, default=None): 988 989 """ 990 Using the given 'request', return the value of the parameter with the given 991 'name', returning the optional 'default' (or None) if no value was supplied 992 in the 'request'. 993 """ 994 995 return get_form(request).get(name, [default])[0] 996 997 def getQualifiedParameter(request, prefix, argname, default=None): 998 999 """ 1000 Using the given 'request', 'prefix' and 'argname', retrieve the value of the 1001 qualified parameter, returning the optional 'default' (or None) if no value 1002 was supplied in the 'request'. 
1003 """ 1004 1005 argname = getQualifiedParameterName(prefix, argname) 1006 return getParameter(request, argname, default) 1007 1008 def getQualifiedParameterName(prefix, argname): 1009 1010 """ 1011 Return the qualified parameter name using the given 'prefix' and 'argname'. 1012 """ 1013 1014 if not prefix: 1015 return argname 1016 else: 1017 return "%s-%s" % (prefix, argname) 1018 1019 # Page-related functions. 1020 1021 def getPrettyPageName(page): 1022 1023 "Return a nicely formatted title/name for the given 'page'." 1024 1025 title = page.split_title(force=1) 1026 return getPrettyTitle(title) 1027 1028 def linkToPage(request, page, text, query_string=None, anchor=None, **kw): 1029 1030 """ 1031 Using 'request', return a link to 'page' with the given link 'text' and 1032 optional 'query_string' and 'anchor'. 1033 """ 1034 1035 text = wikiutil.escape(text) 1036 return page.link_to_raw(request, text, query_string, anchor, **kw) 1037 1038 def linkToResource(url, request, text, query_string=None, anchor=None): 1039 1040 """ 1041 Using 'request', return a link to 'url' with the given link 'text' and 1042 optional 'query_string' and 'anchor'. 1043 """ 1044 1045 if anchor: 1046 url += "#%s" % anchor 1047 1048 if query_string: 1049 query_string = wikiutil.makeQueryString(query_string) 1050 url += "?%s" % query_string 1051 1052 formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter 1053 1054 output = [] 1055 output.append(formatter.url(1, url)) 1056 output.append(formatter.text(text)) 1057 output.append(formatter.url(0)) 1058 return "".join(output) 1059 1060 def getFullPageName(parent, title): 1061 1062 """ 1063 Return a full page name from the given 'parent' page (can be empty or None) 1064 and 'title' (a simple page name). 1065 """ 1066 1067 if parent: 1068 return "%s/%s" % (parent.rstrip("/"), title) 1069 else: 1070 return title 1071 1072 # Content storage support. 
class ItemStore(ItemDirectoryStore):

    "An item store bound to a particular Wiki page."

    def __init__(self, page, item_dir="items", lock_dir="item_locks"):

        """
        Initialise the store beneath the given 'page', with 'item_dir' and
        'lock_dir' as page-relative locations for items and their locks.
        """

        item_path = page.getPagePath(*item_dir.split("/"))
        lock_path = page.getPagePath(*lock_dir.split("/"))
        ItemDirectoryStore.__init__(self, item_path, lock_path)
        self.page = page

    def can_write(self):

        """
        Return whether the user associated with the request may write to the
        page owning this store.
        """

        requester = self.page.request.user
        return requester and requester.may.write(self.page.page_name)

    def can_read(self):

        """
        Return whether the user associated with the request may read from the
        page owning this store.
        """

        requester = self.page.request.user
        return requester and requester.may.read(self.page.page_name)

    def can_delete(self):

        """
        Return whether the user associated with the request may delete the
        page owning this store.
        """

        requester = self.page.request.user
        return requester and requester.may.delete(self.page.page_name)

    # High-level methods.

    def append(self, item):

        "Append 'item' to the store, subject to write access."

        if self.can_write():
            ItemDirectoryStore.append(self, item)

    def __len__(self):

        "Return the number of stored items, or zero without read access."

        return self.can_read() and ItemDirectoryStore.__len__(self) or 0

    def __getitem__(self, number):

        "Return item 'number', raising IndexError without read access."

        if self.can_read():
            return ItemDirectoryStore.__getitem__(self, number)
        raise IndexError(number)

    def __delitem__(self, number):

        "Remove item 'number', subject to delete access."

        if self.can_delete():
            return ItemDirectoryStore.__delitem__(self, number)

# vim: tabstop=4 expandtab shiftwidth=4