1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012, 2013, 2014 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de> 7 2004 by Florian Festi 8 2006 by Mikko Virkkil 9 2005-2008 MoinMoin:ThomasWaldmann 10 2007 MoinMoin:ReimarBauer 11 2008 MoinMoin:FlorianKrupicka (redirectedOutput code) 12 @license: GNU GPL (v2 or later), see COPYING.txt for details. 13 """ 14 15 from DateSupport import * 16 from MoinMoin.parser import text_moin_wiki 17 from MoinMoin.Page import Page 18 from MoinMoin import config, search, wikiutil 19 from shlex import shlex 20 import re 21 import time 22 import os 23 import codecs 24 25 try: 26 from cStringIO import StringIO 27 except ImportError: 28 from StringIO import StringIO 29 30 # Moin 1.9 request parameters. 31 32 try: 33 from MoinMoin.support.werkzeug.datastructures import MultiDict 34 except ImportError: 35 pass 36 37 __version__ = "0.5" 38 39 # Extraction of shared fragments. 40 41 marker_regexp_str = r"([{]{3,}|[}]{3,})" 42 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 43 44 # Extraction of headings. 45 46 heading_regexp_str = r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$" 47 heading_regexp = re.compile(heading_regexp_str, re.UNICODE | re.MULTILINE) 48 49 # Category extraction from pages. 50 51 category_regexp = None 52 53 # Simple content parsing. 54 55 verbatim_regexp = re.compile(ur'(?:' 56 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 57 ur'|' 58 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 59 ur'|' 60 ur'!(?P<verbatim3>.*?)(\s|$)?' 61 ur'|' 62 ur'`(?P<monospace>.*?)`' 63 ur'|' 64 ur'{{{(?P<preformatted>.*?)}}}' 65 ur')', re.UNICODE) 66 67 # Category discovery. 
def getCategoryPattern(request):

    """
    Return a compiled pattern matching category page names, preferring the
    pattern cached on the 'request' configuration where available.
    """

    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:

        # Exclude the category pages themselves from the hits.

        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request, superuser=False):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'. If the optional 'superuser' is specified as a true value, no read
    access check will be made.
    """

    if pagename and Page(request, pagename).exists() and (superuser or request.user.may.read(pagename)):

        # Moin 1.9 exposes dicts via a method; earlier releases via a mapping.

        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

def groupHasMember(request, groupname, username):

    """
    Return whether 'username' belongs to the group described by 'groupname',
    using whichever group API the 'request' provides.
    """

    if hasattr(request.dicts, "has_member"):
        return request.dicts.has_member(groupname, username)
    else:
        return username in request.groups.get(groupname, [])

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                # A closing marker must match the opening marker's length.

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        # Determine the marker length from the leading braces.

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.

    Because Unicode was probably not around when shlex, used here to tokenise
    the attributes, was introduced, and since StringIO is not Unicode-capable,
    any non-ASCII characters should be quoted in attributes.
    """

    attrs = {}
    f = StringIO(s.encode("utf-8"))
    name = None
    need_value = False
    lex = shlex(f)
    lex.wordchars += "-"

    for token in lex:
        token = unicode(token, "utf-8")

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                attrs[name.lower()] = escape and "true" or True

                # NOTE(review): unlike the initial-name branch above, this
                # NOTE(review): always escapes and never strips quoting even
                # NOTE(review): when 'escape' is false - confirm intended.

                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Macro argument parsing.

def parseMacroArguments(args):

    """
    Interpret the arguments. To support commas in labels, the label argument
    should be quoted. For example:

    "label=No, thanks!"
    """

    try:
        parsed_args = args and wikiutil.parse_quoted_separated(args, name_value=False) or []
    except AttributeError:

        # Older releases lack parse_quoted_separated; fall back to a split.

        parsed_args = args.split(",")

    pairs = []
    for arg in parsed_args:
        if arg:
            pair = arg.split("=", 1)
            if len(pair) < 2:
                pairs.append((None, arg))
            else:
                pairs.append(tuple(pair))

    return pairs

def parseDictEntry(entry, unqualified=None):

    """
    Return the parameters specified by the given dict 'entry' string. The
    optional 'unqualified' parameter can be used to indicate parameters that
    need not be specified together with a keyword and can therefore be populated
    in the given order as such unqualified parameters are encountered.

    NOTE: This is similar to parseMacroArguments but employs space as a
    NOTE: separator and attempts to assign unqualified parameters.
    """

    parameters = {}
    unqualified = unqualified or ()

    try:
        parsed_args = entry and wikiutil.parse_quoted_separated(entry, separator=None, name_value=False) or []
    except AttributeError:
        parsed_args = entry.split()

    for arg in parsed_args:
        try:
            argname, argvalue = arg.split("=", 1)

            # Detect unlikely parameter names.

            if not argname.isalpha():
                raise ValueError

            parameters[argname] = argvalue

        # Unqualified parameters are assumed to be one of a recognised set.

        except ValueError:

            # Assign to the first unqualified parameter not yet populated.

            for argname in unqualified:
                if not parameters.has_key(argname):
                    parameters[argname] = arg
                    break

    return parameters

# Macro argument quoting.

def quoteMacroArguments(args):

    """
    Quote the given 'args' - a collection of (name, value) tuples - returning a
    string containing the comma-separated, quoted arguments.
    """

    quoted = []

    for name, value in args:
        quoted.append(quoteMacroArgument(name, value))

    return ",".join(quoted)

def quoteMacroArgument(name, value):

    """
    Quote the argument with the given 'name' (or None indicating an unnamed
    argument) and 'value' so that it can be used with a macro.
    """

    # Double quotes are escaped by doubling them, as in many quoting schemes.

    value = unicode(value).replace('"', '""')
    if name is None:
        return '"%s"' % value
    else:
        return '"%s=%s"' % (name, value)

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, request):
        self.request = request
        self.form = request.values

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __setitem__(self, name, value):

        # The underlying structure may be immutable; copy it on first write.

        try:
            self.form.setlist(name, value)
        except TypeError:
            self._write_enable()
            self.form.setlist(name, value)

    def __delitem__(self, name):
        try:
            del self.form[name]
        except TypeError:
            self._write_enable()
            del self.form[name]

    def _write_enable(self):

        "Replace the form with a mutable copy, updating the request."

        self.form = self.request.values = MultiDict(self.form)

    def keys(self):
        return self.form.keys()

    def items(self):
        return self.form.lists()

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):

        # Split each "Name: value" header string, preserving any colons in the
        # value portion.

        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Escape 's' for use in an HTML attribute value."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path information for the given 'request'."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    elif hasattr(request, "env"):
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())
    else:
        return None

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Give the time zone as UTC.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + ("UTC",)), "comment" : comment}

# Page parsing and formatting of embedded content.
def getOutputTypes(request, format):

    """
    Using the 'request' and the 'format' of a fragment, return the media types
    available for the fragment.
    """

    return getParserOutputTypes(getParserClass(request, format))

def getParserOutputTypes(parser):

    "Return the media types supported by the given 'parser'."

    # This uses an extended parser API method if available.

    if parser and hasattr(parser, "getOutputTypes"):
        return parser.getOutputTypes()
    else:
        return ["text/html"]

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install the given formatter on the request, restoring the
    # original formatter afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        # Only the wiki parser is known to accept inhibit_p.

        if isinstance(parser, text_moin_wiki.Parser):
            return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
        else:
            return redirectedOutput(request, parser, fmt)
    finally:
        request.formatter = old_fmt

def formatTextForOutputType(text, request, parser_cls, output_type):

    """
    Format the given 'text' using the specified 'request' and parser class
    'parser_cls', producing output of the given 'output_type'.
    """

    parser = parser_cls(text, request)
    buf = codecs.getwriter("utf-8")(StringIO())
    try:
        parser.formatForOutputType(output_type, buf.write)
        return unicode(buf.getvalue(), "utf-8")
    finally:
        buf.close()

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = codecs.getwriter("utf-8")(StringIO())
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)

        # Flush any pending formatter output into the buffer.

        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return unicode(text, "utf-8")

class RawParser:

    "A parser that just formats everything as text."

    def __init__(self, raw, request, **kw):
        self.raw = raw
        self.request = request

    def format(self, fmt, write=None):
        (write or self.request.write)(fmt.text(self.raw))

# Finding components for content types.

def getParsersForContentType(cfg, mimetype):

    """
    Find parsers that support the given 'mimetype', constructing a dictionary
    mapping content types to lists of parsers that is then cached in the 'cfg'
    object. A list of suitable parsers is returned for 'mimetype'.
    """

    if not hasattr(cfg.cache, "MIMETYPE_TO_PARSER"):
        available = {}

        for name in wikiutil.getPlugins("parser", cfg):

            # Import each parser in order to inspect supported content types.

            try:
                parser_cls = wikiutil.importPlugin(cfg, "parser", name, "Parser")
            except wikiutil.PluginMissingError:
                continue

            # Attempt to determine supported content types.
            # NOTE: Extensions and /etc/mime.types (or equivalent) could also be
            # NOTE: used.

            if hasattr(parser_cls, "input_mimetypes"):
                for input_mimetype in parser_cls.input_mimetypes:
                    if not available.has_key(input_mimetype):
                        available[input_mimetype] = []
                    available[input_mimetype].append(parser_cls)

            # Support some basic parsers.

            elif name == "text_moin_wiki":
                available["text/moin-wiki"] = [parser_cls]
                available["text/moin"] = [parser_cls]
            elif name == "text_html":
                available["text/html"] = [parser_cls]
                available["application/xhtml+xml"] = [parser_cls]

        cfg.cache.MIMETYPE_TO_PARSER = available

    return cfg.cache.MIMETYPE_TO_PARSER.get(mimetype, [])

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    l = []
    last = 0

    for m in verbatim_regexp.finditer(text):
        start, end = m.span()
        l.append(text[last:start])

        # Process the verbatim macro arguments.

        args = m.group("verbatim") or m.group("verbatim2")
        if args:
            l += [v for (n, v) in parseMacroArguments(args)]

        # Or just add the match groups.

        else:
            l += [s for s in m.groups() if s]

        last = end

    l.append(text[last:])
    return "".join(l)

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % quoteMacroArgument(None, text)

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the u" ? " separator looks like a mis-encoded non-ASCII
    # NOTE(review): character (file is iso-8859-1) - confirm intended glyph.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    # Prefer the current page's formatter where one is active.

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4