# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

    @copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk>
    @copyright: 2000-2004 Juergen Hermann <jh@web.de>,
                2005-2008 MoinMoin:ThomasWaldmann.
    @license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time

__version__ = "0.2"

# Content type parsing.

encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?'
encoding_regexp = re.compile(encoding_regexp_str)

# Accept header parsing.

accept_regexp_str = ur';\s*q='
accept_regexp = re.compile(accept_regexp_str)

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

category_regexp = None

# Simple content parsing.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):
    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)
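
# Illustrative sketch (not part of the library): getCategories returns category
# page names which getCategoryMapping then reduces to (name, page name) tuples,
# omitting the generic "CategoryCategory" page. Assuming a live 'request':
#
#   getCategoryMapping([u"CategoryEvents", u"CategoryCategory"], request)
#   # -> [(u"Events", u"CategoryEvents")]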

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, attributes, body text).
    """

    fragments = []

    for region in regions:
        format, attributes, body, header, close = getFragmentFromRegion(region)
        fragments.append((format, attributes, body))

    return fragments

def getFragmentFromRegion(region):

    """
    Return a fragment for the given 'region' having the form (format,
    attributes, body text, header, close), where the 'header' is the original
    declaration of the 'region' or None if no explicit region is defined, and
    'close' is the closing marker of the 'region' or None if no explicit region
    is defined.
    """

    if region.startswith("{{{"):

        body = region.lstrip("{")
        level = len(region) - len(body)
        body = body.rstrip("}").lstrip()

        # Remove any prelude and process metadata.

        if body.startswith("#!"):

            try:
                declaration, body = body.split("\n", 1)
            except ValueError:
                declaration = body
                body = ""

            arguments = declaration[2:]

            # Get any parser/format declaration.

            if arguments and not arguments[0].isspace():
                details = arguments.split(None, 1)
                if len(details) == 2:
                    format, arguments = details
                else:
                    format = details[0]
                    arguments = ""
            else:
                format = None

            # Get the attributes/arguments for the region.

            attributes = parseAttributes(arguments, False)

            # Add an entry for the format in the attribute dictionary.

            if format and not attributes.has_key(format):
                attributes[format] = True

            return format, attributes, body, level * "{" + declaration + "\n", level * "}"

        else:
            return None, {}, body, level * "{" + "\n", level * "}"

    else:
        return None, {}, region, None, None

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings
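
# Illustrative sketch (not part of the library): getFragments splits wiki text
# into regions and then fragments of the form (format, attributes, body text).
# Assuming input such as the following, it would yield roughly:
#
#   s = u"Intro\n{{{#!python\nprint 'x'\n}}}\nOutro"
#   getFragments(s, True)
#   # -> [(None, {}, u"Intro\n"),
#   #     (u"python", {u"python" : True}, u"print 'x'\n"),
#   #     (None, {}, u"\nOutro")]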

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False

    for token in shlex(f):

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token
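
# Illustrative sketch (not part of the library): with escaping disabled,
# parseAttributes removes any quoting from values and records name-only
# attributes as True, so a section argument string would be parsed roughly as
# follows:
#
#   parseAttributes('style="color: red" lang=en numbered', False)
#   # -> {"style" : "color: red", "lang" : "en", "numbered" : True}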

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

    def __delitem__(self, name):
        del self.form[name]

    def keys(self):
        return self.form.keys()

    def items(self):
        return self.form.items(True)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):
    return wikiutil.escape(s, 1)

def getPathInfo(request):
    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)
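
# Illustrative sketch (not part of the library): sending response headers for a
# rendered page, assuming a live 'request' and a 'metadata' dictionary of the
# kind returned by getMetadata (defined below):
#
#   writeHeaders(request, "text/html", metadata, "200 OK")
#   # emits a Content-Type header using config.charset, a Last-Modified header
#   # when "last-modified" is present in the metadata, and a Status header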

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):
        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters.
        """

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryPermits(self.category, other.category)
        subcategory = categoryPermits(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return not self.parameters or self.parameters == other.parameters
            else:
                return True
        else:
            return False

    def __eq__(self, other):

        """
        Return whether this media type is effectively the same as the 'other'
        media type.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryMatches(self.category, other.category)
        subcategory = categoryMatches(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return self.parameters == other.parameters or \
                    not self.parameters or not other.parameters
            else:
                return True
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.media_range)

    def __repr__(self):
        return "MediaRange(%r)" % self.media_range

def categoryMatches(this, that):

    """
    Return the basis of a match between 'this' and 'that' or False if the given
    categories do not match.
    """

    return (this == "*" or this == that) and this or \
        that == "*" and that or False

def categoryPermits(this, that):

    """
    Return whether 'this' category permits 'that' category. Where 'this' is a
    wildcard ("*"), 'that' should always match. A value of False is returned if
    the categories do not otherwise match.
    """

    return (this == "*" or this == that) and this or False

def getMappingFromParameterStrings(l):

    """
    Return a mapping representing the list of "name=value" strings given by 'l'.
    """

    parameters = {}

    for parameter in l:
        parts = parameter.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:]).strip()
        parameters[name] = value

    return parameters
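
# Illustrative sketch (not part of the library): MediaRange.permits is
# asymmetric, with wildcards on the accepting side permitting more specific
# offered types:
#
#   MediaRange("text/*").permits("text/html")        # -> True
#   MediaRange("text/plain").permits("text/html")    # -> False
#   MediaRange("text/html").permits("text/*")        # -> False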

def getContentPreferences(accept):

    """
    Return a mapping from media types to parameters for content/media types
    extracted from the given 'accept' header value. The mapping is returned in
    the form of a list of (media type, parameters) tuples.

    See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
    """

    preferences = []

    for field in accept.split(","):

        # The media type with parameters (defined by the "media-range") is
        # separated from any other parameters (defined as "accept-extension"
        # parameters) by a quality parameter.

        fparts = accept_regexp.split(field)

        # The first part is always the media type.

        media_type = fparts[0].strip()

        # Any other parts can be interpreted as extension parameters.

        if len(fparts) > 1:
            fparts = ("q=" + ";q=".join(fparts[1:])).split(";")
        else:
            fparts = []

        # Each field in the preferences can incorporate parameters separated by
        # semicolon characters.

        parameters = getMappingFromParameterStrings(fparts)
        media_range = MediaRange(media_type, parameters)
        preferences.append(media_range)

    return ContentPreferences(preferences)

class ContentPreferences:

    "A wrapper around content preference information."

    def __init__(self, preferences):
        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            if not ordered.has_key(key):
                ordered[key] = []

            ordered[key].append(media_range)

        # Return the preferences in descending order of quality and specificity.

        keys = ordered.keys()
        keys.sort(reverse=True)
        return [ordered[key] for key in keys]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available[:])

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        if not matches.has_key(q):
                            matches[q] = []
                        matches[q].append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Sort the matches in descending order of quality.

        all_q = matches.keys()

        if all_q:
            all_q.sort(reverse=True)
            return [matches[q] for q in all_q]
        else:
            return []

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []

# Content type parsing.

def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None
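
# Illustrative sketch (not part of the library): negotiating a response type
# from an Accept header value, and parsing a Content-Type header value:
#
#   prefs = getContentPreferences("text/html;q=0.9, */*;q=0.1")
#   prefs.get_preferred_types(["text/plain", "text/html"])
#   # -> ["text/html"]
#
#   getContentTypeAndEncoding("text/html; charset=UTF-8")
#   # -> ("text/html", "UTF-8")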

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, inhibit_p=True, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=inhibit_p)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text
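
# Illustrative sketch (not part of the library): rendering a wiki text snippet
# to HTML, assuming a live 'request' whose current page provides the source
# format and whose HTML formatter is available as request.html_formatter:
#
#   html = formatText(u"Some ''wiki'' text", request, request.html_formatter)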

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    return title.replace("_", " ").replace("/", u" » ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4