# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

    @copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk>
    @copyright: 2000-2004 Juergen Hermann <jh@web.de>,
                2005-2008 MoinMoin:ThomasWaldmann.
    @license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time

__version__ = "0.2"

# Content type parsing.

encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?'
encoding_regexp = re.compile(encoding_regexp_str)

# Accept header parsing.

accept_regexp_str = ur';\s*q='
accept_regexp = re.compile(accept_regexp_str)

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

category_regexp = None

# Simple content parsing.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):
    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages
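
# Illustrative sketch, not part of the original module: combining the category
# helpers above from a macro or action, given a live MoinMoin 'request'. The
# helper name below is hypothetical and only demonstrates the intended usage.

def _example_category_members(request):

    """
    Return a mapping from category names to the names of their member pages,
    using the given 'request'.
    """

    members = {}
    for name, pagename in getCategoryMapping(getCategories(request), request):
        members[name] = [page.page_name for page in getCategoryPages(pagename, request)]
    return members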

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        elif not is_block:

            # Close any open sections, returning to exposed text regions.

            if marker:
                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a section if an appropriate marker
            # is given.

            elif match_text.startswith("{"):
                marker = match_text
                regions.append("")

            # Markers and section text are added to the current region.

            regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, arguments, body text).
    """

    fragments = []

    for region in regions:
        if region.startswith("{{{"):

            body = region.lstrip("{").rstrip("}").lstrip()

            # Remove any prelude and process metadata.

            if body.startswith("#!"):
                body = body[2:]

                try:
                    arguments, body = body.split("\n", 1)
                except ValueError:
                    arguments = body
                    body = ""

                # Get any parser/format declaration.

                if arguments and not arguments[0].isspace():
                    details = arguments.split(None, 1)
                    if len(details) == 2:
                        format, arguments = details
                    else:
                        format = details[0]
                        arguments = ""
                else:
                    format = None

                # Get the attributes/arguments for the region.

                attributes = parseAttributes(arguments, False)

                # Add an entry for the format in the attribute dictionary.

                if format and not attributes.has_key(format):
                    attributes[format] = True

                fragments.append((format, attributes, body))

            else:
                fragments.append((None, {}, body))

        else:
            fragments.append((None, {}, region))

    return fragments

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False

    for token in shlex(f):

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs
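
# Illustrative sketch, not part of the original module: how getFragments and
# parseAttributes decompose a region of page text. The sample body text and the
# helper name are invented for this example.

def _example_fragments():

    "Return the fragments parsed from a small sample body."

    sample = "{{{#!wiki caution\nMind the gap.\n}}}"

    # A single fragment should result, with "wiki" as the format, an attribute
    # mapping containing "wiki" and "caution", and the text inside the region
    # as the body.

    return getFragments(sample)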

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):
    return wikiutil.escape(s, 1)

def getPathInfo(request):
    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):
        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters.
        """

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
618 """ 619 620 if not isinstance(other, MediaRange): 621 other = MediaRange(other) 622 623 category = categoryPermits(self.category, other.category) 624 subcategory = categoryPermits(self.subcategory, other.subcategory) 625 626 if category and subcategory: 627 if "*" not in (category, subcategory): 628 return not self.parameters or self.parameters == other.parameters 629 else: 630 return True 631 else: 632 return False 633 634 def __eq__(self, other): 635 636 """ 637 Return whether this media type is effectively the same as the 'other' 638 media type. 639 """ 640 641 if not isinstance(other, MediaRange): 642 other = MediaRange(other) 643 644 category = categoryMatches(self.category, other.category) 645 subcategory = categoryMatches(self.subcategory, other.subcategory) 646 647 if category and subcategory: 648 if "*" not in (category, subcategory): 649 return self.parameters == other.parameters or \ 650 not self.parameters or not other.parameters 651 else: 652 return True 653 else: 654 return False 655 656 def __ne__(self, other): 657 return not self.__eq__(other) 658 659 def __hash__(self): 660 return hash(self.media_range) 661 662 def __repr__(self): 663 return "MediaRange(%r)" % self.media_range 664 665 def categoryMatches(this, that): 666 667 """ 668 Return the basis of a match between 'this' and 'that' or False if the given 669 categories do not match. 670 """ 671 672 return (this == "*" or this == that) and this or \ 673 that == "*" and that or False 674 675 def categoryPermits(this, that): 676 677 """ 678 Return whether 'this' category permits 'that' category. Where 'this' is a 679 wildcard ("*"), 'that' should always match. A value of False is returned if 680 the categories do not otherwise match. 681 """ 682 683 return (this == "*" or this == that) and this or False 684 685 def getMappingFromParameterStrings(l): 686 687 """ 688 Return a mapping representing the list of "name=value" strings given by 'l'. 689 """ 690 691 parameters = {} 692 693 for parameter in l: 694 parts = parameter.split("=") 695 name = parts[0].strip() 696 value = "=".join(parts[1:]).strip() 697 parameters[name] = value 698 699 return parameters 700 701 def getContentPreferences(accept): 702 703 """ 704 Return a mapping from media types to parameters for content/media types 705 extracted from the given 'accept' header value. The mapping is returned in 706 the form of a list of (media type, parameters) tuples. 707 708 See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1 709 """ 710 711 preferences = [] 712 713 for field in accept.split(","): 714 715 # The media type with parameters (defined by the "media-range") is 716 # separated from any other parameters (defined as "accept-extension" 717 # parameters) by a quality parameter. 718 719 fparts = accept_regexp.split(field) 720 721 # The first part is always the media type. 722 723 media_type = fparts[0].strip() 724 725 # Any other parts can be interpreted as extension parameters. 726 727 if len(fparts) > 1: 728 fparts = ("q=" + ";q=".join(fparts[1:])).split(";") 729 else: 730 fparts = [] 731 732 # Each field in the preferences can incorporate parameters separated by 733 # semicolon characters. 734 735 parameters = getMappingFromParameterStrings(fparts) 736 media_range = MediaRange(media_type, parameters) 737 preferences.append(media_range) 738 739 return ContentPreferences(preferences) 740 741 class ContentPreferences: 742 743 "A wrapper around content preference information." 

    def __init__(self, preferences):
        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            if not ordered.has_key(key):
                ordered[key] = []

            ordered[key].append(media_range)

        # Return the preferences in descending order of quality and specificity.

        keys = ordered.keys()
        keys.sort(reverse=True)
        return [ordered[key] for key in keys]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available[:])

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        if not matches.has_key(q):
                            matches[q] = []
                        matches[q].append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Sort the matches in descending order of quality.

        all_q = matches.keys()

        if all_q:
            all_q.sort(reverse=True)
            return [matches[q] for q in all_q]
        else:
            return []

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []
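
# Illustrative sketch, not part of the original module: typical content
# negotiation using the classes above. The Accept header value and the list of
# available types are invented, and the helper name is hypothetical.

def _example_negotiate():

    "Choose a representation for a sample Accept header value."

    prefs = getContentPreferences("text/html;q=0.9, application/xhtml+xml, */*;q=0.1")

    # The most specific match with the highest quality should win, so this is
    # expected to return ["application/xhtml+xml"].

    return prefs.get_preferred_types(["text/plain", "application/xhtml+xml"])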

# Content type parsing.

def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=True)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])
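
# Illustrative example, not part of the original module: getSimpleWikiText
# should reduce markup such as
#
#   getSimpleWikiText(u"See `x` and <<Verbatim(Y)>> here")
#
# to the plain text u"See x and Y here".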

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    return title.replace("_", " ").replace("/", u" » ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4