# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - MoinSupport library (derived from EventAggregatorSupport)

    @copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk>
    @copyright: 2000-2004 Juergen Hermann <jh@web.de>,
                2005-2008 MoinMoin:ThomasWaldmann.
    @license: GNU GPL (v2 or later), see COPYING.txt for details.
"""

from DateSupport import *
from MoinMoin.Page import Page
from MoinMoin import config, search, wikiutil
from StringIO import StringIO
from shlex import shlex
import re
import time

__version__ = "0.2"

# Content type parsing.

encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?'
encoding_regexp = re.compile(encoding_regexp_str)

# Accept header parsing.

accept_regexp_str = ur';\s*q='
accept_regexp = re.compile(accept_regexp_str)

# Extraction of shared fragments.

marker_regexp_str = r"([{]{3,}|[}]{3,})"
marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}...

# Extraction of headings.

heading_regexp = re.compile(r"^(?P<level>=+)(?P<heading>.*?)(?P=level)$", re.UNICODE | re.MULTILINE)

# Category extraction from pages.

category_regexp = None

# Simple content parsing.

verbatim_regexp = re.compile(ur'(?:'
    ur'<<Verbatim\((?P<verbatim>.*?)\)>>'
    ur'|'
    ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]'
    ur'|'
    ur'!(?P<verbatim3>.*?)(\s|$)?'
    ur'|'
    ur'`(?P<monospace>.*?)`'
    ur'|'
    ur'{{{(?P<preformatted>.*?)}}}'
    ur')', re.UNICODE)

# Category discovery.

def getCategoryPattern(request):
    global category_regexp

    try:
        return request.cfg.cache.page_category_regexact
    except AttributeError:

        # Use regular expression from MoinMoin 1.7.1 otherwise.

        if category_regexp is None:
            category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
        return category_regexp

def getCategories(request):

    """
    From the AdvancedSearch macro, return a list of category page names using
    the given 'request'.
    """

    # This will return all pages with "Category" in the title.

    cat_filter = getCategoryPattern(request).search
    return request.rootpage.getPageList(filter=cat_filter)

def getCategoryMapping(category_pagenames, request):

    """
    For the given 'category_pagenames' return a list of tuples of the form
    (category name, category page name) using the given 'request'.
    """

    cat_pattern = getCategoryPattern(request)
    mapping = []
    for pagename in category_pagenames:
        name = cat_pattern.match(pagename).group("key")
        if name != "Category":
            mapping.append((name, pagename))
    mapping.sort()
    return mapping
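
# Illustrative usage (not part of the library): inside a macro or action with
# a live 'request', the category helpers above are typically combined as
# sketched below. With the fallback pattern defined above, a page named
# u"CategoryEvents" yields the key u"Events"; wikis configuring their own
# page_category_regexact may produce different keys.
#
#   categories = getCategories(request)
#   mapping = getCategoryMapping(categories, request)
#   # e.g. [(u"Events", u"CategoryEvents"), ...]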

def getCategoryPages(pagename, request):

    """
    Return the pages associated with the given category 'pagename' using the
    'request'.
    """

    query = search.QueryParser().parse_query('category:%s' % pagename)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

def filterCategoryPages(results, request):

    "Filter category pages from the given 'results' using the 'request'."

    cat_pattern = getCategoryPattern(request)
    pages = []
    for page in results.hits:
        if not cat_pattern.match(page.page_name):
            pages.append(page)
    return pages

def getAllCategoryPages(category_names, request):

    """
    Return all pages belonging to the categories having the given
    'category_names', using the given 'request'.
    """

    pages = []
    pagenames = set()

    for category_name in category_names:

        # Get the pages and page names in the category.

        pages_in_category = getCategoryPages(category_name, request)

        # Visit each page in the category.

        for page_in_category in pages_in_category:
            pagename = page_in_category.page_name

            # Only process each page once.

            if pagename in pagenames:
                continue
            else:
                pagenames.add(pagename)

            pages.append(page_in_category)

    return pages

def getPagesForSearch(search_pattern, request):

    """
    Return result pages for a search employing the given 'search_pattern' and
    using the given 'request'.
    """

    query = search.QueryParser().parse_query(search_pattern)
    results = search.searchPages(request, query, "page_name")
    return filterCategoryPages(results, request)

# WikiDict functions.

def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request'.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):
        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:

            # Close any open sections, returning to exposed text regions.

            if marker:

                # Add any marker to the current region, regardless of whether it
                # successfully closes a section.

                regions[-1] += match_text

                if match_text.startswith("}") and len(marker) == len(match_text):
                    marker = None

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a new section.

            else:
                marker = match_text
                regions.append("")

                # Add the marker to the new region.

                regions[-1] += match_text

        # The match text alternates between text between markers and the markers
        # themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, arguments, body text).
    """

    fragments = []

    for region in regions:
        if region.startswith("{{{"):

            body = region.lstrip("{").rstrip("}").lstrip()

            # Remove any prelude and process metadata.

            if body.startswith("#!"):
                body = body[2:]

                try:
                    arguments, body = body.split("\n", 1)
                except ValueError:
                    arguments = body
                    body = ""

                # Get any parser/format declaration.

                if arguments and not arguments[0].isspace():
                    details = arguments.split(None, 1)
                    if len(details) == 2:
                        format, arguments = details
                    else:
                        format = details[0]
                        arguments = ""
                else:
                    format = None

                # Get the attributes/arguments for the region.

                attributes = parseAttributes(arguments, False)

                # Add an entry for the format in the attribute dictionary.

                if format and not attributes.has_key(format):
                    attributes[format] = True

                fragments.append((format, attributes, body))

            else:
                fragments.append((None, {}, body))

        else:
            fragments.append((None, {}, region))

    return fragments

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))
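
# Illustrative sketch (not exercised by the library itself): getFragments
# applied to a string containing a single parser region. Tracing the functions
# above, a region such as...
#
#   s = u"{{{#!python numbering=on\nprint 'x'\n}}}"
#
# ...yields one fragment whose format is u"python", whose attribute mapping is
# roughly {u"numbering" : u"on", u"python" : True}, and whose body is the text
# after the arguments line:
#
#   getFragments(s)        # [(u"python", {...}, u"print 'x'\n")]
#   getFragments(s, True)  # also includes (None, {}, ...) fragments for any
#                          # text outside {{{...}}} regions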

# Heading extraction.

def getHeadings(s):

    """
    Return tuples of the form (level, title, span) for headings found within the
    given string 's'. The span is itself a (start, end) tuple indicating the
    matching region of 's' for a heading declaration.
    """

    headings = []

    for match in heading_regexp.finditer(s):
        headings.append(
            (len(match.group("level")), match.group("heading"), match.span())
            )

    return headings

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False

    for token in shlex(f):

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                attrs[name.lower()] = escape and "true" or True
                name = wikiutil.escape(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token
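
# Illustrative sketch (not part of the library): the effect of the 'escape'
# flag on parseAttributes. With escaping disabled, quoting is stripped and
# bare names become True; with escaping enabled, values are quoted and bare
# names become "true", suitable for the formatter API.
#
#   parseAttributes(u'numbering="on" start=1 wide', False)
#     # {u"numbering" : u"on", u"start" : u"1", u"wide" : True}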

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):
        return not not self.form.getlist(name)

    def get(self, name, default=None):
        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def get_option_list(self, value, values):

        """
        Return a list of HTML element definitions for options describing the
        given 'values', selecting the option with the specified 'value' if
        present.
        """

        options = []
        for available_value in values:
            selected = self._get_selected(available_value, value)
            options.append('<option value="%s" %s>%s</option>' % (
                escattr(available_value), selected, wikiutil.escape(available_value)))
        return options

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]
        if not value: # true if 0 obtained
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form
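
# Illustrative sketch (not part of the library): regardless of the MoinMoin
# version, get_form returns an object exposing the 1.8-style interface where
# each parameter maps to a list of values. A typical lookup therefore takes
# the first element of the list, falling back to a default:
#
#   form = get_form(request)
#   category = form.get("category", [None])[0]
#
# (getParameter, defined later in this module, wraps exactly this pattern.)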

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):
        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):
    return wikiutil.escape(s, 1)

def getPathInfo(request):
    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):
        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters.
        """

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryPermits(self.category, other.category)
        subcategory = categoryPermits(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return not self.parameters or self.parameters == other.parameters
            else:
                return True
        else:
            return False

    def __eq__(self, other):

        """
        Return whether this media type is effectively the same as the 'other'
        media type.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryMatches(self.category, other.category)
        subcategory = categoryMatches(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return self.parameters == other.parameters or \
                       not self.parameters or not other.parameters
            else:
                return True
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.media_range)

    def __repr__(self):
        return "MediaRange(%r)" % self.media_range
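
# Illustrative sketch (not part of the library): how MediaRange treats
# wildcards and parameters when matching concrete types.
#
#   MediaRange("text/*").permits("text/html")        # True (wildcard subcategory)
#   MediaRange("text/html").permits("text/plain")    # False (subcategories differ)
#   MediaRange("text/html;level=1").permits("text/html")
#                                                     # False (parameters not satisfied)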

def categoryMatches(this, that):

    """
    Return the basis of a match between 'this' and 'that' or False if the given
    categories do not match.
    """

    return (this == "*" or this == that) and this or \
           that == "*" and that or False

def categoryPermits(this, that):

    """
    Return whether 'this' category permits 'that' category. Where 'this' is a
    wildcard ("*"), 'that' should always match. A value of False is returned if
    the categories do not otherwise match.
    """

    return (this == "*" or this == that) and this or False

def getMappingFromParameterStrings(l):

    """
    Return a mapping representing the list of "name=value" strings given by 'l'.
    """

    parameters = {}

    for parameter in l:
        parts = parameter.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:]).strip()
        parameters[name] = value

    return parameters

def getContentPreferences(accept):

    """
    Return a ContentPreferences object for the content/media types extracted
    from the given 'accept' header value, with each preference represented as
    a MediaRange recording any quality ("q") and extension parameters.

    See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
    """

    preferences = []

    for field in accept.split(","):

        # The media type with parameters (defined by the "media-range") is
        # separated from any other parameters (defined as "accept-extension"
        # parameters) by a quality parameter.

        fparts = accept_regexp.split(field)

        # The first part is always the media type.

        media_type = fparts[0].strip()

        # Any other parts can be interpreted as extension parameters.

        if len(fparts) > 1:
            fparts = ("q=" + ";q=".join(fparts[1:])).split(";")
        else:
            fparts = []

        # Each field in the preferences can incorporate parameters separated by
        # semicolon characters.

        parameters = getMappingFromParameterStrings(fparts)
        media_range = MediaRange(media_type, parameters)
        preferences.append(media_range)

    return ContentPreferences(preferences)

class ContentPreferences:

    "A wrapper around content preference information."

    def __init__(self, preferences):
        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            if not ordered.has_key(key):
                ordered[key] = []

            ordered[key].append(media_range)

        # Return the preferences in descending order of quality and specificity.

        keys = ordered.keys()
        keys.sort(reverse=True)
        return [ordered[key] for key in keys]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available[:])

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        if not matches.has_key(q):
                            matches[q] = []
                        matches[q].append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Sort the matches in descending order of quality.

        all_q = matches.keys()

        if all_q:
            all_q.sort(reverse=True)
            return [matches[q] for q in all_q]
        else:
            return []

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []
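
# Illustrative sketch (not part of the library): parsing an Accept header and
# selecting from a set of available types. More specific ranges claim
# available types first; the claimed groups are then ordered by their quality
# values, so here the exact "text/html" range wins over the "*/*" fallback.
#
#   prefs = getContentPreferences("text/html;q=0.9, */*;q=0.1")
#   prefs.get_preferred_types(["text/plain", "text/html"])    # ["text/html"]
#   prefs.get_acceptable_types(["text/plain", "text/html"])
#     # [["text/html"], ["text/plain"]]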

# Content type parsing.

def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None
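
# Illustrative sketch (not part of the library): splitting a Content-Type
# header value into its media type and optional charset.
#
#   getContentTypeAndEncoding("text/html; charset=UTF-8")   # ("text/html", "UTF-8")
#   getContentTypeAndEncoding("text/plain")                 # ("text/plain", None)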

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()

    if not revisions:
        return {}

    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=True)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])
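
# Illustrative sketch (not part of the library): getSimpleWikiText retains the
# wrapped text of verbatim, monospaced and preformatted markup while dropping
# the markup itself.
#
#   getSimpleWikiText(u"See <<Verbatim(MoinMoin)>> and `text`")
#     # u"See MoinMoin and text"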

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    return title.replace("_", " ").replace("/", u" » ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if not prefix:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    if anchor:
        url += "#%s" % anchor

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4