1 # -*- coding: iso-8859-1 -*- 2 """ 3 MoinMoin - MoinSupport library (derived from EventAggregatorSupport) 4 5 @copyright: 2008, 2009, 2010, 2011, 2012 by Paul Boddie <paul@boddie.org.uk> 6 @copyright: 2000-2004 Juergen Hermann <jh@web.de>, 7 2005-2008 MoinMoin:ThomasWaldmann. 8 @license: GNU GPL (v2 or later), see COPYING.txt for details. 9 """ 10 11 from DateSupport import * 12 from MoinMoin.Page import Page 13 from MoinMoin import config, search, wikiutil 14 from StringIO import StringIO 15 from shlex import shlex 16 import re 17 import time 18 19 __version__ = "0.2" 20 21 # Content type parsing. 22 23 encoding_regexp_str = ur'(?P<content_type>[^\s;]*)(?:;\s*charset=(?P<encoding>[-A-Za-z0-9]+))?' 24 encoding_regexp = re.compile(encoding_regexp_str) 25 26 # Accept header parsing. 27 28 accept_regexp_str = ur';\s*q=' 29 accept_regexp = re.compile(accept_regexp_str) 30 31 # Extraction of shared fragments. 32 33 marker_regexp_str = r"([{]{3,}|[}]{3,})" 34 marker_regexp = re.compile(marker_regexp_str, re.MULTILINE | re.DOTALL) # {{{... or }}}... 35 36 # Category extraction from pages. 37 38 category_regexp = None 39 40 # Simple content parsing. 41 42 verbatim_regexp = re.compile(ur'(?:' 43 ur'<<Verbatim\((?P<verbatim>.*?)\)>>' 44 ur'|' 45 ur'\[\[Verbatim\((?P<verbatim2>.*?)\)\]\]' 46 ur'|' 47 ur'!(?P<verbatim3>.*?)(\s|$)?' 48 ur'|' 49 ur'`(?P<monospace>.*?)`' 50 ur'|' 51 ur'{{{(?P<preformatted>.*?)}}}' 52 ur')', re.UNICODE) 53 54 # Category discovery. 55 56 def getCategoryPattern(request): 57 global category_regexp 58 59 try: 60 return request.cfg.cache.page_category_regexact 61 except AttributeError: 62 63 # Use regular expression from MoinMoin 1.7.1 otherwise. 64 65 if category_regexp is None: 66 category_regexp = re.compile(u'^%s$' % ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE) 67 return category_regexp 68 69 def getCategories(request): 70 71 """ 72 From the AdvancedSearch macro, return a list of category page names using 73 the given 'request'. 
74 """ 75 76 # This will return all pages with "Category" in the title. 77 78 cat_filter = getCategoryPattern(request).search 79 return request.rootpage.getPageList(filter=cat_filter) 80 81 def getCategoryMapping(category_pagenames, request): 82 83 """ 84 For the given 'category_pagenames' return a list of tuples of the form 85 (category name, category page name) using the given 'request'. 86 """ 87 88 cat_pattern = getCategoryPattern(request) 89 mapping = [] 90 for pagename in category_pagenames: 91 name = cat_pattern.match(pagename).group("key") 92 if name != "Category": 93 mapping.append((name, pagename)) 94 mapping.sort() 95 return mapping 96 97 def getCategoryPages(pagename, request): 98 99 """ 100 Return the pages associated with the given category 'pagename' using the 101 'request'. 102 """ 103 104 query = search.QueryParser().parse_query('category:%s' % pagename) 105 results = search.searchPages(request, query, "page_name") 106 107 cat_pattern = getCategoryPattern(request) 108 pages = [] 109 for page in results.hits: 110 if not cat_pattern.match(page.page_name): 111 pages.append(page) 112 return pages 113 114 def getAllCategoryPages(category_names, request): 115 116 """ 117 Return all pages belonging to the categories having the given 118 'category_names', using the given 'request'. 119 """ 120 121 pages = [] 122 pagenames = set() 123 124 for category_name in category_names: 125 126 # Get the pages and page names in the category. 127 128 pages_in_category = getCategoryPages(category_name, request) 129 130 # Visit each page in the category. 131 132 for page_in_category in pages_in_category: 133 pagename = page_in_category.page_name 134 135 # Only process each page once. 136 137 if pagename in pagenames: 138 continue 139 else: 140 pagenames.add(pagename) 141 142 pages.append(page_in_category) 143 144 return pages 145 146 # WikiDict functions. 
def getWikiDict(pagename, request):

    """
    Return the WikiDict provided by the given 'pagename' using the given
    'request', or None if the page does not exist or may not be read.
    """

    if pagename and Page(request, pagename).exists() and request.user.may.read(pagename):

        # MoinMoin 1.9 exposes dicts via a method; earlier releases use a
        # mapping interface.

        if hasattr(request.dicts, "dict"):
            return request.dicts.dict(pagename)
        else:
            return request.dicts[pagename]
    else:
        return None

# Searching-related functions.

def getPagesFromResults(result_pages, request):

    "Return genuine pages for the given 'result_pages' using the 'request'."

    return [Page(request, page.page_name) for page in result_pages]

# Region/section parsing.

def getRegions(s, include_non_regions=False):

    """
    Parse the string 's', returning a list of explicitly declared regions.

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    regions = []
    marker = None
    is_block = True

    # Start a region for exposed text, if appropriate.

    if include_non_regions:
        regions.append("")

    for match_text in marker_regexp.split(s):

        # Capture section text.

        if is_block:
            if marker or include_non_regions:
                regions[-1] += match_text

        # Handle section markers.

        else:
            closed = False

            # Close any open sections, returning to exposed text regions.

            if marker:
                if match_text.startswith("}") and len(marker) == len(match_text):

                    # Fix: add the closing marker to the section being closed
                    # *before* starting any new exposed-text region; it was
                    # previously appended to the new region instead.

                    regions[-1] += match_text
                    marker = None
                    closed = True

                    # Start a region for exposed text, if appropriate.

                    if include_non_regions:
                        regions.append("")

            # Without a current marker, start a section if an appropriate
            # marker is given.

            elif match_text.startswith("{"):
                marker = match_text
                regions.append("")

            # Markers and section text are added to the current region.
            # Fix: guard against a stray closing marker occurring before any
            # region exists (previously an IndexError).

            if not closed and regions:
                regions[-1] += match_text

        # The match text alternates between text between markers and the
        # markers themselves.

        is_block = not is_block

    return regions

def getFragmentsFromRegions(regions):

    """
    Return fragments from the given 'regions', each having the form
    (format, arguments, body text).
    """

    fragments = []

    for region in regions:
        if region.startswith("{{{"):

            body = region.lstrip("{").rstrip("}").lstrip()

            # Remove any prelude and process metadata.

            if body.startswith("#!"):
                body = body[2:]

                arguments, body = body.split("\n", 1)

                # Get any parser/format declaration.

                if arguments and not arguments[0].isspace():
                    details = arguments.split(None, 1)
                    if len(details) == 2:
                        format, arguments = details
                    else:
                        format = details[0]
                        arguments = ""
                else:
                    format = None

                # Get the attributes/arguments for the region.

                attributes = parseAttributes(arguments, False)

                # Add an entry for the format in the attribute dictionary.

                if format and format not in attributes:
                    attributes[format] = True

                fragments.append((format, attributes, body))

            else:
                fragments.append((None, {}, body))

        else:
            fragments.append((None, {}, region))

    return fragments

def getFragments(s, include_non_regions=False):

    """
    Return fragments for the given string 's', each having the form
    (format, arguments, body text).

    If 'include_non_regions' is specified as a true value, fragments will be
    included for text between explicitly declared regions.
    """

    return getFragmentsFromRegions(getRegions(s, include_non_regions))

# Region/section attribute parsing.

def parseAttributes(s, escape=True):

    """
    Parse the section attributes string 's', returning a mapping of names to
    values. If 'escape' is set to a true value, the attributes will be suitable
    for use with the formatter API. If 'escape' is set to a false value, the
    attributes will have any quoting removed.
    """

    attrs = {}
    f = StringIO(s)
    name = None
    need_value = False

    for token in shlex(f):

        # Capture the name if needed.

        if name is None:
            name = escape and wikiutil.escape(token) or strip_token(token)

        # Detect either an equals sign or another name.

        elif not need_value:
            if token == "=":
                need_value = True
            else:
                attrs[name.lower()] = escape and "true" or True

                # Fix: honour the 'escape' flag here as well; previously the
                # token was escaped unconditionally.

                name = escape and wikiutil.escape(token) or strip_token(token)

        # Otherwise, capture a value.

        else:
            # Quoting of attributes done similarly to wikiutil.parseAttributes.

            if token:
                if escape:
                    if token[0] in ("'", '"'):
                        token = wikiutil.escape(token)
                    else:
                        token = '"%s"' % wikiutil.escape(token, 1)
                else:
                    token = strip_token(token)

            attrs[name.lower()] = token
            name = None
            need_value = False

    # Handle any name-only attributes at the end of the collection.

    if name and not need_value:
        attrs[name.lower()] = escape and "true" or True

    return attrs

def strip_token(token):

    "Return the given 'token' stripped of quoting."

    if token[0] in ("'", '"') and token[-1] == token[0]:
        return token[1:-1]
    else:
        return token

# Request-related classes and associated functions.

class Form:

    """
    A wrapper preserving MoinMoin 1.8.x (and earlier) behaviour in a 1.9.x
    environment.
    """

    def __init__(self, form):
        self.form = form

    def has_key(self, name):

        "Return whether a parameter with the given 'name' was supplied."

        return not not self.form.getlist(name)

    def get(self, name, default=None):

        """
        Return the list of values for the parameter with the given 'name', or
        the given 'default' (None if unspecified) if no values were supplied.
        """

        values = self.form.getlist(name)
        if not values:
            return default
        else:
            return values

    def __getitem__(self, name):
        return self.form.getlist(name)

class ActionSupport:

    """
    Work around disruptive MoinMoin changes in 1.9, and also provide useful
    convenience methods.
    """

    def get_form(self):
        return get_form(self.request)

    def _get_selected(self, value, input_value):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches 'input_value'.
        """

        return input_value is not None and value == input_value and 'selected="selected"' or ''

    def _get_selected_for_list(self, value, input_values):

        """
        Return the HTML attribute text indicating selection of an option (or
        otherwise) if 'value' matches one of the 'input_values'.
        """

        return value in input_values and 'selected="selected"' or ''

    def _get_input(self, form, name, default=None):

        """
        Return the input from 'form' having the given 'name', returning either
        the input converted to an integer or the given 'default' (optional, None
        if not specified).
        """

        value = form.get(name, [None])[0]

        # Missing or empty values yield the default.

        if not value:
            return default
        else:
            return int(value)

def get_form(request):

    "Work around disruptive MoinMoin changes in 1.9."

    if hasattr(request, "values"):
        return Form(request.values)
    else:
        return request.form

class send_headers_cls:

    """
    A wrapper to preserve MoinMoin 1.8.x (and earlier) request behaviour in a
    1.9.x environment.
    """

    def __init__(self, request):
        self.request = request

    def __call__(self, headers):

        "Add the given 'headers' (strings of the form 'Name: value')."

        for header in headers:
            parts = header.split(":")
            self.request.headers.add(parts[0], ":".join(parts[1:]))

def get_send_headers(request):

    "Return a function that can send response headers."

    if hasattr(request, "http_headers"):
        return request.http_headers
    elif hasattr(request, "emit_http_headers"):
        return request.emit_http_headers
    else:
        return send_headers_cls(request)

def escattr(s):

    "Return 's' escaped for use in an HTML attribute."

    return wikiutil.escape(s, 1)

def getPathInfo(request):

    "Return the path information for the given 'request'."

    if hasattr(request, "getPathinfo"):
        return request.getPathinfo()
    else:
        return request.path

def getHeader(request, header_name, prefix=None):

    """
    Using the 'request', return the value of the header with the given
    'header_name', using the optional 'prefix' to obtain protocol-specific
    headers if necessary.

    If no value is found for the given 'header_name', None is returned.
    """

    if hasattr(request, "getHeader"):
        return request.getHeader(header_name)
    elif hasattr(request, "headers"):
        return request.headers.get(header_name)
    else:
        # NOTE(review): dashes in 'header_name' are not converted to
        # underscores for this environment lookup - verify that callers pass
        # environment-style names here.

        return request.env.get((prefix and prefix + "_" or "") + header_name.upper())

def writeHeaders(request, mimetype, metadata, status=None):

    """
    Using the 'request', write resource headers using the given 'mimetype',
    based on the given 'metadata'. If the optional 'status' is specified, set
    the status header to the given value.
    """

    send_headers = get_send_headers(request)

    # Define headers.

    headers = ["Content-Type: %s; charset=%s" % (mimetype, config.charset)]

    # Define the last modified time.
    # NOTE: Consider using request.httpDate.

    latest_timestamp = metadata.get("last-modified")
    if latest_timestamp:
        headers.append("Last-Modified: %s" % latest_timestamp.as_HTTP_datetime_string())

    if status:
        headers.append("Status: %s" % status)

    send_headers(headers)

# Content/media type and preferences support.

class MediaRange:

    "A content/media type value which supports whole categories of data."

    def __init__(self, media_range, accept_parameters=None):

        """
        Initialise the object from the 'media_range' string, with optional
        'accept_parameters' (such as the quality value) kept separate from the
        media type's own parameters.
        """

        self.media_range = media_range
        self.accept_parameters = accept_parameters or {}

        parts = media_range.split(";")
        self.media_type = parts[0]
        self.parameters = getMappingFromParameterStrings(parts[1:])

        # The media type is divided into category and subcategory.

        parts = self.media_type.split("/")
        self.category = parts[0]
        self.subcategory = "/".join(parts[1:])

    def get_parts(self):

        "Return the category, subcategory parts."

        return self.category, self.subcategory

    def get_specificity(self):

        """
        Return the specificity of the media type in terms of the scope of the
        category and subcategory, and also in terms of any qualifying
        parameters. Wildcards reduce specificity; parameters increase it.
        """

        if "*" in self.get_parts():
            return -list(self.get_parts()).count("*")
        else:
            return len(self.parameters)

    def permits(self, other):

        """
        Return whether this media type permits the use of the 'other' media type
        if suggested as suitable content.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryPermits(self.category, other.category)
        subcategory = categoryPermits(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return not self.parameters or self.parameters == other.parameters
            else:
                return True
        else:
            return False

    def __eq__(self, other):

        """
        Return whether this media type is effectively the same as the 'other'
        media type.
        """

        if not isinstance(other, MediaRange):
            other = MediaRange(other)

        category = categoryMatches(self.category, other.category)
        subcategory = categoryMatches(self.subcategory, other.subcategory)

        if category and subcategory:
            if "*" not in (category, subcategory):
                return self.parameters == other.parameters or \
                    not self.parameters or not other.parameters
            else:
                return True
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.media_range)

    def __repr__(self):
        return "MediaRange(%r)" % self.media_range

def categoryMatches(this, that):

    """
    Return the basis of a match between 'this' and 'that' or False if the given
    categories do not match.
    """

    return (this == "*" or this == that) and this or \
        that == "*" and that or False

def categoryPermits(this, that):

    """
    Return whether 'this' category permits 'that' category. Where 'this' is a
    wildcard ("*"), 'that' should always match. A value of False is returned if
    the categories do not otherwise match.
    """

    return (this == "*" or this == that) and this or False

def getMappingFromParameterStrings(l):

    """
    Return a mapping representing the list of "name=value" strings given by 'l'.
    """

    parameters = {}

    for parameter in l:
        parts = parameter.split("=")
        name = parts[0].strip()
        value = "=".join(parts[1:]).strip()
        parameters[name] = value

    return parameters

def getContentPreferences(accept):

    """
    Return a mapping from media types to parameters for content/media types
    extracted from the given 'accept' header value. The mapping is returned in
    the form of a list of (media type, parameters) tuples.

    See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
    """

    preferences = []

    for field in accept.split(","):

        # The media type with parameters (defined by the "media-range") is
        # separated from any other parameters (defined as "accept-extension"
        # parameters) by a quality parameter.

        fparts = accept_regexp.split(field)

        # The first part is always the media type.

        media_type = fparts[0].strip()

        # Any other parts can be interpreted as extension parameters.

        if len(fparts) > 1:
            fparts = ("q=" + ";q=".join(fparts[1:])).split(";")
        else:
            fparts = []

        # Each field in the preferences can incorporate parameters separated by
        # semicolon characters.

        parameters = getMappingFromParameterStrings(fparts)
        media_range = MediaRange(media_type, parameters)
        preferences.append(media_range)

    return ContentPreferences(preferences)

class ContentPreferences:

    "A wrapper around content preference information."

    def __init__(self, preferences):
        self.preferences = preferences

    def __iter__(self):
        return iter(self.preferences)

    def get_ordered(self, by_quality=0):

        """
        Return a list of content/media types in descending order of preference.
        If 'by_quality' is set to a true value, the "q" value will be used as
        the primary measure of preference; otherwise, only the specificity will
        be considered.
        """

        ordered = {}

        for media_range in self.preferences:
            specificity = media_range.get_specificity()

            if by_quality:
                q = float(media_range.accept_parameters.get("q", "1"))
                key = q, specificity
            else:
                key = specificity

            ordered.setdefault(key, []).append(media_range)

        # Return the preferences in descending order of quality and specificity.

        return [ordered[key] for key in sorted(ordered.keys(), reverse=True)]

    def get_acceptable_types(self, available):

        """
        Return content/media types from those in the 'available' list supported
        by the known preferences grouped by preference level in descending order
        of preference.
        """

        matches = {}
        available = set(available)

        for level in self.get_ordered():
            for media_range in level:

                # Attempt to match available types.

                found = set()
                for available_type in available:
                    if media_range.permits(available_type):
                        q = float(media_range.accept_parameters.get("q", "1"))
                        matches.setdefault(q, []).append(available_type)
                        found.add(available_type)

                # Stop looking for matches for matched available types.

                if found:
                    available.difference_update(found)

        # Return the matches in descending order of quality.

        return [matches[q] for q in sorted(matches.keys(), reverse=True)]

    def get_preferred_types(self, available):

        """
        Return the preferred content/media types from those in the 'available'
        list, given the known preferences.
        """

        preferred = self.get_acceptable_types(available)
        if preferred:
            return preferred[0]
        else:
            return []

# Content type parsing.
def getContentTypeAndEncoding(content_type):

    """
    Return a tuple with the content/media type and encoding, extracted from the
    given 'content_type' header value. A tuple of (None, None) is returned if
    the value cannot be parsed.
    """

    m = encoding_regexp.search(content_type)
    if m:
        return m.group("content_type"), m.group("encoding")
    else:
        return None, None

# Page access functions.

def getPageURL(page):

    "Return the URL of the given 'page'."

    request = page.request
    return request.getQualifiedURL(page.url(request, relative=0))

def getFormat(page):

    "Get the format used on the given 'page'."

    return page.pi["format"]

def getMetadata(page):

    """
    Return a dictionary containing items describing for the given 'page' the
    page's "created" time, "last-modified" time, "sequence" (or revision number)
    and the "last-comment" made about the last edit.
    """

    request = page.request

    # Get the initial revision of the page.

    revisions = page.getRevList()
    event_page_initial = Page(request, page.page_name, rev=revisions[-1])

    # Get the created and last modified times.

    initial_revision = getPageRevision(event_page_initial)

    metadata = {}
    metadata["created"] = initial_revision["timestamp"]
    latest_revision = getPageRevision(page)
    metadata["last-modified"] = latest_revision["timestamp"]
    metadata["sequence"] = len(revisions) - 1
    metadata["last-comment"] = latest_revision["comment"]

    return metadata

def getPageRevision(page):

    "Return the revision details dictionary for the given 'page'."

    # From Page.edit_info...

    if hasattr(page, "editlog_entry"):
        line = page.editlog_entry()
    else:
        line = page._last_edited(page.request) # MoinMoin 1.5.x and 1.6.x

    # Similar to Page.mtime_usecs behaviour...

    if line:
        timestamp = line.ed_time_usecs
        mtime = wikiutil.version2timestamp(long(timestamp)) # must be long for py 2.2.x
        comment = line.comment
    else:
        mtime = 0
        comment = ""

    # Leave the time zone empty.

    return {"timestamp" : DateTime(time.gmtime(mtime)[:6] + (None,)), "comment" : comment}

# Page parsing and formatting of embedded content.

def getPageParserClass(request):

    "Using 'request', return a parser class for the current page's format."

    return getParserClass(request, getFormat(request.page))

def getParserClass(request, format):

    """
    Return a parser class using the 'request' for the given 'format', returning
    a plain text parser if no parser can be found for the specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "parser", "plain")

def getFormatterClass(request, format):

    """
    Return a formatter class using the 'request' for the given output 'format',
    returning a plain text formatter if no formatter can be found for the
    specified 'format'.
    """

    try:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", format or "plain")
    except wikiutil.PluginMissingError:
        return wikiutil.searchAndImportPlugin(request.cfg, "formatter", "plain")

def formatText(text, request, fmt, parser_cls=None):

    """
    Format the given 'text' using the specified 'request' and formatter 'fmt'.
    Suppress line anchors in the output, and fix lists by indicating that a
    paragraph has already been started.
    """

    if not parser_cls:
        parser_cls = getPageParserClass(request)
    parser = parser_cls(text, request, line_anchors=False)

    # Temporarily install 'fmt' as the request's formatter, restoring the
    # original formatter afterwards.

    old_fmt = request.formatter
    request.formatter = fmt
    try:
        return redirectedOutput(request, parser, fmt, inhibit_p=True)
    finally:
        request.formatter = old_fmt

def redirectedOutput(request, parser, fmt, **kw):

    "A fixed version of the request method of the same name."

    buf = StringIO()
    request.redirect(buf)
    try:
        parser.format(fmt, **kw)
        if hasattr(fmt, "flush"):
            buf.write(fmt.flush(True))
    finally:
        request.redirect()
    text = buf.getvalue()
    buf.close()
    return text

# Textual representations.

def getSimpleWikiText(text):

    """
    Return the plain text representation of the given 'text' which may employ
    certain Wiki syntax features, such as those providing verbatim or monospaced
    text.
    """

    # NOTE: Re-implementing support for verbatim text and linking avoidance.

    return "".join([s for s in verbatim_regexp.split(text) if s is not None])

def getEncodedWikiText(text):

    "Encode the given 'text' in a verbatim representation."

    return "<<Verbatim(%s)>>" % text

def getPrettyTitle(title):

    "Return a nicely formatted version of the given 'title'."

    # NOTE(review): the "?" separator below may be a character garbled from
    # the original iso-8859-1 source - verify against the upstream file.

    return title.replace("_", " ").replace("/", u" ? ")

# User interface functions.

def getParameter(request, name, default=None):

    """
    Using the given 'request', return the value of the parameter with the given
    'name', returning the optional 'default' (or None) if no value was supplied
    in the 'request'.
    """

    return get_form(request).get(name, [default])[0]

def getQualifiedParameter(request, prefix, argname, default=None):

    """
    Using the given 'request', 'prefix' and 'argname', retrieve the value of the
    qualified parameter, returning the optional 'default' (or None) if no value
    was supplied in the 'request'.
    """

    argname = getQualifiedParameterName(prefix, argname)
    return getParameter(request, argname, default)

def getQualifiedParameterName(prefix, argname):

    """
    Return the qualified parameter name using the given 'prefix' and 'argname'.
    """

    if prefix is None:
        return argname
    else:
        return "%s-%s" % (prefix, argname)

# Page-related functions.

def getPrettyPageName(page):

    "Return a nicely formatted title/name for the given 'page'."

    title = page.split_title(force=1)
    return getPrettyTitle(title)

def linkToPage(request, page, text, query_string=None, anchor=None, **kw):

    """
    Using 'request', return a link to 'page' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    text = wikiutil.escape(text)
    return page.link_to_raw(request, text, query_string, anchor, **kw)

def linkToResource(url, request, text, query_string=None, anchor=None):

    """
    Using 'request', return a link to 'url' with the given link 'text' and
    optional 'query_string' and 'anchor'.
    """

    # Fix: append the query string before the fragment so that the URL is
    # well-formed (RFC 3986 places the fragment after the query); previously
    # the fragment was appended first, making the query part of the fragment.

    if query_string:
        query_string = wikiutil.makeQueryString(query_string)
        url += "?%s" % query_string

    if anchor:
        url += "#%s" % anchor

    formatter = request.page and getattr(request.page, "formatter", None) or request.html_formatter

    output = []
    output.append(formatter.url(1, url))
    output.append(formatter.text(text))
    output.append(formatter.url(0))
    return "".join(output)

def getFullPageName(parent, title):

    """
    Return a full page name from the given 'parent' page (can be empty or None)
    and 'title' (a simple page name).
    """

    if parent:
        return "%s/%s" % (parent.rstrip("/"), title)
    else:
        return title

# vim: tabstop=4 expandtab shiftwidth=4