#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Version: 2.42 (November 19, 2015)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
# Contributors:
# Antonio Fuschetto (fuschett@aol.com)
# Leonardo Souza (lsouza@amtera.com.br)
# Juan Manuel Caicedo (juan@cavorite.com)
# Humberto Pereira (begini@gmail.com)
# Siegfried-A. Gevatter (siegfried@gevatter.com)
# Pedro Assis (pedroh2306@gmail.com)
# Wim Muskee (wimmuskee@gmail.com)
# Radics Geza (radicsge@gmail.com)
#
# =============================================================================
# Copyright (c) 2011-2016. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" url="" title="">
...
</doc>
This version performs template expansion by preprocessing the whole dump and
collecting template definitions.
"""
import argparse
import bz2
import codecs
import fileinput
import logging
import os.path
import re # TODO: use the regex module when it becomes standard
import sys
import time
import urllib
from cStringIO import StringIO
from htmlentitydefs import name2codepoint
from itertools import izip, izip_longest
from multiprocessing import Queue, Process, cpu_count
from timeit import default_timer
# ===========================================================================
# Program version
version = '2.42'
## PARAMS ####################################################################
##
# Defined in <siteinfo>
# 'Template' is included by default; it is used when loading an external template file.
knownNamespaces = set(['Template'])
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace = ''
templatePrefix = ''
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace = ''
##
# Recognize only these namespaces
# w: Internal links to the Wikipedia
# wiktionary: Wiki dictionary
# wikt: shortcut for Wiktionary
#
acceptedNamespaces = ['w', 'wiktionary', 'wikt']
##
# Drop these elements from article text
#
discardElements = [
'gallery', 'timeline', 'noinclude', 'pre',
'table', 'tr', 'td', 'th', 'caption', 'div',
'form', 'input', 'select', 'option', 'textarea',
'ul', 'li', 'ol', 'dl', 'dt', 'dd', 'menu', 'dir',
'ref', 'references', 'img', 'imagemap', 'source', 'small'
]
# This is obtained from <siteinfo>
urlbase = ''
def get_url(uid):
return "%s?curid=%s" % (urlbase, uid)
# =========================================================================
#
# MediaWiki Markup Grammar
# https://www.mediawiki.org/wiki/Preprocessor_ABNF
# xml-char = %x9 / %xA / %xD / %x20-D7FF / %xE000-FFFD / %x10000-10FFFF
# sptab = SP / HTAB
# ; everything except ">" (%x3E)
# attr-char = %x9 / %xA / %xD / %x20-3D / %x3F-D7FF / %xE000-FFFD / %x10000-10FFFF
# literal = *xml-char
# title = wikitext-L3
# part-name = wikitext-L3
# part-value = wikitext-L3
# part = ( part-name "=" part-value ) / ( part-value )
# parts = [ title *( "|" part ) ]
# tplarg = "{{{" parts "}}}"
# template = "{{" parts "}}"
# link = "[[" wikitext-L3 "]]"
# comment = "<!--" literal "-->"
# unclosed-comment = "<!--" literal END
# ; the + in the line-eating-comment rule was absent between MW 1.12 and MW 1.22
# line-eating-comment = LF LINE-START *SP +( comment *SP ) LINE-END
# attr = *attr-char
# nowiki-element = "<nowiki" attr ( "/>" / ( ">" literal ( "</nowiki>" / END ) ) )
# wikitext-L2 = heading / wikitext-L3 / *wikitext-L2
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment / xmlish-element /
# *wikitext-L3
# ------------------------------------------------------------------------------
selfClosingTags = ('br', 'hr', 'nobr', 'ref', 'references', 'nowiki')
# These tags are dropped, keeping their content.
# handle 'a' separately, depending on keepLinks
ignoredTags = (
'abbr', 'b', 'big', 'blockquote', 'center', 'cite', 'div', 'em',
'font', 'h1', 'h2', 'h3', 'h4', 'hiero', 'i', 'kbd', 'nowiki',
'p', 'plaintext', 's', 'span', 'strike', 'strong',
'sub', 'sup', 'tt', 'u', 'var'
)
placeholder_tags = {'math': 'formula', 'code': 'codice'}
def normalizeTitle(title):
"""Normalize title"""
# remove leading/trailing whitespace and underscores
title = title.strip(' _')
# replace sequences of whitespace and underscore chars with a single space
title = re.sub(r'[\s_]+', ' ', title)
m = re.match(r'([^:]*):(\s*)(\S(?:.*))', title)
if m:
prefix = m.group(1)
if m.group(2):
optionalWhitespace = ' '
else:
optionalWhitespace = ''
rest = m.group(3)
ns = normalizeNamespace(prefix)
if ns in knownNamespaces:
# If the prefix designates a known namespace, then it might be
# followed by optional whitespace that should be removed to get
# the canonical page name
# (e.g., "Category: Births" should become "Category:Births").
title = ns + ":" + ucfirst(rest)
else:
# No namespace, just capitalize first letter.
# If the part before the colon is not a known namespace, then we
# must not remove the space after the colon (if any), e.g.,
# "3001: The_Final_Odyssey" != "3001:The_Final_Odyssey".
# However, to get the canonical page name we must contract multiple
# spaces into one, because
# "3001: The_Final_Odyssey" != "3001: The_Final_Odyssey".
title = ucfirst(prefix) + ":" + optionalWhitespace + ucfirst(rest)
else:
# no namespace, just capitalize first letter
title = ucfirst(title)
return title
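# Example (sketch, assuming 'Template' is in knownNamespaces, as by default):
#   normalizeTitle('template:  foo_bar') -> 'Template:Foo bar'
#   normalizeTitle('3001: The_Final_Odyssey') -> '3001: The Final Odyssey'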
def unescape(text):
"""
Removes HTML or XML character references and entities from a text string.
:param text: The HTML (or XML) source text.
:return: The plain text, as a Unicode string, if necessary.
"""
def fixup(m):
text = m.group(0)
code = m.group(1)
try:
if text[1] == "#": # character reference
if text[2] == "x":
return unichr(int(code[1:], 16))
else:
return unichr(int(code))
else: # named entity
return unichr(name2codepoint[code])
except:
return text # leave as is
return re.sub(r"&#?(\w+);", fixup, text)
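# Examples (sketch): unescape('&lt;') -> u'<', unescape('&#91;') -> u'[',
# unescape('&#x5d;') -> u']'; unrecognized entities are left untouched.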
# Match HTML comments
# The buggy template {{Template:T}} has a comment terminating with just "->"
comment = re.compile(r'<!--.*?-->', re.DOTALL)
# Match ignored tags
ignored_tag_patterns = []
def ignoreTag(tag):
left = re.compile(r'<%s\b.*?>' % tag, re.IGNORECASE | re.DOTALL) # both <ref> and <reference>
right = re.compile(r'</\s*%s>' % tag, re.IGNORECASE)
ignored_tag_patterns.append((left, right))
for tag in ignoredTags:
ignoreTag(tag)
# Match selfClosing HTML tags
selfClosing_tag_patterns = [
re.compile(r'<\s*%s\b[^>]*/\s*>' % tag, re.DOTALL | re.IGNORECASE) for tag in selfClosingTags
]
# Match HTML placeholder tags
placeholder_tag_patterns = [
(re.compile(r'<\s*%s(\s*| [^>]+?)>.*?<\s*/\s*%s\s*>' % (tag, tag), re.DOTALL | re.IGNORECASE),
repl) for tag, repl in placeholder_tags.items()
]
# Match preformatted lines
preformatted = re.compile(r'^ .*?$')
# Match external links (space separates second optional parameter)
externalLink = re.compile(r'\[\w+[^ ]*? (.*?)]')
externalLinkNoAnchor = re.compile(r'\[\w+[&\]]*\]')
# Matches bold/italic
bold_italic = re.compile(r"'''''(.*?)'''''")
bold = re.compile(r"'''(.*?)'''")
italic_quote = re.compile(r"''\"([^\"]*?)\"''")
italic = re.compile(r"''(.*?)''")
quote_quote = re.compile(r'""([^"]*?)""')
# Matches space
spaces = re.compile(r' {2,}')
# Matches dots
dots = re.compile(r'\.{4,}')
# ======================================================================
class Template(list):
"""
A Template is a list of TemplateText or TemplateArgs
"""
@classmethod
def parse(cls, body):
tpl = Template()
# we must handle nesting, such as:
# {{{1|{{PAGENAME}}}
# {{{italics|{{{italic|}}}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|
#
start = 0
for s, e in findMatchingBraces(body, 3):
tpl.append(TemplateText(body[start:s]))
tpl.append(TemplateArg(body[s + 3:e - 3]))
start = e
tpl.append(TemplateText(body[start:])) # leftover
return tpl
def subst(self, params, extractor, depth=0):
# We perform parameter substitutions recursively.
# We also limit the maximum number of iterations to avoid too long or
# even endless loops (in case of malformed input).
# :see: http://meta.wikimedia.org/wiki/Help:Expansion#Distinction_between_variables.2C_parser_functions.2C_and_templates
#
# Parameter values are assigned to parameters in two (?) passes.
# Therefore a parameter name in a template can depend on the value of
# another parameter of the same template, regardless of the order in
# which they are specified in the template call, for example, using
# Template:ppp containing "{{{{{{p}}}}}}", {{ppp|p=q|q=r}} and even
# {{ppp|q=r|p=q}} gives r, but using Template:tvvv containing
# "{{{{{{{{{p}}}}}}}}}", {{tvvv|p=q|q=r|r=s}} gives s.
# logging.debug('subst tpl (%d, %d) %s', len(extractor.frame), depth, self)
if depth > extractor.maxParameterRecursionLevels:
extractor.recursion_exceeded_3_errs += 1
return ''
return ''.join([tpl.subst(params, extractor, depth) for tpl in self])
def __str__(self):
return ''.join([unicode(x) for x in self])
class TemplateText(unicode):
"""Fixed text of template"""
def subst(self, params, extractor, depth):
return self
class TemplateArg(object):
"""
A parameter to a template.
Has a name and a default value, both of which are Templates.
"""
def __init__(self, parameter):
"""
:param parameter: the parts of a tplarg.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
# 4|{{{{{subst|}}}CURRENTYEAR}}
# any parts in a tplarg after the first (the parameter default) are
# ignored, and an equals sign in the first part is treated as plain text.
# logging.debug('TemplateArg %s', parameter)
parts = splitParts(parameter)
self.name = Template.parse(parts[0])
if len(parts) > 1:
# This parameter has a default value
self.default = Template.parse(parts[1])
else:
self.default = None
def __str__(self):
if self.default:
return '{{{%s|%s}}}' % (self.name, self.default)
else:
return '{{{%s}}}' % self.name
def subst(self, params, extractor, depth):
"""
Substitute value for this argument from dict :param params:
Use :param extractor: to evaluate expressions for name and default.
Limit substitution to the maximum :param depth:.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
paramName = self.name.subst(params, extractor, depth + 1)
paramName = extractor.expandTemplates(paramName)
res = ''
if paramName in params:
res = params[paramName] # use parameter value specified in template invocation
elif self.default: # use the default value
defaultValue = self.default.subst(params, extractor, depth + 1)
res = extractor.expandTemplates(defaultValue)
# logging.debug('subst arg %d %s -> %s' % (depth, paramName, res))
return res
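# A minimal sketch (hypothetical helper, not called by the extractor) of how
# Template.parse() and subst() cooperate: '{{{1|x}}}' parses into a
# TemplateArg whose default is used when parameter '1' is absent.
def _template_subst_example(extractor):
    tpl = Template.parse(u'-{{{1|x}}}-')
    # with parameter 1 bound this returns u'-a-'; without it, u'-x-'
    return tpl.subst({'1': u'a'}, extractor), tpl.subst({}, extractor)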
# ======================================================================
substWords = 'subst:|safesubst:'
class Extractor(object):
"""
An extraction task on an article.
"""
##
# Whether to preserve links in output
keepLinks = False
##
# Whether to preserve section titles
keepSections = True
##
# Whether to output HTML instead of text
toHTML = False
def __init__(self, id, title, page):
"""
:param page: a list of lines.
"""
self.id = id
self.title = title
self.page = page
self.magicWords = MagicWords()
self.frame = []
self.recursion_exceeded_1_errs = 0 # template recursion within expandTemplates()
self.recursion_exceeded_2_errs = 0 # template recursion within expandTemplate()
self.recursion_exceeded_3_errs = 0 # parameter recursion
self.template_title_errs = 0
def extract(self, out):
"""
:param out: a memory file.
"""
logging.debug("%s\t%s", self.id, self.title)
text = ''.join(self.page)
url = get_url(self.id)
header = '<doc id="%s" url="%s" title="%s">\n' % (self.id, url, self.title)
# Separate header from text with a newline.
header += self.title + '\n\n'
header = header.encode('utf-8')
self.magicWords['pagename'] = self.title
self.magicWords['fullpagename'] = self.title
self.magicWords['currentyear'] = time.strftime('%Y')
self.magicWords['currentmonth'] = time.strftime('%m')
self.magicWords['currentday'] = time.strftime('%d')
self.magicWords['currenthour'] = time.strftime('%H')
self.magicWords['currenttime'] = time.strftime('%H:%M:%S')
text = clean(self, text)
footer = "\n</doc>\n"
out.write(header)
for line in compact(text):
out.write(line.encode('utf-8'))
out.write('\n')
out.write(footer)
errs = (self.template_title_errs,
self.recursion_exceeded_1_errs,
self.recursion_exceeded_2_errs,
self.recursion_exceeded_3_errs)
if any(errs):
logging.warn("Template errors in article '%s' (%s): title(%d) recursion(%d, %d, %d)",
self.title, self.id, *errs)
# ----------------------------------------------------------------------
# Expand templates
maxTemplateRecursionLevels = 30
maxParameterRecursionLevels = 10
# check for template beginning
reOpen = re.compile('(?<!{){{(?!{)', re.DOTALL)
def expandTemplates(self, wikitext):
"""
:param wikitext: the text to be expanded.
Templates are frequently nested. Occasionally, parsing mistakes may
cause template insertion to enter an infinite loop, for instance when
trying to instantiate Template:Country
{{country_{{{1}}}|{{{2}}}|{{{2}}}|size={{{size|}}}|name={{{name|}}}}}
which is repeatedly trying to insert template 'country_', which is
again resolved to Template:Country. The straightforward solution of
keeping track of templates that were already inserted for the current
article would not work, because the same template may legally be used
more than once, with different parameters in different parts of the
article. Therefore, we limit the number of iterations of nested
template inclusion.
"""
# Test template expansion at:
# https://en.wikipedia.org/wiki/Special:ExpandTemplates
res = ''
if len(self.frame) >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_1_errs += 1
return res
# logging.debug('<expandTemplates ' + str(len(self.frame)))
cur = 0
# look for matching {{...}}
for s, e in findMatchingBraces(wikitext, 2):
res += wikitext[cur:s] + self.expandTemplate(wikitext[s + 2:e - 2])
cur = e
# leftover
res += wikitext[cur:]
# logging.debug(' expandTemplates> %d %s', len(self.frame), res)
return res
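# Example (sketch, assuming templatePrefix == 'Template:' and a definition
# templates['Template:Done'] = 'completed'):
#   self.expandTemplates('status: {{done}}') -> 'status: completed'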
def templateParams(self, parameters):
"""
Build a dictionary with positional or name key to expanded parameters.
:param parameters: the parts[1:] of a template, i.e. all except the title.
"""
templateParams = {}
if not parameters:
return templateParams
logging.debug('<templateParams: %s', '|'.join(parameters))
# Parameters can be either named or unnamed. In the latter case, their
# name is defined by their ordinal position (1, 2, 3, ...).
unnamedParameterCounter = 0
# It's legal for unnamed parameters to be skipped, in which case they
# will get default values (if available) during actual instantiation.
# That is {{template_name|a||c}} means parameter 1 gets
# the value 'a', parameter 2 value is not defined, and parameter 3 gets
# the value 'c'. This case is correctly handled by function 'split',
# and does not require any special handling.
for param in parameters:
# Spaces before or after a parameter value are normally ignored,
# UNLESS the parameter contains a link (to prevent possible gluing
# the link to the following text after template substitution)
# Parameter values may contain "=" symbols, hence the parameter
# name extends up to the first such symbol.
# It is legal for a parameter to be specified several times, in
# which case the last assignment takes precedence. Example:
# "{{t|a|b|c|2=B}}" is equivalent to "{{t|a|B|c}}".
# Therefore, we don't check if the parameter has been assigned a
# value before, because anyway the last assignment should override
# any previous ones.
# FIXME: Don't use DOTALL here since parameters may be tags with
# attributes, e.g. <div class="templatequotecite">
# Parameters may span several lines, like:
# {{Reflist|colwidth=30em|refs=
# &lt;ref name=&quot;Goode&quot;&gt;Title&lt;/ref&gt;
# The '=' might occur within an HTML attribute:
# "&lt;ref name=value"
# but we stop at the first one.
m = re.match(' *([^=]*?) *=(.*)', param, re.DOTALL)
if m:
# This is a named parameter. This case also handles parameter
# assignments like "2=xxx", where the number of an unnamed
# parameter ("2") is specified explicitly - this is handled
# transparently.
parameterName = m.group(1).strip()
parameterValue = m.group(2)
if ']]' not in parameterValue: # if the value does not contain a link, trim whitespace
parameterValue = parameterValue.strip()
templateParams[parameterName] = parameterValue
else:
# this is an unnamed parameter
unnamedParameterCounter += 1
if ']]' not in param: # if the value does not contain a link, trim whitespace
param = param.strip()
templateParams[str(unnamedParameterCounter)] = param
logging.debug(' templateParams> %s', '|'.join(templateParams.values()))
return templateParams
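# Example (sketch): for '{{t|a|b|2=B}}' the parts after the title are
# ['a', 'b', '2=B'], and templateParams(['a', 'b', '2=B']) returns
# {'1': 'a', '2': 'B'}: the named assignment '2=B' overrides the second
# positional value, as explained above.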
def expandTemplate(self, body):
"""Expands template invocation.
:param body: the parts of a template.
:see http://meta.wikimedia.org/wiki/Help:Expansion for an explanation
of the process.
See in particular: Expansion of names and values
http://meta.wikimedia.org/wiki/Help:Expansion#Expansion_of_names_and_values
For most parser functions all names and values are expanded,
regardless of what is relevant for the result. The branching functions
(#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are exceptions.
All names in a template call are expanded, and the titles of the
tplargs in the template body, after which it is determined which
values must be expanded, and for which tplargs in the template body
the first part (default).
In the case of a tplarg, any parts beyond the first are never
expanded. The possible name and the value of the first part is
expanded if the title does not match a name in the template call.
:see code for braceSubstitution at
https://doc.wikimedia.org/mediawiki-core/master/php/html/Parser_8php_source.html#3397:
"""
# template = "{{" parts "}}"
# Templates and tplargs are decomposed in the same way, with pipes as
# separator, even though eventually any parts in a tplarg after the first
# (the parameter default) are ignored, and an equals sign in the first
# part is treated as plain text.
# Pipes inside inner templates and tplargs, or inside double rectangular
# brackets within the template or tplargs are not taken into account in
# this decomposition.
# The first part is called title, the other parts are simply called parts.
# If a part has one or more equals signs in it, the first equals sign
# determines the division into name = value. Equals signs inside inner
# templates and tplargs, or inside double rectangular brackets within the
# part are not taken into account in this decomposition. Parts without
# equals sign are indexed 1, 2, .., given as attribute in the <name> tag.
if len(self.frame) >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_2_errs += 1
# logging.debug(' INVOCATION> %d %s', len(self.frame), body)
return ''
logging.debug('INVOCATION %d %s', len(self.frame), body)
parts = splitParts(body)
# title is the portion before the first |
logging.debug('TITLE %s', parts[0].strip())
title = self.expandTemplates(parts[0].strip())
# SUBST
# Apply the template tag to parameters without
# substituting into them, e.g.
# {{subst:t|a{{{p|q}}}b}} gives the wikitext start-a{{{p|q}}}b-end
# @see https://www.mediawiki.org/wiki/Manual:Substitution#Partial_substitution
subst = False
if re.match(substWords, title, re.IGNORECASE):
title = re.sub(substWords, '', title, 1, re.IGNORECASE)
subst = True
if title.lower() in self.magicWords.values:
return self.magicWords[title.lower()]
# Parser functions
# The first argument is everything after the first colon.
# It has been evaluated above.
colon = title.find(':')
if colon > 1:
funct = title[:colon]
parts[0] = title[colon + 1:].strip() # side-effect (parts[0] not used later)
# arguments after first are not evaluated
ret = callParserFunction(funct, parts, self.frame)
return self.expandTemplates(ret)
title = fullyQualifiedTemplateTitle(title)
if not title:
self.template_title_errs += 1
return ''
redirected = redirects.get(title)
if redirected:
title = redirected
# get the template
if title in templateCache:
template = templateCache[title]
elif title in templates:
template = Template.parse(templates[title])
# add it to cache
templateCache[title] = template
del templates[title]
else:
# The page being included could not be identified
return ''
# logging.debug('TEMPLATE %s: %s', title, template)
# tplarg = "{{{" parts "}}}"
# parts = [ title *( "|" part ) ]
# part = ( part-name "=" part-value ) / ( part-value )
# part-name = wikitext-L3
# part-value = wikitext-L3
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment /
# xmlish-element / *wikitext-L3
# A tplarg may contain other parameters as well as templates, e.g.:
# {{{text|{{{quote|{{{1|{{error|Error: No text given}}}}}}}}}}}
# hence no simple RE like this would work:
# '{{{((?:(?!{{{).)*?)}}}'
# We must use full CF parsing.
# the parameter name itself might be computed, e.g.:
# {{{appointe{{#if:{{{appointer14|}}}|r|d}}14|}}}
# Because of the multiple uses of double-brace and triple-brace
# syntax, expressions can sometimes be ambiguous.
# Precedence rules specified here:
# http://www.mediawiki.org/wiki/Preprocessor_ABNF#Ideal_precedence
# resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
#
# :see: https://en.wikipedia.org/wiki/Help:Template#Handling_parameters
params = parts[1:]
if not subst:
# Evaluate parameters, since they may contain templates, including
# the symbol "=".
# {{#ifexpr: {{{1}}} = 1 }}
params = [self.expandTemplates(p) for p in params]
# build a dict of name-values for the parameter values
params = self.templateParams(params)
# Perform parameter substitution
# extend frame before subst, since there may be recursion in default
# parameter value, e.g. {{OTRS|celebrative|date=April 2015}} in article
# 21637542 in enwiki.
self.frame.append((title, params))
instantiated = template.subst(params, self)
# logging.debug('instantiated %d %s', len(self.frame), instantiated)
value = self.expandTemplates(instantiated)
self.frame.pop()
# logging.debug(' INVOCATION> %s %d %s', title, len(self.frame), value)
return value
# ----------------------------------------------------------------------
# parameter handling
def splitParts(paramsList):
"""
:param paramsList: the parts of a template or tplarg.
Split template parameters at the separator "|".
Within a named parameter, name and value are separated by "=".
Template parameters often contain URLs, internal links, text or even
template expressions, since we evaluate templates outside in.
This is required for cases like:
{{#if: {{{1}}} | {{lc:{{{1}}} | "parameter missing"}}
Parameters are separated by "|" symbols. However, we
cannot simply split the string on "|" symbols, since these
also appear inside templates and internal links, e.g.
{{if:|
|{{#if:the president|
|{{#if:|
[[Category:Hatnote templates|A{{PAGENAME}}]]
}}
}}
}}
We split parts at the "|" symbols that are not inside any pair
{{{...}}}, {{...}}, [[...]], {|...|}.
"""
# Must consider '[' as normal in expansion of Template:EMedicine2:
# #ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}
# as part of:
# {{#ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}}} ped/180{{#if: |~}}]
# should handle both tpl arg like:
# 4|{{{{{subst|}}}CURRENTYEAR}}
# and tpl parameters like:
# ||[[Category:People|{{#if:A|A|{{PAGENAME}}}}]]
sep = '|'
parameters = []
cur = 0
for s, e in findMatchingBraces(paramsList):
par = paramsList[cur:s].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
elif not parameters:
parameters = [''] # create first param
# add span to last previous parameter
parameters[-1] += paramsList[s:e]
cur = e
# leftover
par = paramsList[cur:].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
# logging.debug('splitParts %s %s\nparams: %s', sep, paramsList, str(parameters))
return parameters
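# A minimal sketch (hypothetical helper) of the behaviour described above:
# pipes nested inside braces or links do not split parameters.
def _splitParts_example():
    return splitParts('a|b{{c|d}}|e')  # -> ['a', 'b{{c|d}}', 'e']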
def findMatchingBraces(text, ldelim=0):
"""
:param ldelim: number of braces to match. 0 means match [[]], {{}} and {{{}}}.
"""
# Parsing is done with respect to pairs of double braces {{..}} delimiting
# a template, and pairs of triple braces {{{..}}} delimiting a tplarg.
# If double opening braces are followed by triple closing braces or
# conversely, this is taken as delimiting a template, with one left-over
# brace outside it, taken as plain text. For any pattern of braces this
# defines a set of templates and tplargs such that any two are either
# separate or nested (not overlapping).
# Unmatched double rectangular closing brackets can be in a template or
# tplarg, but unmatched double rectangular opening brackets cannot.
# Unmatched double or triple closing braces inside a pair of
# double rectangular brackets are treated as plain text.
# Other formulation: in ambiguity between template or tplarg on one hand,
# and a link on the other hand, the structure with the rightmost opening
# takes precedence, even if this is the opening of a link without any
# closing, so not producing an actual link.
# In the case of more than three opening braces the last three are assumed
# to belong to a tplarg, unless there is no matching triple of closing
# braces, in which case the last two opening braces are assumed to
# belong to a template.
# We must skip individual { like in:
# {{#ifeq: {{padleft:|1|}} | { | | &nbsp;}}
# We must resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|...}}
# Handle:
# {{{{{|safesubst:}}}#Invoke:String|replace|{{{1|{{{{{|safesubst:}}}PAGENAME}}}}}|%s+%([^%(]-%)$||plain=false}}
# as well as expressions with stray }:
# {{{link|{{ucfirst:{{{1}}}}}} interchange}}}
if ldelim: # 2-3
reOpen = re.compile('[{]{%d,}' % ldelim) # at least ldelim
reNext = re.compile('[{]{2,}|}{2,}') # at least 2
else:
reOpen = re.compile('{{2,}|\[{2,}')
reNext = re.compile('{{2,}|}{2,}|\[{2,}|]{2,}') # at least 2
cur = 0
while True:
m1 = reOpen.search(text, cur)
if not m1:
2015-03-22 20:45:17 +08:00
return
lmatch = m1.end() - m1.start()
if m1.group()[0] == '{':
stack = [lmatch] # stack of opening braces lengths
else:
stack = [-lmatch] # negative means [
end = m1.end()
while True:
m2 = reNext.search(text, end)
if not m2:
return # unbalanced
end = m2.end()
brac = m2.group()[0]
lmatch = m2.end() - m2.start()
if brac == '{':
stack.append(lmatch)
elif brac == '}':
while stack:
openCount = stack.pop() # opening span
if openCount == 0: # illegal unmatched [[
continue
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray }
break
else:
# put back unmatched
stack.append(openCount - lmatch)
break
if not stack:
yield m1.start(), end - lmatch
cur = end
break
elif len(stack) == 1 and 0 < stack[0] < ldelim:
# ambiguous {{{{{ }}} }}
yield m1.start() + stack[0], end
cur = end
break
elif brac == '[': # [[
stack.append(-lmatch)
else: # ]]
while stack and stack[-1] < 0: # matching [[
openCount = -stack.pop()
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray ]
break
else:
# put back unmatched (negative)
stack.append(lmatch - openCount)
break
if not stack:
yield m1.start(), end - lmatch
cur = end
break
# unmatched ]] are discarded
cur = end
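# Example (sketch): findMatchingBraces('a{{b}}c {{{x}}}', 2) yields the
# spans (1, 6) and (8, 15), i.e. '{{b}}' and '{{{x}}}'; with the default
# ldelim=0 it reports [[...]] spans as well.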
def findBalanced(text, openDelim, closeDelim):
"""
Assume that text contains a properly balanced expression using
:param openDelim: as opening delimiters and
:param closeDelim: as closing delimiters.
:return: an iterator producing pairs (start, end) of start and end
positions in text containing a balanced expression.
"""
openPat = '|'.join([re.escape(x) for x in openDelim])
# pattern for delimiters expected after each opening delimiter
afterPat = {o: re.compile(openPat + '|' + c, re.DOTALL) for o, c in izip(openDelim, closeDelim)}
stack = []
start = 0
cur = 0
# end = len(text)
startSet = False
startPat = re.compile(openPat)
nextPat = startPat
while True:
next = nextPat.search(text, cur)
if not next:
return
if not startSet:
start = next.start()
startSet = True
delim = next.group(0)
if delim in openDelim:
stack.append(delim)
nextPat = afterPat[delim]
else:
opening = stack.pop()
# assert opening == openDelim[closeDelim.index(next.group(0))]
if stack:
nextPat = afterPat[stack[-1]]
else:
yield start, next.end()
nextPat = startPat
start = next.end()
startSet = False
cur = next.end()
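# Example (sketch): list(findBalanced('[[a|[[b]]]] c', ['[['], [']]']))
# -> [(0, 11)], the span of the outer link including the nested one.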
# ----------------------------------------------------------------------
# Modules
# Only minimal support
# FIXME: import Lua modules.
modules = {
'convert': {
'convert': lambda x, u, *rest: x + ' ' + u, # no conversion
}
}
# ----------------------------------------------------------------------
# variables
class MagicWords(object):
"""
One copy in each Extractor.
@see https://doc.wikimedia.org/mediawiki-core/master/php/MagicWord_8php_source.html
"""
names = [
'!',
'currentmonth',
'currentmonth1',
'currentmonthname',
'currentmonthnamegen',
'currentmonthabbrev',
'currentday',
'currentday2',
'currentdayname',
'currentyear',
'currenttime',
'currenthour',
'localmonth',
'localmonth1',
'localmonthname',
'localmonthnamegen',
'localmonthabbrev',
'localday',
'localday2',
'localdayname',
'localyear',
'localtime',
'localhour',
'numberofarticles',
'numberoffiles',
'numberofedits',
'articlepath',
'pageid',
'sitename',
'server',
'servername',
'scriptpath',
'stylepath',
'pagename',
'pagenamee',
'fullpagename',
'fullpagenamee',
'namespace',
'namespacee',
'namespacenumber',
'currentweek',
'currentdow',
'localweek',
'localdow',
'revisionid',
'revisionday',
'revisionday2',
'revisionmonth',
'revisionmonth1',
'revisionyear',
'revisiontimestamp',
'revisionuser',
'revisionsize',
'subpagename',
'subpagenamee',
'talkspace',
'talkspacee',
'subjectspace',
'subjectspacee',
'talkpagename',
'talkpagenamee',
'subjectpagename',
'subjectpagenamee',
'numberofusers',
'numberofactiveusers',
'numberofpages',
'currentversion',
'rootpagename',
'rootpagenamee',
'basepagename',
'basepagenamee',
'currenttimestamp',
'localtimestamp',
'directionmark',
'contentlanguage',
'numberofadmins',
'cascadingsources',
]
def __init__(self):
self.values = {'!': '|'}
def __getitem__(self, name):
return self.values.get(name)
def __setitem__(self, name, value):
self.values[name] = value
switches = (
'__NOTOC__',
'__FORCETOC__',
'__TOC__',
'__NEWSECTIONLINK__',
'__NONEWSECTIONLINK__',
'__NOGALLERY__',
'__HIDDENCAT__',
'__NOCONTENTCONVERT__',
'__NOCC__',
'__NOTITLECONVERT__',
'__NOTC__',
'__START__',
'__END__',
'__INDEX__',
'__NOINDEX__',
'__STATICREDIRECT__',
'__DISAMBIG__'
)
magicWordsRE = re.compile('|'.join(MagicWords.switches))
# ----------------------------------------------------------------------
# parser functions utilities
def ucfirst(string):
""":return: a string with just its first character uppercase
We can't use title() since it capitalizes all words.
"""
if string:
if len(string) > 1:
return string[0].upper() + string[1:]
else:
return string.upper()
else:
return ''
def lcfirst(string):
""":return: a string with its first character lowercase"""
if string:
if len(string) > 1:
return string[0].lower() + string[1:]
else:
return string.lower()
else:
return ''
def fullyQualifiedTemplateTitle(templateTitle):
"""
Determine the namespace of the page being included through the template
mechanism
"""
if templateTitle.startswith(':'):
# Leading colon by itself implies main namespace, so strip this colon
return ucfirst(templateTitle[1:])
else:
m = re.match('([^:]*)(:.*)', templateTitle)
if m:
# colon found but not in the first position - check if it
# designates a known namespace
prefix = normalizeNamespace(m.group(1))
if prefix in knownNamespaces:
return prefix + ucfirst(m.group(2))
# The title of the page being included is NOT in the main namespace and
# lacks any other explicit designation of the namespace - therefore, it
# is resolved to the Template namespace (that's the default for the
# template inclusion mechanism).
# This is a defense against pages whose title only contains UTF-8 chars
# that are reduced to an empty string. Right now I can think of one such
# case - <C2><A0> which represents the non-breaking space.
# In this particular case, this page is a redirect to [[Non-breaking
# space]], but having in the system a redirect page with an empty title
# causes numerous problems, so we'll live happier without it.
if templateTitle:
return templatePrefix + ucfirst(templateTitle)
else:
return '' # caller may log as error
def normalizeNamespace(ns):
return ucfirst(ns)
# ----------------------------------------------------------------------
# Parser functions
# see http://www.mediawiki.org/wiki/Help:Extension:ParserFunctions
# https://github.com/Wikia/app/blob/dev/extensions/ParserFunctions/ParserFunctions_body.php
class Infix:
"""Infix operators.
The calling sequence for the infix is:
x |op| y
"""
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
ROUND = Infix(lambda x, y: round(x, y))
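# Example (sketch): 5.34 |ROUND| 1 evaluates as round(5.34, 1) -> 5.3;
# sharp_expr() below rewrites 'round' in #expr input into this operator.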
def sharp_expr(expr):
try:
expr = re.sub('=', '==', expr)
expr = re.sub('mod', '%', expr)
expr = re.sub('\bdiv\b', '/', expr)
expr = re.sub('\bround\b', '|ROUND|', expr)
return unicode(eval(expr))
except:
return '<span class="error"></span>'
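# Example (sketch): sharp_expr('7 mod 3') -> u'1'; any failure during
# evaluation yields the MediaWiki-style error span instead.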
def sharp_if(testValue, valueIfTrue, valueIfFalse=None, *args):
# In theory, we should evaluate the first argument here,
# but it was evaluated while evaluating part[0] in expandTemplate().
if testValue.strip():
# The {{#if:}} function is an if-then-else construct.
# The applied condition is: "The condition string is non-empty".
valueIfTrue = valueIfTrue.strip()
if valueIfTrue:
return valueIfTrue
elif valueIfFalse:
return valueIfFalse.strip()
return ""
def sharp_ifeq(lvalue, rvalue, valueIfTrue, valueIfFalse=None, *args):
rvalue = rvalue.strip()
if rvalue:
# lvalue is always defined
if lvalue.strip() == rvalue:
# The {{#ifeq:}} function is an if-then-else construct. The
# applied condition is "is rvalue equal to lvalue". Note that this
# does only string comparison, while the MediaWiki implementation also
# supports numerical comparisons.
if valueIfTrue:
return valueIfTrue.strip()
else:
if valueIfFalse:
return valueIfFalse.strip()
return ""
def sharp_iferror(test, then='', Else=None, *args):
if re.match('<(?:strong|span|p|div)\s(?:[^\s>]*\s+)*?class="(?:[^"\s>]*\s+)*?error(?:\s[^">]*)?"', test):
return then
elif Else is None:
return test.strip()
else:
return Else.strip()
def sharp_switch(primary, *params):
# FIXME: we don't support numeric expressions in primary
# {{#switch: comparison string
# | case1 = result1
# | case2
# | case4 = result2
# | 1 | case5 = result3
# | #default = result4
# }}
primary = primary.strip()
found = False # for fall through cases
default = None
rvalue = None
lvalue = ''
for param in params:
# handle cases like:
# #default = [http://www.perseus.tufts.edu/hopper/text?doc=Perseus...]
pair = param.split('=', 1)
lvalue = pair[0].strip()
rvalue = None
if len(pair) > 1:
# got "="
rvalue = pair[1].strip()
# check for any of multiple values pipe separated
if found or primary in [v.strip() for v in lvalue.split('|')]:
# Found a match, return now
return rvalue
elif lvalue == '#default':
default = rvalue
rvalue = None # avoid defaulting to last case
elif lvalue == primary:
# If the value matches, set a flag and continue
found = True
# Default case
# Check if the last item had no = sign, thus specifying the default case
if rvalue is not None:
return lvalue
elif default is not None:
return default
return ''
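# Examples (sketch):
#   sharp_switch('b', 'a = A', 'b = B', '#default = D') -> 'B'
#   sharp_switch('z', 'a = A', 'b = B', '#default = D') -> 'D'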
# Extension Scribunto
def sharp_invoke(module, function, frame):
functions = modules.get(module)
if functions:
funct = functions.get(function)
if funct:
# find parameters in frame whose title is the one of the original
# template invocation
templateTitle = fullyQualifiedTemplateTitle(function)
if not templateTitle:
logging.warn("Template with empty title")
pair = next((x for x in frame if x[0] == templateTitle), None)
if pair:
params = pair[1]
# extract positional args
params = [params.get(str(i + 1)) for i in range(len(params))]
return funct(*params)
else:
return funct()
return ''
parserFunctions = {
'#expr': sharp_expr,
'#if': sharp_if,
'#ifeq': sharp_ifeq,
'#iferror': sharp_iferror,
'#ifexpr': lambda *args: '', # not supported
'#ifexist': lambda *args: '', # not supported
'#rel2abs': lambda *args: '', # not supported
'#switch': sharp_switch,
'#language': lambda *args: '', # not supported
'#time': lambda *args: '', # not supported
'#timel': lambda *args: '', # not supported
'#titleparts': lambda *args: '', # not supported
# This function is used in some pages to construct links
# http://meta.wikimedia.org/wiki/Help:URL
'urlencode': lambda string, *rest: urllib.quote(string.encode('utf-8')),
'lc': lambda string, *rest: string.lower() if string else '',
'lcfirst': lambda string, *rest: lcfirst(string),
'uc': lambda string, *rest: string.upper() if string else '',
'ucfirst': lambda string, *rest: ucfirst(string),
'int': lambda string, *rest: str(int(string)),
}
def callParserFunction(functionName, args, frame):
"""
Parser functions have similar syntax as templates, except that
the first argument is everything after the first colon.
:return: the result of the invocation, None in case of failure.
http://meta.wikimedia.org/wiki/Help:ParserFunctions
"""
try:
if functionName == '#invoke':
# special handling of frame
ret = sharp_invoke(args[0].strip(), args[1].strip(), frame)
# logging.debug('parserFunction> %s %s', functionName, ret)
return ret
if functionName in parserFunctions:
ret = parserFunctions[functionName](*args)
# logging.debug('parserFunction> %s %s', functionName, ret)
return ret
except:
return "" # FIXME: fix errors
return ""
# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
# def expandTemplates(text):
# """Expand templates invoking MediaWiki API"""
# text = urllib.quote(text.encode('utf-8'))
# base = urlbase[:urlbase.rfind('/')]
# url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
# exp = json.loads(urllib.urlopen(url))
# return exp['expandtemplates']['*']
# ----------------------------------------------------------------------
# Extract Template definition
reNoinclude = re.compile(r'<noinclude>(?:.*?)</noinclude>', re.DOTALL)
reIncludeonly = re.compile(r'<includeonly>|</includeonly>', re.DOTALL)
# These are built before spawning processes, hence they are shared.
templates = {}
redirects = {}
# cache of parser templates
# FIXME: sharing this with a Manager slows things down.
templateCache = {}
def define_template(title, page):
"""
Adds a template defined in the :param page:.
@see https://en.wikipedia.org/wiki/Help:Template#Noinclude.2C_includeonly.2C_and_onlyinclude
"""
global templates
global redirects
# title = normalizeTitle(title)
# check for redirects
m = re.match('#REDIRECT.*?\[\[([^\]]*)]]', page[0], re.IGNORECASE)
if m:
redirects[title] = m.group(1) # normalizeTitle(m.group(1))
return
text = unescape(''.join(page))
# We're storing template text for future inclusion, therefore,
# remove all <noinclude> text and keep all <includeonly> text
# (but eliminate <includeonly> tags per se).
# However, if <onlyinclude> ... </onlyinclude> parts are present,
# then only keep them and discard the rest of the template body.
# This is because using <onlyinclude> on a text fragment is
# equivalent to enclosing it in <includeonly> tags **AND**
# enclosing all the rest of the template body in <noinclude> tags.
# remove comments
text = comment.sub('', text)
# eliminate <noinclude> fragments
text = reNoinclude.sub('', text)
# eliminate unterminated <noinclude> elements
text = re.sub(r'<noinclude\s*>.*$', '', text, flags=re.DOTALL)
text = re.sub(r'<noinclude/>', '', text)
onlyincludeAccumulator = ''
for m in re.finditer('<onlyinclude>(.*?)</onlyinclude>', text, re.DOTALL):
onlyincludeAccumulator += m.group(1)
if onlyincludeAccumulator:
text = onlyincludeAccumulator
else:
text = reIncludeonly.sub('', text)
if text:
if title in templates:
logging.warn('Redefining: %s', title)
templates[title] = text
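# Example (sketch): a template page whose body is
#   'doc<onlyinclude>{{{1}}}</onlyinclude>more doc'
# is stored as just '{{{1}}}': only the <onlyinclude> part is kept.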
# ----------------------------------------------------------------------
def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim, re.IGNORECASE)
closeRE = re.compile(closeDelim, re.IGNORECASE)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -= 1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
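# Example (sketch): dropNested('a{{b{{c}}d}}e', r'{{', r'}}') -> 'ae';
# the outer block is dropped together with the nested one.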
def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res
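# Example (sketch): dropSpans([(1, 3), (5, 7)], 'abcdefg') -> 'ade'.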
# ----------------------------------------------------------------------
# WikiLinks
# See https://www.mediawiki.org/wiki/Help:Links#Internal_links
# Can be nested [[File:..|..[[..]]..|..]], [[Category:...]], etc.
# Also: [[Help:IPA for Catalan|[andora]]]
def replaceInternalLinks(text):
"""
Replaces internal links of the form:
[[title |...|label]]trail
with title concatenated with trail, when present, e.g. 's' for plural.
"""
# call this after removal of external links, so we need not worry about
# triple closing ]]].
cur = 0
res = ''
    for s, e in findBalanced(text, ['[['], [']]']):
        m = tailRE.match(text, e)
        if m:
            trail = m.group(0)
            end = m.end()
        else:
            trail = ''
            end = e
        inner = text[s + 2:e - 2]
        # find first |
        pipe = inner.find('|')
        if pipe < 0:
            title = inner
            label = title
        else:
            title = inner[:pipe].rstrip()
            # find last |
            curp = pipe + 1
            for s1, e1 in findBalanced(inner, ['[['], [']]']):
                last = inner.rfind('|', curp, s1)
                if last >= 0:
                    pipe = last  # advance
                curp = e1
            label = inner[pipe + 1:].strip()
res += text[cur:s] + makeInternalLink(title, label) + trail
cur = end
return res + text[cur:]
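# For example, with Extractor.keepLinks False:
#   >>> replaceInternalLinks('a [[Phone|phones]] call')
#   'a phones call'
#   >>> replaceInternalLinks('two [[tree]]s')
#   'two trees'
# (the tail 's' after the closing ]] is appended to the label)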
# the official version is a method in class Parser, similar to this:
# def replaceInternalLinks2(text):
# global wgExtraInterlanguageLinkPrefixes
# # the % is needed to support urlencoded titles as well
# tc = Title::legalChars() + '#%'
# # Match a link having the form [[namespace:link|alternate]]trail
# e1 = re.compile("([%s]+)(?:\\|(.+?))?]](.*)" % tc, re.S | re.D)
# # Match cases where there is no "]]", which might still be images
# e1_img = re.compile("([%s]+)\\|(.*)" % tc, re.S | re.D)
# holders = LinkHolderArray(self)
# # split the entire text string on occurrences of [[
# iterBrackets = re.compile('[[').finditer(text)
# m in iterBrackets.next()
# # get the first element (all text up to first [[)
# s = text[:m.start()]
# cur = m.end()
# line = s
# useLinkPrefixExtension = self.getTargetLanguage().linkPrefixExtension()
# e2 = None
# if useLinkPrefixExtension:
# # Match the end of a line for a word that is not followed by whitespace,
# # e.g. in the case of "The Arab al[[Razi]]", "al" will be matched
# global wgContLang
# charset = wgContLang.linkPrefixCharset()
# e2 = re.compile("((?>.*[^charset]|))(.+)", re.S | re.D | re.U)
# if self.mTitle is None:
# raise MWException(__METHOD__ + ": \self.mTitle is null\n")
# nottalk = not self.mTitle.isTalkPage()
# if useLinkPrefixExtension:
# m = e2.match(s)
# if m:
# first_prefix = m.group(2)
# else:
# first_prefix = false
# else:
# prefix = ''
# useSubpages = self.areSubpagesAllowed()
# for m in iterBrackets:
# line = text[cur:m.start()]
# cur = m.end()
# # TODO: Check for excessive memory usage
# if useLinkPrefixExtension:
# m = e2.match(e2)
# if m:
# prefix = m.group(2)
# s = m.group(1)
# else:
# prefix = ''
# # first link
# if first_prefix:
# prefix = first_prefix
# first_prefix = False
# might_be_img = False
# m = e1.match(line)
# if m: # page with normal label or alt
# label = m.group(2)
# # If we get a ] at the beginning of m.group(3) that means we have a link that is something like:
# # [[Image:Foo.jpg|[http://example.com desc]]] <- having three ] in a row fucks up,
# # the real problem is with the e1 regex
# # See bug 1300.
# #
# # Still some problems for cases where the ] is meant to be outside punctuation,
# # and no image is in sight. See bug 2095.
# #
# if label and m.group(3)[0] == ']' and '[' in label:
# label += ']' # so that replaceExternalLinks(label) works later
# m.group(3) = m.group(3)[1:]
# # fix up urlencoded title texts
# if '%' in m.group(1):
# # Should anchors '#' also be rejected?
# m.group(1) = str_replace(array('<', '>'), array('&lt', '&gt'), rawurldecode(m.group(1)))
# trail = m.group(3)
# else:
# m = e1_img.match(line)
# if m:
# # Invalid, but might be an image with a link in its caption
# might_be_img = true
# label = m.group(2)
# if '%' in m.group(1):
# m.group(1) = rawurldecode(m.group(1))
# trail = ""
# else: # Invalid form; output directly
# s += prefix + '[[' + line
# continue
# origLink = m.group(1)
# # Dont allow internal links to pages containing
# # PROTO: where PROTO is a valid URL protocol these
# # should be external links.
# if (preg_match('/^(?i:' + self.mUrlProtocols + ')/', origLink)) {
# s += prefix + '[[' + line
# continue
# }
# # Make subpage if necessary
# if useSubpages:
# link = self.maybeDoSubpageLink(origLink, label)
# else:
# link = origLink
# noforce = origLink[0] != ':'
# if not noforce:
# # Strip off leading ':'
# link = link[1:]
# nt = Title::newFromText(self.mStripState.unstripNoWiki(link))
# if nt is None:
# s += prefix + '[[' + line
# continue
# ns = nt.getNamespace()
# iw = nt.getInterwiki()
# if might_be_img { # if this is actually an invalid link
# if (ns == NS_FILE and noforce) { # but might be an image
# found = False
# while True:
# # look at the next 'line' to see if we can close it there
# next_line = iterBrackets.next()
# if not next_line:
# break
# m = explode(']]', next_line, 3)
# if m.lastindex == 3:
# # the first ]] closes the inner link, the second the image
# found = True
# label += "[[%s]]%s" % (m.group(0), m.group(1))
# trail = m.group(2)
# break
# elif m.lastindex == 2:
# # if there is exactly one ]] that is fine, we will keep looking
# label += "[[{m[0]}]]{m.group(1)}"
# else:
# # if next_line is invalid too, we need look no further
# label += '[[' + next_line
# break
# if not found:
# # we couldnt find the end of this imageLink, so output it raw
# # but dont ignore what might be perfectly normal links in the text we ve examined
# holders.merge(self.replaceInternalLinks2(label))
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# } else: # it is not an image, so output it raw
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# }
# wasblank = (text == '')
# if wasblank:
# text = link
# else:
# # Bug 4598 madness. Handle the quotes only if they come from the alternate part
# # [[Lista d''e paise d''o munno]] . <a href="...">Lista d''e paise d''o munno</a>
# # [[Criticism of Harry Potter|Criticism of ''Harry Potter'']]
# # . <a href="Criticism of Harry Potter">Criticism of <i>Harry Potter</i></a>
# text = self.doQuotes(text)
# # Link not escaped by : , create the various objects
# if noforce and not nt.wasLocalInterwiki():
# # Interwikis
# if iw and mOptions.getInterwikiMagic() and nottalk and (
# Language::fetchLanguageName(iw, None, 'mw') or
# in_array(iw, wgExtraInterlanguageLinkPrefixes)):
# # Bug 24502: filter duplicates
# if iw not in mLangLinkLanguages:
# self.mLangLinkLanguages[iw] = True
# self.mOutput.addLanguageLink(nt.getFullText())
# s = rstrip(s + prefix)
# s += strip(trail, "\n") == '' ? '': prefix + trail
# continue
# if ns == NS_FILE:
# if not wfIsBadImage(nt.getDBkey(), self.mTitle):
# if wasblank:
# # if no parameters were passed, text
# # becomes something like "File:Foo.png",
# # which we dont want to pass on to the
# # image generator
# text = ''
# else:
# # recursively parse links inside the image caption
# # actually, this will parse them in any other parameters, too,
# # but it might be hard to fix that, and it doesnt matter ATM
# text = self.replaceExternalLinks(text)
# holders.merge(self.replaceInternalLinks2(text))
# # cloak any absolute URLs inside the image markup, so replaceExternalLinks() wont touch them
# s += prefix + self.armorLinks(
# self.makeImage(nt, text, holders)) + trail
# else:
# s += prefix + trail
# continue
# if ns == NS_CATEGORY:
# s = rstrip(s + "\n") # bug 87
# if wasblank:
# sortkey = self.getDefaultSort()
# else:
# sortkey = text
# sortkey = Sanitizer::decodeCharReferences(sortkey)
# sortkey = str_replace("\n", '', sortkey)
# sortkey = self.getConverterLanguage().convertCategoryKey(sortkey)
# self.mOutput.addCategory(nt.getDBkey(), sortkey)
# s += strip(prefix + trail, "\n") == '' ? '' : prefix + trail
# continue
# }
# }
# # Self-link checking. For some languages, variants of the title are checked in
# # LinkHolderArray::doVariants() to allow batching the existence checks necessary
# # for linking to a different variant.
# if ns != NS_SPECIAL and nt.equals(self.mTitle) and !nt.hasFragment():
# s += prefix + Linker::makeSelfLinkObj(nt, text, '', trail)
# continue
# # NS_MEDIA is a pseudo-namespace for linking directly to a file
# # @todo FIXME: Should do batch file existence checks, see comment below
# if ns == NS_MEDIA:
# # Give extensions a chance to select the file revision for us
# options = []
# descQuery = False
# Hooks::run('BeforeParserFetchFileAndTitle',
# [this, nt, &options, &descQuery])
# # Fetch and register the file (file title may be different via hooks)
# file, nt = self.fetchFileAndTitle(nt, options)
# # Cloak with NOPARSE to avoid replacement in replaceExternalLinks
# s += prefix + self.armorLinks(
# Linker::makeMediaLinkFile(nt, file, text)) + trail
# continue
# # Some titles, such as valid special pages or files in foreign repos, should
# # be shown as bluelinks even though they are not included in the page table
# #
# # @todo FIXME: isAlwaysKnown() can be expensive for file links; we should really do
# # batch file existence checks for NS_FILE and NS_MEDIA
# if iw == '' and nt.isAlwaysKnown():
# self.mOutput.addLink(nt)
# s += self.makeKnownLinkHolder(nt, text, array(), trail, prefix)
# else:
# # Links will be added to the output link list after checking
# s += holders.makeHolder(nt, text, array(), trail, prefix)
# }
# return holders
def makeInternalLink(title, label):
    colon = title.find(':')
    if colon > 0 and title[:colon] not in acceptedNamespaces:
        return ''
    if colon == 0:
        # drop also :File:
        colon2 = title.find(':', colon + 1)
        if colon2 > 1 and title[colon + 1:colon2] not in acceptedNamespaces:
            return ''
    if Extractor.keepLinks:
        return '<a href="%s">%s</a>' % (urllib.quote(title.encode('utf-8')), label)
    else:
        return label
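# For example, makeInternalLink('Phone', 'phones') gives 'phones' when
# Extractor.keepLinks is False, while a title in a namespace that is not in
# acceptedNamespaces, e.g. makeInternalLink('File:Foo.jpg', 'Foo') with the
# default settings, gives ''.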
# ----------------------------------------------------------------------
# External links
# from: https://doc.wikimedia.org/mediawiki-core/master/php/DefaultSettings_8php_source.html
wgUrlProtocols = [
'bitcoin:', 'ftp://', 'ftps://', 'geo:', 'git://', 'gopher://', 'http://',
'https://', 'irc://', 'ircs://', 'magnet:', 'mailto:', 'mms://', 'news:',
'nntp://', 'redis://', 'sftp://', 'sip:', 'sips:', 'sms:', 'ssh://',
'svn://', 'tel:', 'telnet://', 'urn:', 'worldwind://', 'xmpp:', '//'
]
# from: https://doc.wikimedia.org/mediawiki-core/master/php/Parser_8php_source.html
# Constants needed for external link processing
# Everything except bracket, space, or control characters
# \p{Zs} is unicode 'separator, space' category. It covers the space 0x20,
# as well as U+3000 IDEOGRAPHIC SPACE (see bug 19052)
EXT_LINK_URL_CLASS = r'[^][<>"\x00-\x20\x7F\s]'
ExtLinkBracketedRegex = re.compile(
'\[(((?i)' + '|'.join(wgUrlProtocols) + ')' + EXT_LINK_URL_CLASS + r'+)\s*([^\]\x00-\x08\x0a-\x1F]*?)\]',
re.S | re.U)
EXT_IMAGE_REGEX = re.compile(
r"""^(http://|https://)([^][<>"\x00-\x20\x7F\s]+)
/([A-Za-z0-9_.,~%\-+&;#*?!=()@\x80-\xFF]+)\.((?i)gif|png|jpg|jpeg)$""",
re.X | re.S | re.U)
def replaceExternalLinks(text):
s = ''
cur = 0
for m in ExtLinkBracketedRegex.finditer(text):
s += text[cur:m.start()]
cur = m.end()
url = m.group(1)
label = m.group(3)
# # The characters '<' and '>' (which were escaped by
# # removeHTMLtags()) should not be included in
# # URLs, per RFC 2396.
# m2 = re.search('&(lt|gt);', url)
# if m2:
# link = url[m2.end():] + ' ' + link
# url = url[0:m2.end()]
# If the link text is an image URL, replace it with an <img> tag
# This happened by accident in the original parser, but some people used it extensively
m = EXT_IMAGE_REGEX.match(label)
if m:
label = makeExternalImage(label)
# Use the encoded URL
# This means that users can paste URLs directly into the text
# Funny characters like ö aren't valid in URLs anyway
# This was changed in August 2004
        s += makeExternalLink(url, label)  # + trail
return s + text[cur:]
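# For example, with Extractor.keepLinks False:
#   >>> replaceExternalLinks('see [http://example.com the site]')
#   'see the site'
# With Extractor.keepLinks True the URL is kept as an <a href="..."> element.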
def makeExternalLink(url, anchor):
    """Function applied to external links"""
    if Extractor.keepLinks:
        return '<a href="%s">%s</a>' % (urllib.quote(url.encode('utf-8')), anchor)
else:
return anchor
def makeExternalImage(url, alt=''):
if Extractor.keepLinks:
return '<img src="%s" alt="%s">' % (url, alt)
else:
return alt
# ----------------------------------------------------------------------
# match tail after wikilink
tailRE = re.compile(r'\w+')

syntaxhighlight = re.compile('&lt;syntaxhighlight .*?&gt;(.*?)&lt;/syntaxhighlight&gt;', re.DOTALL)

expand_templates = True
def clean(extractor, text):
    """
    Transforms wiki markup.
    @see https://www.mediawiki.org/wiki/Help:Formatting
    """
    if expand_templates:
        # expand templates
        # See: http://www.mediawiki.org/wiki/Help:Templates
        text = extractor.expandTemplates(text)
    else:
        # Drop transclusions (template, parser functions)
        text = dropNested(text, r'{{', r'}}')

    # Drop tables
    text = dropNested(text, r'{\|', r'\|}')

    # replace external links
    text = replaceExternalLinks(text)

    # replace internal links
    text = replaceInternalLinks(text)

    # drop MagicWords behavioral switches
    text = magicWordsRE.sub('', text)

    # ############### Process HTML ###############
# turn into HTML, except for the content of <syntaxhighlight>
res = ''
cur = 0
for m in syntaxhighlight.finditer(text):
end = m.end()
res += unescape(text[cur:m.start()]) + m.group(1)
cur = end
text = res + unescape(text[cur:])
# Handle bold/italic/quote
if extractor.toHTML:
text = bold_italic.sub(r'<b>\1</b>', text)
text = bold.sub(r'<b>\1</b>', text)
text = italic.sub(r'<i>\1</i>', text)
else:
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
    text = quote_quote.sub(r'"\1"', text)
    # residuals of unbalanced quotes
    text = text.replace("'''", '').replace("''", '"')
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
        spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
    for tag in discardElements:
        text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
if not extractor.toHTML:
# Turn into text what is left (&amp;nbsp;) and <syntaxhighlight>
text = unescape(text)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', u'«').replace('>>', u'»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
    text = re.sub(u' ([,:\.\)\]»])', r'\1', text)
    text = re.sub(u'([\[\(«]) ', r'\1', text)
    text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U)  # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
return text
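# As a rough sketch of the whole pipeline (assuming expand_templates False and
# Extractor.toHTML False), a fragment such as
#   "'''bold''' [[Phone|phones]], see [http://example.com here]"
# comes out approximately as: bold phones, see here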
# skip level 1, it is page name level
section = re.compile(r'(==+)\s*(.*?)\s*\1')
listOpen = {'*': '<ul>', '#': '<ol>', ';': '<dl>', ':': '<dl>'}
listClose = {'*': '</ul>', '#': '</ol>', ';': '</dl>', ':': '</dl>'}
listItem = {'*': '<li>%s</li>', '#': '<li>%s</li>', ';': '<dt>%s</dt>',
            ':': '<dd>%s</dd>'}
def compact(text):
    """Deal with headers, lists, empty sections, residuals of tables.

    :param text: the cleaned text of a page to compact into paragraphs.
    """
page = [] # list of paragraph
headers = {} # Headers for unfilled sections
emptySection = False # empty sections are discarded
listLevel = '' # nesting of lists
for line in text.split('\n'):
if not line:
continue
# Handle section titles
m = section.match(line)
if m:
title = m.group(2)
lev = len(m.group(1))
            if Extractor.toHTML:
page.append("<h%d>%s</h%d>" % (lev, title, lev))
if title and title[-1] not in '!?':
title += '.'
headers[lev] = title
# drop previous headers
for i in headers.keys():
if i > lev:
del headers[i]
emptySection = True
continue
# Handle page title
if line.startswith('++'):
title = line[2:-2]
if title:
if title[-1] not in '!?':
title += '.'
page.append(title)
# handle indents
elif line[0] == ':':
            # page.append(line.lstrip(':*#;'))
            continue
# handle lists
        elif line[0] in '*#;:':
            if Extractor.toHTML:
                i = 0
                for c, n in izip_longest(listLevel, line, fillvalue=''):
                    if not n or n not in '*#;:':
                        if c:
                            page.append(listClose[c])
                            listLevel = listLevel[:-1]
                            continue
                        else:
                            break
                    # n != ''
                    if c != n and (not c or (c not in ';:' and n not in ';:')):
                        if c:
                            # close level
                            page.append(listClose[c])
                            listLevel = listLevel[:-1]
                        listLevel += n
                        page.append(listOpen[n])
                    i += 1
                n = line[i - 1]  # last list char
                line = line[i:].strip()
                if line:  # FIXME: n is '"'
                    page.append(listItem[n] % line)
            else:
                continue
        elif len(listLevel):
            for c in reversed(listLevel):
                page.append(listClose[c])
            listLevel = []
# Drop residuals of lists
elif line[0] in '{|' or line[-1] == '}':
continue
# Drop irrelevant lines
elif (line[0] == '(' and line[-1] == ')') or line.strip('.-') == '':
continue
elif len(headers):
            if Extractor.keepSections:
items = headers.items()
items.sort()
for (i, v) in items:
page.append(v)
headers.clear()
            page.append(line)  # first line
emptySection = False
elif not emptySection:
page.append(line)
# dangerous
# # Drop preformatted
# elif line[0] == ' ':
# continue
return page
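# Note: in plain-text mode (Extractor.toHTML False) list items ('*', '#', ';',
# ':') are dropped altogether; in HTML mode they are wrapped in the
# <ul>/<ol>/<dl> and <li>/<dt>/<dd> elements defined above.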
def handle_unicode(entity):
numeric_code = int(entity[2:-1])
if numeric_code >= 0x10000: return ''
return unichr(numeric_code)
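# For example:
#   >>> handle_unicode('&#960;')
#   u'\u03c0'
# while entities beyond the BMP (numeric code >= 0x10000) are dropped ('').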
# ------------------------------------------------------------------------------
# Output
class NextFile(object):
"""
Synchronous generation of next available file name.
"""
filesPerDir = 100
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = -1
self.file_index = -1
def next(self):
self.file_index = (self.file_index + 1) % NextFile.filesPerDir
if self.file_index == 0:
self.dir_index += 1
dirname = self._dirname()
if not os.path.isdir(dirname):
os.makedirs(dirname)
return self._filepath()
def _dirname(self):
char1 = self.dir_index % 26
char2 = self.dir_index / 26 % 26
return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
def _filepath(self):
return '%s/wiki_%02d' % (self._dirname(), self.file_index)
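# Successive calls to NextFile('text').next() yield 'text/AA/wiki_00',
# 'text/AA/wiki_01', ..., 'text/AA/wiki_99', 'text/AB/wiki_00', and so on;
# directories are created on demand.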
class OutputSplitter(object):
"""
File-like object, that splits output to multiple files of a given max size.
"""
def __init__(self, nextFile, max_file_size=0, compress=True):
"""
        :param nextFile: a NextFile object from which to obtain filenames
            to use.
        :param max_file_size: the maximum size of each file.
        :param compress: whether to write data with bzip compression.
"""
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(self.nextFile.next())
def reserve(self, size):
if self.file.tell() + size > self.max_file_size:
self.close()
self.file = self.open(self.nextFile.next())
def write(self, data):
self.reserve(len(data))
self.file.write(data)
def close(self):
self.file.close()
def open(self, filename):
if self.compress:
return bz2.BZ2File(filename + '.bz2', 'w')
else:
return open(filename, 'w')
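# Minimal usage sketch (here 'doc' is a hypothetical <doc>...</doc> string):
#   out = OutputSplitter(NextFile('text'), max_file_size=1024 ** 2, compress=False)
#   out.write(doc.encode('utf-8'))
#   out.close()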
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
#                     1     2               3      4
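# For example, on '  <title>Foo</title>' tagRE yields group(2) == 'title',
# group(3) == 'Foo' and group(4) == '</title>'; m.lastindex == 4 thus marks a
# tag opened and closed on the same line.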
def load_templates(file, output_file=None):
"""
Load templates from :param file:.
    :param output_file: file where to save templates and modules.
    """
global templateNamespace, templatePrefix
    templatePrefix = templateNamespace + ':'
    global moduleNamespace, modulePrefix
    modulePrefix = moduleNamespace + ':'
articles = 0
page = []
inText = False
if output_file:
output = codecs.open(output_file, 'wb', 'utf-8')
for line in file:
line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
2015-03-22 20:45:17 +08:00
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
elif tag == 'title':
title = m.group(3)
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
2015-03-22 20:45:17 +08:00
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
            if not output_file and not templateNamespace:  # do not know it yet
                # we reconstruct it from the first title
                colon = title.find(':')
                if colon > 1:
                    templateNamespace = title[:colon]
                    templatePrefix = title[:colon + 1]
            # FIXME: should reconstruct also moduleNamespace
            if title.startswith(templatePrefix):
                define_template(title, page)
# save templates and modules to file
if output_file and (title.startswith(templatePrefix) or
title.startswith(modulePrefix)):
output.write('<page>\n')
output.write(' <title>%s</title>\n' % title)
output.write(' <ns>10</ns>\n')
output.write(' <text>')
for line in page:
output.write(line)
output.write(' </text>\n')
output.write('</page>\n')
page = []
articles += 1
if articles % 100000 == 0:
logging.info("Preprocessed %d pages", articles)
if output_file:
output.close()
logging.info("Saved %d templates to '%s'", len(templates), output_file)
def process_dump(input_file, template_file, out_file, file_size, file_compress,
process_count):
    """
    :param input_file: name of the wikipedia dump file; '-' to read from stdin
    :param template_file: optional file with template definitions.
    :param out_file: directory where to store extracted data, or '-' for stdout
    :param file_size: max size of each extracted file, or None for no max (one file)
    :param file_compress: whether to compress files with bzip.
    :param process_count: number of extraction processes to spawn.
    """
    global urlbase
    global knownNamespaces
    global templateNamespace, templatePrefix
    global moduleNamespace, modulePrefix
    if input_file == '-':
        input = sys.stdin
    else:
        input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
    # collect siteinfo
    for line in input:
        line = line.decode('utf-8')
        m = tagRE.search(line)
        if not m:
            continue
        tag = m.group(2)
        if tag == 'base':
            # discover urlbase from the xml dump file
            # /mediawiki/siteinfo/base
            base = m.group(3)
            urlbase = base[:base.rfind("/")]
        elif tag == 'namespace':
            knownNamespaces.add(m.group(3))
            if re.search('key="10"', line):
                templateNamespace = m.group(3)
                templatePrefix = templateNamespace + ':'
            elif re.search('key="828"', line):
                moduleNamespace = m.group(3)
                modulePrefix = moduleNamespace + ':'
        elif tag == '/siteinfo':
            break
    if expand_templates:
        # preprocess
        template_load_start = default_timer()
        if template_file and os.path.exists(template_file):
            logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", template_file)
            file = fileinput.FileInput(template_file, openhook=fileinput.hook_compressed)
            load_templates(file)
            file.close()
        else:
            if input_file == '-':
                # can't scan then reset stdin; must error w/ suggestion to specify template_file
                raise ValueError("to use templates with stdin dump, must supply explicit template-file")
            logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
            load_templates(input, template_file)
            input.close()
            input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
        template_load_elapsed = default_timer() - template_load_start
        logging.info("Loaded %d templates in %.1fs", len(templates), template_load_elapsed)
    if out_file == '-':
        output = sys.stdout
        if file_compress:
            logging.warn("writing to stdout, so no output compression (use an external tool)")
    else:
        nextFile = NextFile(out_file)
        output = OutputSplitter(nextFile, file_size, file_compress)

    # process pages
    logging.info("Starting page extraction from %s.", input_file)
    extract_start = default_timer()
    # Parallel Map/Reduce:
    # - pages to be processed are dispatched to workers
    # - a reduce process collects the results, sorts them and prints them.

    maxsize = 10 * process_count
    # output queue
    output_queue = Queue(maxsize=maxsize)

    # Reduce job that sorts and prints output
    reduce = Process(target=reduce_process, args=(output_queue, output))
    reduce.start()

    # initialize jobs queue
    jobs_queue = Queue(maxsize=maxsize)

    # start worker processes
    logging.info("Using %d extract processes.", process_count)
    workers = []
    for _ in xrange(max(1, process_count)):
        extractor = Process(target=extract_process,
                            args=(jobs_queue, output_queue))
        extractor.daemon = True  # only live while parent process lives
        extractor.start()
        workers.append(extractor)

    # Mapper process
    # we collect individual lines, since str.join() is significantly faster
    # than concatenation
    page = []
    id = None
    last_id = None
    ordinal = 0  # page count
    inText = False
    redirect = False
    for line in input:
        line = line.decode('utf-8')
        if '<' not in line:  # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
redirect = False
elif tag == 'id' and not id:
id = m.group(3)
elif tag == 'title':
title = m.group(3)
elif tag == 'redirect':
redirect = True
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
            if m.lastindex == 4:  # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
colon = title.find(':')
if (colon < 0 or title[:colon] in acceptedNamespaces) and id != last_id and \
                    not redirect and not title.startswith(templateNamespace):
                job = (id, title, page, ordinal)
                jobs_queue.put(job)  # goes to any available extract_process
                last_id = id
                ordinal += 1
id = None
page = []
input.close()
    # signal termination
    for _ in workers:
        jobs_queue.put(None)
    # wait for workers to terminate
    for w in workers:
        w.join()
# signal end of work to reduce process
output_queue.put(None)
# wait for it to finish
    reduce.join()

    if output != sys.stdout:
        output.close()
    extract_duration = default_timer() - extract_start
    extract_rate = ordinal / extract_duration
    logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
                 process_count, ordinal, extract_duration, extract_rate)
# ----------------------------------------------------------------------
# Multiprocess support
def extract_process(jobs_queue, output_queue):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
    :param jobs_queue: where to get jobs.
    :param output_queue: where to queue extracted text for output.
    """
while True:
job = jobs_queue.get() # job is (id, title, page, ordinal)
        if job:
            out = StringIO()  # memory buffer
            Extractor(*job[:3]).extract(out)  # (id, title, page)
            text = out.getvalue()
            output_queue.put((job[3], text))  # (ordinal, extracted_text)
            out.close()
else:
break
def reduce_process(output_queue, output):
"""Pull finished article text, write series of files (or stdout)
:param output_queue: text to be output.
:param output: file object where to print.
"""
    interval_start = default_timer()
    period = 100000
    # FIXME: use a heap
    ordering_buffer = {}  # collected pages
    next_ordinal = 0  # sequence number of pages
    while True:
if next_ordinal in ordering_buffer:
output.write(ordering_buffer.pop(next_ordinal))
next_ordinal += 1
# progress report
if next_ordinal % period == 0:
interval_rate = period / (default_timer() - interval_start)
                logging.info("Extracted %d articles (%.1f art/s)",
                             next_ordinal, interval_rate)
interval_start = default_timer()
else:
# mapper puts None to signal finish
pair = output_queue.get()
if not pair:
break
ordinal, text = pair
ordering_buffer[ordinal] = text
# ----------------------------------------------------------------------
# Minimum size of output files
minFileSize = 200 * 1024
def main():
    global urlbase, acceptedNamespaces
    global expand_templates, templateCache

    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
    groupO = parser.add_argument_group('Output')
    groupO.add_argument("-o", "--output", default="text",
                        help="directory for extracted files (or '-' for dumping to stdout)")
    groupO.add_argument("-b", "--bytes", default="1M",
                        help="maximum bytes per output file (default %(default)s)",
                        metavar="n[KMG]")
    groupO.add_argument("-c", "--compress", action="store_true",
                        help="compress output files using bzip")

    groupP = parser.add_argument_group('Processing')
    groupP.add_argument("--html", action="store_true",
                        help="produce HTML output, subsumes --links")
    groupP.add_argument("-l", "--links", action="store_true",
                        help="preserve links")
    groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
                        help="accepted namespaces")
    groupP.add_argument("--templates",
                        help="use or create file containing templates")
    groupP.add_argument("--no-templates", action="store_false",
                        help="Do not expand templates")
    default_process_count = cpu_count() - 1
    parser.add_argument("--processes", type=int, default=default_process_count,
                        help="Number of processes to use (default %(default)s)")
groupS = parser.add_argument_group('Special')
groupS.add_argument("-q", "--quiet", action="store_true",
help="suppress reporting progress info")
groupS.add_argument("--debug", action="store_true",
help="print debug info")
groupS.add_argument("-a", "--article", action="store_true",
2015-09-15 00:05:36 +08:00
help="analyze a file containing a single article (debug option)")
2015-04-20 12:56:29 +08:00
groupS.add_argument("-v", "--version", action="version",
2015-03-22 20:45:17 +08:00
version='%(prog)s ' + version,
help="print program version")
args = parser.parse_args()
Extractor.keepLinks = args.links
Extractor.toHTML = args.html
if args.html:
Extractor.keepLinks = True
    expand_templates = args.no_templates
    try:
        if args.bytes[-1].lower() in 'kmg':
            power = 'kmg'.find(args.bytes[-1].lower()) + 1
            file_size = int(args.bytes[:-1]) * 1024 ** power
        else:
            file_size = int(args.bytes)
        if file_size < minFileSize:
            raise ValueError()
    except ValueError:
        logging.error('Insufficient or invalid size: %s', args.bytes)
        return
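    # e.g. --bytes 1M gives 1 * 1024 ** 2 = 1048576 bytes and --bytes 500K
    # gives 500 * 1024 = 512000 bytes; sizes below minFileSize are rejected.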
    if args.namespaces:
        acceptedNamespaces = set(args.namespaces.split(','))

    FORMAT = '%(levelname)s: %(message)s'
    logging.basicConfig(format=FORMAT)

    logger = logging.getLogger()
    if not args.quiet:
        logger.setLevel(logging.INFO)
    if args.debug:
        logger.setLevel(logging.DEBUG)
    input_file = args.input

    if not Extractor.keepLinks:
        ignoreTag('a')

    # sharing cache of parser templates is too slow:
    # manager = Manager()
    # templateCache = manager.dict()
if args.article:
if args.templates:
if os.path.exists(args.templates):
with open(args.templates) as file:
load_templates(file)
with open(input_file) as file:
page = file.read().decode('utf-8')
m = re.search(r'<id>(.*)</id>', page)
        id = m.group(1) if m else 0
        m = re.search(r'<title>(.*)</title>', page)
        if m:
            title = m.group(1)
        else:
            logging.error('Missing title element')
            return
        Extractor(id, title, [page]).extract(sys.stdout)
        return
    output_path = args.output
    if output_path != '-' and not os.path.isdir(output_path):
        try:
            os.makedirs(output_path)
        except:
            logging.error('Could not create: %s', output_path)
            return

    process_dump(input_file, args.templates, output_path, file_size,
                 args.compress, args.processes)
if __name__ == '__main__':
main()