
Commit

Added dev branch features.
Bumped to version 0.6
buriy committed Jul 26, 2015
1 parent 1546587 commit 24bb20c
Showing 9 changed files with 223 additions and 173 deletions.
4 changes: 4 additions & 0 deletions README
@@ -34,6 +34,9 @@ Command-line usage::

python -m readability.readability -u http://pypi.python.org/pypi/readability-lxml

To open the resulting page in a browser::

python -m readability.readability -b -u http://pypi.python.org/pypi/readability-lxml

Using positive/negative keywords example::

@@ -56,3 +59,4 @@ Updates
- 0.3 Added Document.encoding, positive_keywords and negative_keywords
- 0.4 Added Videos loading and allowed more images per paragraph
- 0.5 Preparing a release to support Python versions 2.6, 2.7, 3.3 and 3.4
- 0.6 Finally a release which supports Python versions 2.6, 2.7, 3.3 and 3.4
20 changes: 20 additions & 0 deletions readability/browser.py
@@ -0,0 +1,20 @@
def open_in_browser(html):
"""
Open the HTML document in a web browser, saving it to a temporary
file to open it. Note that this does not delete the file after
use. This is mainly meant for debugging.
"""
import os
import webbrowser
import tempfile
handle, fn = tempfile.mkstemp(suffix='.html')
f = os.fdopen(handle, 'wb')
try:
f.write(b"<meta charset='UTF-8' />")
f.write(html.encode('utf-8'))
finally:
# we leak the file itself here, but we should at least close it
f.close()
url = 'file://' + fn.replace(os.path.sep, '/')
webbrowser.open(url)
return url
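
A minimal usage sketch for the new helper (the HTML string below is invented for illustration; open_in_browser accepts any unicode HTML)::

    from readability.browser import open_in_browser

    html = u"<html><body><h1>Hello</h1><p>Some readable content.</p></body></html>"
    url = open_in_browser(html)  # writes a temp .html file and opens it in the default browser
    print(url)                   # e.g. file:///tmp/tmpXXXXXX.html
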
5 changes: 3 additions & 2 deletions readability/cleaners.py
@@ -2,7 +2,7 @@
import re
from lxml.html.clean import Cleaner

bad_attrs = ['style', '[-a-z]*color', 'background[-a-z]*', 'on*']
bad_attrs = ['width', 'height', 'style', '[-a-z]*color', 'background[-a-z]*', 'on*']
single_quoted = "'[^']+'"
double_quoted = '"[^"]+"'
non_space = '[^ "\'>]+'
@@ -20,7 +20,8 @@ def clean_attributes(html):
return html

def normalize_spaces(s):
if not s: return ''
if not s:
return ''
"""replace any sequence of whitespace
characters with a single space"""
return ' '.join(s.split())
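
A rough sketch of what the widened bad_attrs list means in practice (the markup is made up; exact whitespace in the cleaned output may differ)::

    from readability.cleaners import clean_attributes, normalize_spaces

    print(normalize_spaces(u'one \t two\n  three'))  # -> 'one two three'

    # with 'width' and 'height' added to bad_attrs, sizing attributes should now be
    # stripped along with style and color/background attributes
    dirty = u'<html><body><img src="a.png" width="600" height="400" style="border:1px"></body></html>'
    print(clean_attributes(dirty))  # width/height/style should be gone from the result
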
5 changes: 5 additions & 0 deletions readability/compat/__init__.py
@@ -4,3 +4,8 @@
It mainly exists because there are certain incompatibilities in the Python
syntax that can only be solved by conditionally importing different functions.
"""
import sys
if sys.version_info[0] == 2:
str_ = unicode
elif sys.version_info[0] == 3:
str_ = str
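
The compat shim gives the rest of the package a single name for the text type on both Python lines. A small sketch of the kind of helper it enables (ensure_text is hypothetical, not part of the package)::

    from readability.compat import str_

    def ensure_text(value, encoding='utf-8'):
        # hypothetical helper: bytes are decoded, everything else is coerced to text
        if isinstance(value, bytes):
            return value.decode(encoding, 'replace')
        return str_(value)

    ensure_text(b'caf\xc3\xa9')  # u'café' on both Python 2 and 3
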
62 changes: 45 additions & 17 deletions readability/debug.py
@@ -1,25 +1,53 @@
def save_to_file(text, filename):
f = open(filename, 'wt')
f.write('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />')
f.write(text.encode('utf-8'))
f.close()
import re

uids = {}
def describe(node, depth=2):

#FIXME: use with caution, can leak memory
uids = {}
uids_document = None


def describe_node(node):
global uids
if node is None:
return ''
if not hasattr(node, 'tag'):
return "[%s]" % type(node)
name = node.tag
if node.get('id', ''): name += '#'+node.get('id')
if node.get('class', ''):
name += '.' + node.get('class').replace(' ','.')
if node.get('id', ''):
name += '#' + node.get('id')
if node.get('class', ''):
name += '.' + node.get('class').replace(' ', '.')
if name[:4] in ['div#', 'div.']:
name = name[3:]
if name in ['tr', 'td', 'div', 'p']:
if not node in uids:
uid = uids[node] = len(uids)+1
else:
uid = uids.get(node)
name += "%02d" % (uid)
if depth and node.getparent() is not None:
return name+' - '+describe(node.getparent(), depth-1)
uid = uids.get(node)
if uid is None:
uid = uids[node] = len(uids) + 1
name += "{%02d}" % uid
return name


def describe(node, depth=2):
global uids, uids_document
doc = node.getroottree().getroot()
if doc != uids_document:
uids = {}
uids_document = doc

#return repr(NodeRepr(node))
parent = ''
if depth and node.getparent() is not None:
parent = describe(node.getparent(), depth=depth - 1)
return parent + '/' + describe_node(node)


RE_COLLAPSE_WHITESPACES = re.compile('\s+', re.U)


def text_content(elem, length=40):
content = RE_COLLAPSE_WHITESPACES.sub(' ', elem.text_content().replace('\r', ''))
if len(content) < length:
return content
return content[:length] + '...'
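
A sketch of how the reworked debug helpers can be used while tracing extraction (the sample markup is invented)::

    import lxml.html
    from readability.debug import describe, text_content

    doc = lxml.html.fromstring(
        u'<html><body><div id="main"><p class="intro">Hello   world, from a rather long paragraph.</p></div></body></html>')
    p = doc.xpath('//p')[0]
    print(describe(p))       # parent chain plus node, e.g. '/body/#main/p.intro'
    print(text_content(p))   # whitespace collapsed, truncated to 40 chars + '...' if longer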


64 changes: 36 additions & 28 deletions readability/encoding.py
@@ -2,15 +2,34 @@
import chardet
import sys


RE_CHARSET = re.compile(br'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
RE_PRAGMA = re.compile(br'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
RE_XML = re.compile(br'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

CHARSETS = {
'big5': 'big5hkscs',
'gb2312': 'gb18030',
'ascii': 'utf-8',
'maccyrillic': 'cp1251',
'win1251': 'cp1251',
'win-1251': 'cp1251',
'windows-1251': 'cp1251',
}

def fix_charset(encoding):
"""Overrides encoding when charset declaration
or charset determination is a subset of a larger
charset. Created because of issues with Chinese websites"""
encoding = encoding.lower()
return CHARSETS.get(encoding, encoding)


def get_encoding(page):
# Regex for XML and HTML Meta charset declaration
charset_re = re.compile(br'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(br'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(br'^<\?xml.*?encoding=["\']*(.+?)["\'>]')

declared_encodings = (charset_re.findall(page) +
pragma_re.findall(page) +
xml_re.findall(page))
declared_encodings = (RE_CHARSET.findall(page) +
RE_PRAGMA.findall(page) +
RE_XML.findall(page))

# Try any declared encodings
for declared_encoding in declared_encodings:
@@ -21,34 +40,23 @@ def get_encoding(page):
# ever use non-ascii characters in the name of an encoding.
declared_encoding = declared_encoding.decode('ascii', 'replace')

page.decode(custom_decode(declared_encoding))
return custom_decode(declared_encoding)
encoding = fix_charset(declared_encoding)

# Now let's decode the page
page.decode()
# It worked!
return encoding
except UnicodeDecodeError:
pass

# Fallback to chardet if declared encodings fail
text = re.sub(b'</?[^>]*>\s*', b' ', page)
# Remove all HTML tags, and leave only text for chardet
text = re.sub(b'(\s*</?[^>]*>)+\s*', b' ', page).strip()
enc = 'utf-8'
if not text.strip() or len(text) < 10:
if len(text) < 10:
return enc # can't guess
res = chardet.detect(text)
enc = res['encoding'] or 'utf-8'
#print '->', enc, "%.2f" % res['confidence']
enc = custom_decode(enc)
enc = fix_charset(enc)
return enc

def custom_decode(encoding):
"""Overrides encoding when charset declaration
or charset determination is a subset of a larger
charset. Created because of issues with Chinese websites"""
encoding = encoding.lower()
alternates = {
'big5': 'big5hkscs',
'gb2312': 'gb18030',
'ascii': 'utf-8',
'MacCyrillic': 'cp1251',
}
if encoding in alternates:
return alternates[encoding]
else:
return encoding
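
A short sketch of the new module-level flow (the byte string is a made-up page)::

    from readability.encoding import get_encoding

    raw = b'<html><head><meta charset="GB2312"></head><body>...</body></html>'
    enc = get_encoding(raw)           # 'gb18030' -- the declared charset mapped through CHARSETS
    text = raw.decode(enc, 'replace')
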
34 changes: 20 additions & 14 deletions readability/htmls.py
@@ -5,26 +5,25 @@

from .cleaners import normalize_spaces, clean_attributes
from .encoding import get_encoding
from .compat import str_

utf8_parser = lxml.html.HTMLParser(encoding='utf-8')

if sys.version_info[0] == 2:
str = unicode

def build_doc(page):
if isinstance(page, str):
enc = None
page_unicode = page
if isinstance(page, str_):
encoding = None
decoded_page = page
else:
enc = get_encoding(page) or 'utf-8'
page_unicode = page.decode(enc, 'replace')
doc = lxml.html.document_fromstring(page_unicode.encode('utf-8', 'replace'), parser=utf8_parser)
return doc, enc
encoding = get_encoding(page) or 'utf-8'
decoded_page = page.decode(encoding, 'replace')

# XXX: we have to do .decode and .encode even for utf-8 pages to remove bad characters
doc = lxml.html.document_fromstring(decoded_page.encode('utf-8', 'replace'), parser=utf8_parser)
return doc, encoding

def js_re(src, pattern, flags, repl):
return re.compile(pattern, flags).sub(src, repl.replace('$', '\\'))


def normalize_entities(cur_title):
entities = {
u'\u2014':'-',
@@ -58,6 +57,10 @@ def add_match(collection, text, orig):
if text.replace('"', '') in orig.replace('"', ''):
collection.add(text)

TITLE_CSS_HEURISTICS = ['#title', '#head', '#heading', '.pageTitle',
'.news_title', '.title', '.head', '.heading',
'.contentheading', '.small_header_red']

def shorten_title(doc):
title = doc.find('.//title')
if title is None or title.text is None or len(title.text) == 0:
@@ -74,7 +77,7 @@ def shorten_title(doc):
if e.text_content():
add_match(candidates, e.text_content(), orig)

for item in ['#title', '#head', '#heading', '.pageTitle', '.news_title', '.title', '.head', '.heading', '.contentheading', '.small_header_red']:
for item in TITLE_CSS_HEURISTICS:
for e in doc.cssselect(item):
if e.text:
add_match(candidates, e.text, orig)
@@ -107,8 +110,11 @@ def shorten_title(doc):
return title

def get_body(doc):
[ elem.drop_tree() for elem in doc.xpath('.//script | .//link | .//style') ]
raw_html = str(tostring(doc.body or doc))
for elem in doc.xpath('.//script | .//link | .//style'):
elem.drop_tree()
    # tostring() always returns a utf-8 encoded string
    # FIXME: wouldn't it be better to use tounicode?
raw_html = str_(tostring(doc.body or doc))
cleaned = clean_attributes(raw_html)
try:
#BeautifulSoup(cleaned) #FIXME do we really need to try loading it?
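
Taken together, the pieces above fit roughly like this (page.html is a hypothetical local file)::

    from readability.htmls import build_doc, shorten_title, get_body

    with open('page.html', 'rb') as f:
        raw = f.read()
    doc, encoding = build_doc(raw)   # accepts bytes or unicode; returns (lxml document, detected encoding or None)
    print(encoding)
    print(shorten_title(doc))        # heuristic title, helped by the TITLE_CSS_HEURISTICS selectors
    print(get_body(doc))             # body HTML with script/link/style dropped and bad attributes cleaned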
