#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pyAggr3g470r - A Web based news aggregator.
# Copyright (C) 2010-2014 Cédric Bonhomme - http://cedricbonhomme.org/
#
# For more information : https://bitbucket.org/cedricbonhomme/pyaggr3g470r/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 1.6 $"
__date__ = "$Date: 2010/12/07 $"
__revision__ = "$Date: 2013/11/17 $"
__copyright__ = "Copyright (c) Cedric Bonhomme"
__license__ = "AGPLv3"
#
# This file provides functions used for:
# - the database management;
# - generation of tags cloud;
# - HTML processing;
# - e-mail notifications.
#
import os
import re
import glob
import opml
import operator
import calendar
import urllib.request
import urllib.error
import http.server
from bs4 import BeautifulSoup
from collections import Counter
from contextlib import contextmanager
import conf
from pyaggr3g470r import db
from pyaggr3g470r.models import User, Feed
# regular expressions used to find URLs in a piece of text
url_finders = [
    re.compile(r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|(((news|telnet|nntp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\.)[-A-Za-z0-9\.]+)(:[0-9]*)?/[-A-Za-z0-9_\$\.\+\!\*\(\),;:@&=\?/~\#\%]*[^]'\.}>\),\"]"),
    re.compile(r"([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}|(((news|telnet|nntp|file|http|ftp|https)://)|(www|ftp)[-A-Za-z0-9]*\.)[-A-Za-z0-9\.]+)(:[0-9]*)?"),
    re.compile(r"(~/|/|\./)([-A-Za-z0-9_\$\.\+\!\*\(\),;:@&=\?/~\#\%]|\\)+"),
    re.compile(r"'\<((mailto:)|)[-A-Za-z0-9\.]+@[-A-Za-z0-9\.]+")
]
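# A quick sketch of what the first pattern matches; the sample string is
# hypothetical:
#
#     url_finders[0].search("see http://www.example.org/page for details")
#     # -> match covering "http://www.example.org/page"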
#import log
#pyaggr3g470r_log = log.Log()
@contextmanager
def opened_w_error(filename, mode="r"):
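    """
    Context manager around open(): yields (file, None) on success or
    (None, error) when an IOError occurs, and always closes the file on exit.
    """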
try:
f = open(filename, mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
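# Usage sketch for opened_w_error(); the path is hypothetical (see also
# load_stop_words() below for a real call site):
#
#     with opened_w_error("/tmp/example.txt") as (f, err):
#         if err:
#             print("IOError:", err)
#         else:
#             print(f.read())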
def import_opml(email, opml_file):
"""
Import new feeds from an OPML file.
"""
user = User.query.filter(User.email == email).first()
    subscriptions = opml.parse(opml_file)
    nb = 0
    for subscription in subscriptions:
        try:
            title = subscription.text
        except AttributeError:
            title = ""
        try:
            description = subscription.description
        except AttributeError:
            description = ""
        try:
            link = subscription.xmlUrl
        except AttributeError:
            # an outline without a feed URL is useless: skip it
            continue
        # skip feeds the user is already subscribed to
        if any(feed.link == link for feed in user.feeds):
            continue
        try:
            site_link = subscription.htmlUrl
        except AttributeError:
            site_link = ""
        new_feed = Feed(title=title, description=description,
                        link=link, site_link=site_link,
                        email_notification=False, enabled=True)
        user.feeds.append(new_feed)
        nb += 1
db.session.commit()
return nb
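# A minimal usage sketch for import_opml(); the e-mail address and the
# OPML file path are hypothetical:
#
#     nb = import_opml("user@example.org", "/tmp/subscriptions.opml")
#     print("%d new feeds imported." % nb)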
def open_url(url):
"""
    Open a URL with the proxy and the user-agent
    specified in the configuration file.
"""
    if conf.HTTP_PROXY == "":
        proxy = {}
    else:
        proxy = {"http": conf.HTTP_PROXY}
    try:
        # honour the proxy (if any) and send a custom User-Agent
        opener = urllib.request.build_opener(urllib.request.ProxyHandler(proxy))
        opener.addheaders = [('User-agent', conf.USER_AGENT)]
        return (True, opener.open(url))
    except urllib.error.HTTPError as e:
        # the server couldn't fulfill the request
        error = (url, e.code,
                 http.server.BaseHTTPRequestHandler.responses[e.code][1])
        #pyaggr3g470r_log.error(url + " " + str(e.code) + " " + http.server.BaseHTTPRequestHandler.responses[e.code][1])
        return (False, error)
    except urllib.error.URLError as e:
        # failed to reach the server
        if isinstance(e.reason, str):
            error = (url, e.reason, e.reason)
            #pyaggr3g470r_log.error(url + " " + e.reason)
        else:
            error = (url, e.reason.errno, e.reason.strerror)
            #pyaggr3g470r_log.error(url + " " + str(e.reason.errno) + " " + e.reason.strerror)
        return (False, error)
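# open_url() returns a (success, payload) pair; a sketch of how callers
# unpack it (the URL is hypothetical):
#
#     ok, result = open_url("http://example.org/feed")
#     if ok:
#         content = result.read()
#     else:
#         url, code, message = result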
def clear_string(data):
"""
    Clean a string by removing HTML tags and collapsing consecutive
    white spaces (more than one) into a single space.
    """
    p = re.compile('<[^>]+>')  # HTML tags
    q = re.compile(r'\s+')     # runs of white space characters
    return p.sub('', q.sub(' ', data))
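# For instance, clear_string("<p>Hello   world</p>") returns "Hello world".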
def load_stop_words():
"""
Load the stop words and return them in a list.
"""
stop_words_lists = glob.glob('./pyaggr3g470r/var/stop_words/*.txt')
stop_words = []
    for stop_words_list in stop_words_lists:
        with opened_w_error(stop_words_list, "r") as (stop_words_file, err):
            if err:
                # skip unreadable files instead of discarding
                # the words already loaded
                continue
            stop_words += stop_words_file.read().split(";")
return stop_words
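# Stop words files are expected to hold semicolon-separated words; e.g. a
# (hypothetical) english.txt would contain: the;a;an;and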
def top_words(articles, n=10, size=5):
"""
    Return the n most frequent words (of at least `size` letters)
    found in the content of the given articles.
"""
stop_words = load_stop_words()
words = Counter()
wordre = re.compile(r'\b\w{%s,}\b' % size, re.I)
for article in articles:
for word in [elem.lower() for elem in
wordre.findall(clear_string(article.content)) \
if elem.lower() not in stop_words]:
words[word] += 1
return words.most_common(n)
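# A sketch for top_words(); ``articles`` is a hypothetical list of objects
# exposing a ``content`` attribute holding the article HTML:
#
#     most_frequent = top_words(articles, n=10, size=6)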
def tag_cloud(tags):
"""
Generates a tags cloud.
"""
tags.sort(key=operator.itemgetter(0))
    max_count = max([tag[1] for tag in tags])
    return '\n'.join([('<font size=%d><a href="/search/?query=%s" title="Count: %s">%s</a></font>' %
                       (min(1 + count * 7 / max_count, 7),
                        word, format(count, ',d'), word))
                      for (word, count) in tags])
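# top_words() output feeds directly into tag_cloud(); a sketch
# (``articles`` as above):
#
#     cloud_html = tag_cloud(top_words(articles, n=50, size=4))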
def search_feed(url):
"""
    Search for a feed link in an HTML page.
"""
soup, page = None, None
    try:
        result = open_url(url)
        if result[0]:
            page = result[1]  # reuse the response already fetched
        else:
            return None
        soup = BeautifulSoup(page, "html.parser")
    except Exception:
        return None
feed_links = soup('link', type='application/atom+xml')
feed_links.extend(soup('link', type='application/rss+xml'))
for feed_link in feed_links:
#if url not in feed_link['href']:
#return urllib.parse.urljoin(url, feed_link['href'])
return feed_link['href']
return None
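# Example for search_feed() (a sketch; the page URL is only illustrative):
#
#     feed_url = search_feed("http://cedricbonhomme.org/")
#     if feed_url is not None:
#         print("Feed discovered:", feed_url)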