#! /usr/local/bin/python
#-*- coding: utf-8 -*-

__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.9 $"
__date__ = "$Date: 2010/03/01 $"
__copyright__ = "Copyright (c) 2010 Cedric Bonhomme"
__license__ = "GPLv3"

import os
import sqlite3
import cherrypy
import threading
import ConfigParser
from cherrypy.lib.static import serve_file

import utils
import feedgetter

config = ConfigParser.RawConfigParser()
config.read("./cfg/pyAggr3g470r.cfg")
path = config.get('global', 'path')

bindhost = "0.0.0.0"

cherrypy.config.update({ 'server.socket_port': 12556, 'server.socket_host': bindhost})

# Static files (style sheet, icons and the words histogram) are served from
# the installation path read in the configuration file. This dictionary is
# passed to cherrypy.quickstart() at the bottom of the module.
path = {'/css/style.css': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'css/style.css'}, \
        '/css/img/feed-icon-28x28.png': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'css/img/feed-icon-28x28.png'}, \
        '/css/img/delicious.png': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'css/img/delicious.png'}, \
        '/css/img/digg.png': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'css/img/digg.png'}, \
        '/css/img/reddit.png': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'css/img/reddit.png'}, \
        '/css/img/scoopeo.png': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'css/img/scoopeo.png'}, \
        '/css/img/blogmarks.png': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'css/img/blogmarks.png'}, \
        '/var/histogram.png': {'tools.staticfile.on': True, \
                'tools.staticfile.filename': path + 'var/histogram.png'}}

# Fragments of HTML shared by all the pages.
htmlheader = '<html>\n' + \
        '<head>' + \
        '\n\t<title>pyAggr3g470r - RSS Feed Reader</title>\n' + \
        '\t<link rel="stylesheet" type="text/css" href="/css/style.css" />' + \
        '\n</head>\n' + \
        '<body>\n'

htmlfooter = '<hr />\n' + \
        '<p>This software is under GPLv3 license. You are welcome to copy, modify or' + \
        ' redistribute the source code according to the' + \
        ' GPLv3 license.</p>\n' + \
        '</body>\n' + \
        '</html>\n'

htmlnav = '<div>\n' + \
        '<h1><a name="top" href="/">pyAggr3g470r - RSS Feed Reader</a></h1>\n' + \
        '</div>\n' + \
        'pyAggr3g470r (source code)'
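
# Example of the configuration file read above (./cfg/pyAggr3g470r.cfg).
# Only the 'path' option of the [global] section is used by this module; it
# must end with a trailing slash since it is concatenated with relative file
# names. The value below is a placeholder, not a real installation path:
#
#   [global]
#   path = /home/user/pyaggr3g470r/
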
\n""" html += """Management
\n""" html += """Fetch all feeds
\n""" html += """Mark articles as read\n""" html += """
\n""" html += "
\n" html += """Your feeds (%s):
\n""" % len(self.articles.keys()) for rss_feed_id in self.articles.keys(): html += """%s (%s / %s)
\n""" % \ (rss_feed_id.encode('utf-8'), \ self.feeds[rss_feed_id][3].encode('utf-8'), \ rss_feed_id, self.feeds[rss_feed_id][1], \ self.feeds[rss_feed_id][0]) html += """
\n
\n""" for rss_feed_id in self.articles.keys(): html += """

%s

\n""" % \ (rss_feed_id, \ self.feeds[rss_feed_id][5].encode('utf-8'), \ self.feeds[rss_feed_id][3].encode('utf-8'), \ self.feeds[rss_feed_id][4].encode('utf-8'), \ self.feeds[rss_feed_id][2].encode('utf-8')) # The main page display only 10 articles by feeds. for article in self.articles[rss_feed_id][:10]: if article[5] == "0": # not readed articles are in bold not_read_begin = "" not_read_end = "" else: not_read_begin = "" not_read_end = "" html += article[1].encode('utf-8') + \ " - " + not_read_begin + \ """%s""" % \ (rss_feed_id, article[0].encode('utf-8'),article[2].encode('utf-8')) + \ not_read_end + \ "
\n" html += "
\n" html += """All articles""" % (rss_feed_id,) html += """ Mark all as read""" % (rss_feed_id,) if self.feeds[rss_feed_id][1] != 0: html += """ Unread article(s) (%s)""" % (rss_feed_id, \ self.feeds[rss_feed_id][1]) html += """

Top

""" html += "
\n" html += htmlfooter return html index.exposed = True def management(self): """ Management of articles. """ html = htmlheader html += htmlnav html += """
\n""" html += "

Add Feeds

\n" html += """
\n
\n""" html += "

Delete Feeds

\n" html += """
\n""" html += "
\n" html += """

The database contains a total of %s article(s) with %s unread article(s).
""" % \ (self.nb_articles, sum([feed[1] for feed in self.feeds.values()])) html += """Database: %s.\n
Size: %s bytes.

\n""" % \ (os.path.abspath("./var/feed.db"), os.path.getsize("./var/feed.db")) html += """
\n
\n""" html += """
\n
\n""" html += "
\n" if self.articles: html += "

Statistics

\n" if "oice" not in utils.IMPORT_ERROR: nb_french = 0 nb_english = 0 for rss_feed_id in self.articles.keys(): for article in self.articles[rss_feed_id]: if article[6] == 'french': nb_french += 1 elif article[6] == 'english': nb_english += 1 nb_other = self.nb_articles - nb_french - nb_english html += "\n\n\n
" html += "

Words count

\n" html += "
    \n" for word, frequency in self.top_words: html += """\t
  1. %s: %s
  2. \n""" % \ (word, word, frequency) html += "
\n" html += "

Languages

\n" if "oice" in utils.IMPORT_ERROR: html += "Install the module " html += """oice.langdet""" html += "
" else: html += "
    \n" for language in ['english', 'french', 'other']: html += """\t
  • %s articles in %s
  • \n""" % \ (locals()["nb_"+language], language, language) html += "
\n
" html += """
""" html += "
\n" html += htmlfooter return html management.exposed = True def q(self, querystring=None): """ Search for a feed. Simply search for the string 'querystring' in the description of the article. """ param, _, value = querystring.partition(':') feed_id = None if param == "Feed": feed_id, _, querystring = value.partition(':') html = htmlheader html += htmlnav html += """
""" html += """

Articles containing the string %s


""" % (querystring,) if feed_id is not None: for article in self.articles[rss_feed_id]: article_content = utils.remove_html_tags(article[4].encode('utf-8')) if querystring.lower() in article_content.lower(): if article[5] == "0": # not readed articles are in bold not_read_begin = "" not_read_end = "" else: not_read_begin = "" not_read_end = "" html += article[1].encode('utf-8') + \ " - " + not_read_begin + \ """%s""" % \ (feed_id, article[0].encode('utf-8'), article[2].encode('utf-8')) + \ not_read_end else: for rss_feed_id in self.articles.keys(): for article in self.articles[rss_feed_id]: article_content = utils.remove_html_tags(article[4].encode('utf-8')) if querystring.lower() in article_content.lower(): if article[5] == "0": # not readed articles are in bold not_read_begin = "" not_read_end = "" else: not_read_begin = "" not_read_end = "" html += article[1].encode('utf-8') + \ " - " + not_read_begin + \ """%s""" % \ (rss_feed_id, article[0].encode('utf-8'), article[2].encode('utf-8')) + \ not_read_end + """ from %s
\n""" % \ (self.feeds[rss_feed_id][5].encode('utf-8'), \ self.feeds[rss_feed_id][3].encode('utf-8')) html += "
" html += htmlfooter return html q.exposed = True def fetch(self): """ Fetch all feeds """ feed_getter = feedgetter.FeedGetter() feed_getter.retrieve_feed() self.update() return self.index() fetch.exposed = True def description(self, param): """ Display the description of an article in a new Web page. """ try: feed_id, article_id = param.split(':') except: return self.error_page("Bad URL") try: articles_list = self.articles[feed_id] except KeyError: return self.error_page("This feed do not exists.") html = htmlheader html += htmlnav html += """
""" for article in articles_list: if article_id == article[0]: if article[5] == "0": self.mark_as_read("Article:"+article[3]) # update the database html += """

%s from %s

\n
\n""" % \ (article[2].encode('utf-8'), feed_id, \ self.feeds[feed_id][3].encode('utf-8')) description = article[4].encode('utf-8') if description: html += description else: html += "No description available." html += "\n
\n" html += """This article seems to be written in %s.\n""" % \ (article[6], article[6]) html += """
\nPlain text\n""" % \ (feed_id, article_id) html += """
\nComplete story\n
\n""" % \ (article[3].encode('utf-8'),) # Share this article: # on delicious html += """\n    """ % \ (article[3].encode('utf-8'), article[2].encode('utf-8')) # on Digg html += """\n    """ % \ (article[3].encode('utf-8'), article[2].encode('utf-8')) # on reddit html += """\n    """ % \ (article[3].encode('utf-8'), article[2].encode('utf-8')) # on Scoopeo html += """\n    """ % \ (article[3].encode('utf-8'), article[2].encode('utf-8')) # on Blogmarks html += """\n """ % \ (article[3].encode('utf-8'), article[2].encode('utf-8')) break html += "
\n" + htmlfooter return html description.exposed = True def all_articles(self, feed_id): """ Display all articles of a feed. """ try: articles_list = self.articles[feed_id] except KeyError: return self.error_page("This feed do not exists.") html = htmlheader html += htmlnav html += """
\n""" html += """Mark all articles from this feed as read""" % (feed_id,) html += """
\n
\n""" % (feed_id,) html += "
\n" html += """Your feeds (%s):
\n""" % len(self.articles.keys()) for rss_feed_id in self.articles.keys(): html += """%s (%s / %s)
\n""" % \ (rss_feed_id.encode('utf-8'), \ self.feeds[rss_feed_id][3].encode('utf-8'), \ rss_feed_id, self.feeds[rss_feed_id][1], \ self.feeds[rss_feed_id][0]) html += """
""" html += """

Articles of the feed %s


""" % (self.feeds[feed_id][3].encode('utf-8')) for article in articles_list: if article[5] == "0": # not readed articles are in bold not_read_begin = "" not_read_end = "" else: not_read_begin = "" not_read_end = "" html += article[1].encode('utf-8') + \ " - " + not_read_begin + \ """%s""" % \ (feed_id, article[0].encode('utf-8'), article[2].encode('utf-8')) + \ not_read_end + \ "
\n" html += """\n

All feeds

""" html += "
\n" html += htmlfooter return html all_articles.exposed = True def unread(self, feed_id): """ Display all unread articles of a feed. """ html = htmlheader html += htmlnav html += """
""" if feed_id == "All": html += "

Unread article(s)

" for rss_feed_id in self.feeds.keys(): for article in self.articles[rss_feed_id]: if article[5] == "0": html += article[1].encode('utf-8') + \ """ - %s from %s
\n""" % \ (rss_feed_id, article[0].encode('utf-8'), article[2].encode('utf-8'), \ self.feeds[rss_feed_id][5].encode('utf-8'), \ self.feeds[rss_feed_id][3].encode('utf-8')) html += """
\nMark articles as read\n""" else: try: articles_list = self.articles[feed_id] except KeyError: return self.error_page("This feed do not exists.") html += """

Unread article(s) of the feed %s


""" % (feed_id, self.feeds[feed_id][3].encode('utf-8')) for article in articles_list: if article[5] == "0": html += article[1].encode('utf-8') + \ """ - %s""" % \ (feed_id, article[0].encode('utf-8'), article[2].encode('utf-8')) + \ "
\n" html += """
\nMark all as read""" % (feed_id,) html += """\n

All feeds

""" html += "
\n" html += htmlfooter return html unread.exposed = True def language(self, lang): """ Display articles by language. """ if lang not in ['english', 'french', 'other']: return self.error_page('This language is not supported.') html = htmlheader html += htmlnav html += """
""" html += """

Article(s) written in %s

\n
\n""" % (lang,) if "oice" not in utils.IMPORT_ERROR: for rss_feed_id in self.articles.keys(): for article in self.articles[rss_feed_id]: if article[6] == lang: html += article[1].encode('utf-8') + \ """ - %s from %s
\n""" % \ (rss_feed_id, article[0].encode('utf-8'), article[2].encode('utf-8'), \ self.feeds[rss_feed_id][5].encode('utf-8'), \ self.feeds[rss_feed_id][3].encode('utf-8')) else: html += "Install the module " html += """oice.langdet""" html += "
\n" html += htmlfooter return html language.exposed = True def plain_text(self, target): """ Display an article in plain text (without HTML tags). """ try: feed_id, article_id = target.split(':') except: return self.error_page("This article do not exists.") try: articles_list = self.articles[feed_id] except KeyError: return self.error_page("This feed do not exists.") html = htmlheader html += htmlnav html += """
""" feed_id, article_id = target.split(':') for article in articles_list: if article_id == article[0]: html += """

%s from %s

\n
\n"""% \ (article[2].encode('utf-8'), feed_id, \ self.feeds[feed_id][3].encode('utf-8')) description = utils.remove_html_tags(article[4].encode('utf-8')) if description: html += description else: html += "No description available." html += "\n
\n" + htmlfooter return html plain_text.exposed = True def error_page(self, message): """ Display a message (bad feed id, bad article id, etc.) """ html = htmlheader html += htmlnav html += """
""" html += """%s""" % message html += "\n
\n" + htmlfooter return html error_page.exposed = True def mark_as_read(self, target): """ Mark one (or more) article(s) as read by setting the value of the field 'article_readed' of the SQLite database to 1. """ LOCKER.acquire() param, _, identifiant = target.partition(':') try: conn = sqlite3.connect("./var/feed.db", isolation_level = None) c = conn.cursor() # Mark all articles as read. if param == "All": c.execute("UPDATE articles SET article_readed=1") # Mark all articles from a feed as read. elif param == "Feed" or param == "Feed_FromMainPage": c.execute("UPDATE articles SET article_readed=1 WHERE feed_link='" + \ self.feeds[identifiant][4].encode('utf-8') + "'") # Mark an article as read. elif param == "Article": c.execute("UPDATE articles SET article_readed=1 WHERE article_link='" + \ identifiant + "'") conn.commit() c.close() except Exception, e: pass threading.Thread(None, self.update, None, ()).start() if param == "All" or param == "Feed_FromMainPage": return self.index() elif param == "Feed": return self.all_articles(identifiant) LOCKER.release() mark_as_read.exposed = True def update(self): """ Synchronizes transient objects with the database, computes the list of most frequent words and generates the histogram. Called when an article is marked as read or when new articles are fetched. """ self.articles, self.feeds = utils.load_feed() if self.articles != {}: self.nb_articles = sum([feed[0] for feed in self.feeds.values()]) self.top_words = utils.top_words(self.articles, 10) if "pylab" not in utils.IMPORT_ERROR: utils.create_histogram(self.top_words) if __name__ == '__main__': # Point of entry in execution mode LOCKER = threading.Lock() root = Root() root.update() cherrypy.quickstart(root, config=path)