#!/usr/bin/env python
# -*- coding: utf-8 -*-

# pyAggr3g470r - A Web based news aggregator.
# Copyright (C) 2010 Cédric Bonhomme - http://cedricbonhomme.org/
#
# For more information: http://bitbucket.org/cedricbonhomme/pyaggr3g470r/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 2.9 $"
__date__ = "$Date: 2010/01/29 $"
__revision__ = "$Date: 2011/09/26 $"
__copyright__ = "Copyright (c) Cedric Bonhomme"
__license__ = "GPLv3"

#
# This file contains the "Root" class which describes
# all pages of pyAggr3g470r. These pages are:
# - main page;
# - management;
# - history;
# - favorites;
# - notifications;
# - unread;
# - feed summary.
#

import os
import re
import time
import sqlite3
import cherrypy
import calendar
import threading
from collections import Counter
import datetime

import utils
import feedgetter
import PyQRNative


def error_page_404(status, message, traceback, version):
    """ Display an error if the page does not exist. """
    html = htmlheader()
    html += htmlnav
    html += "Error %s - This page does not exist." % status
    html += "\n" + htmlfooter
    return html

def handle_error():
    """ Handle the different types of errors. """
    html = htmlheader()
    html += htmlnav
    html += "Sorry, an error occurred."
    html += "\n" + htmlfooter
    cherrypy.response.status = 500
    cherrypy.response.body = [html]

def htmlheader(nb_unread_articles=""):
    """
    Return the header of the HTML page with the number of unread
    articles in the 'title' HTML tag.
    """
    return '\n' + \
        '' + \
        '\n\t' + nb_unread_articles + 'pyAggr3g470r - News aggregator\n' + \
        '\t' + \
        '\n\t\n' + \
        '\n\t\n' + \
        '\n'

htmlfooter = '

This software is released under the GPLv3 license. You are welcome to copy, modify or' + \ ' redistribute the source code according to the' + \ ' GPLv3 license.

\n' + \ '\n' htmlnav = '\n

pyAggr3g470r - News aggregator

\n' + \ 'pyAggr3g470r (source code)' class Root: """ Root class. All pages of pyAggr3g470r are described in this class. """ def index(self): """ Main page containing the list of feeds and articles. """ # if there are unread articles, display the number in the tab of the browser html = htmlheader((self.nb_unread_articles and \ ['(' + str(self.nb_unread_articles) +') '] or \ [""])[0]) html += htmlnav html += self.create_right_menu() html += """
\n""" if self.feeds: html += '\n' html += '\n' html += '      \n' html += """\n""" % \ (self.nb_favorites,) html += """\n""" % \ (self.nb_mail_notifications,) html += '      ' if self.nb_unread_articles != 0: html += '\n' html += """\n""" % \ (self.nb_unread_articles,) html += '\n' # The main page display all the feeds. for feed in self.feeds.values(): html += """

%s

\n""" % \ (feed.feed_id, feed.feed_site_link, feed.feed_title, \ feed.feed_link, feed.feed_image) # The main page display only 10 articles by feeds. for article in feed.articles.values()[:10]: if article.article_readed == "0": # not readed articles are in bold not_read_begin, not_read_end = "", "" else: not_read_begin, not_read_end = "", "" # display a heart for faved articles if article.like == "1": like = """ """ else: like = "" # Descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content.split(' ')[:55]) else: description = "No description." # Title of the article article_title = article.article_title if len(article_title) >= 110: article_title = article_title[:110] + " ..." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s%s%s""" % \ (feed.feed_id, article.article_id, not_read_begin, \ article_title, not_read_end, description) + like + "
\n" html += "
\n" # some options for the current feed html += """All articles   """ % (feed.feed_id,) html += """Feed summary   """ % (feed.feed_id,) if feed.nb_unread_articles != 0: html += """  Mark all as read""" % (feed.feed_id,) html += """     Unread article(s) (%s)""" % (feed.feed_id, feed.nb_unread_articles) if feed.mail == "0": html += """
\nStay tuned""" % (feed.feed_id,) else: html += """
\nStop staying tuned""" % (feed.feed_id,) html += """

Top

""" html += "
\n" html += htmlfooter return html index.exposed = True def create_right_menu(self): """ Create the right menu. """ html = """
\n""" html += """
\n""" html += "
\n" # insert the list of feeds in the menu html += self.create_list_of_feeds() html += "
\n" return html def create_list_of_feeds(self): """ Create the list of feeds. """ html = """" def management(self, word_size=6, max_nb_articles=5): """ Management page. Allows adding and deleting feeds. Export functions of the SQLite data base and display some statistics. """ html = htmlheader() html += htmlnav html += """
\n""" html += "

Add Feeds

\n" # Form: add a feed html += """
\n
\n""" if self.feeds: # Form: delete a feed html += "

Delete Feeds

\n" html += """
\n""" html += """

Active e-mail notifications: %s

\n""" % \ (self.nb_mail_notifications,) html += """

You like %s article(s).

\n""" % \ (self.nb_favorites, ) html += "
\n" # Informations about the data base of articles html += """

%s article(s) are loaded from the database with %s unread article(s).
""" % \ (self.nb_articles, self.nb_unread_articles) html += """Database: %s.\n
Size: %s bytes.

\n""" % \ (os.path.abspath(utils.sqlite_base), os.path.getsize(utils.sqlite_base)) html += """
\n
\n""" html += """
\n
\n""" html += '
\n' html += "For each feed only load the " html += """\n""" % (max_nb_articles) html += " last articles." if utils.MAX_NB_ARTICLES == -1: html += "
All articles are currently loaded.\n" else: html += "
For each feed only " + str(utils.MAX_NB_ARTICLES) + " articles are currently loaded. " html += 'Load all articles.
\n' html += "
\n" # Export functions html += "

Export articles

\n\n" html += """
\n\t\n
\n""" html += "
\n\n" # Some statistics (most frequent word) if self.feeds: self.top_words = utils.top_words(self.feeds, n=50, size=int(word_size)) html += "

Statistics

\n
\n" # Tags cloud html += 'Minimum size of a word:' html += '
' html += """""" % (word_size) html += '
\n' html += '

Tag cloud

\n' html += '
' + \ utils.tag_cloud(self.top_words) + '
' html += "
\n" html += htmlfooter return html management.exposed = True def q(self, querystring=None): """ Simply search for the string 'querystring' in the description of the article. """ param, _, value = querystring.partition(':') wordre = re.compile(r'\b%s\b' % param, re.I) feed_id = None if param == "Feed": feed_id, _, querystring = value.partition(':') html = htmlheader() html += htmlnav html += """
""" html += """

Articles containing the string %s


""" % (querystring,) if feed_id is not None: for article in self.feeds[feed_id].articles.values(): article_content = utils.clear_string(article.article_description) if not article_content: utils.clear_string(article.article_title) if wordre.findall(article_content) != []: if article.article_readed == "0": # not readed articles are in bold not_read_begin, not_read_end = "", "" else: not_read_begin, not_read_end = "", "" html += article.article_date + " - " + not_read_begin + \ """%s""" % \ (feed_id, article.article_id, article.article_title) + \ not_read_end + """
\n""" else: for feed in self.feeds.values(): new_feed_section = True for article in feed.articles.values(): article_content = utils.clear_string(article.article_description) if not article_content: utils.clear_string(article.article_title) if wordre.findall(article_content) != []: if new_feed_section is True: new_feed_section = False html += """

%s

\n""" % \ (feed.feed_id, feed.feed_title, feed.feed_link, feed.feed_image) if article.article_readed == "0": # not readed articles are in bold not_read_begin, not_read_end = "", "" else: not_read_begin, not_read_end = "", "" # display a heart for faved articles if article.like == "1": like = """ """ else: like = "" # descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s%s%s""" % \ (feed.feed_id, article.article_id, not_read_begin, \ article.article_title[:150], not_read_end, description) + like + "
\n" html += "
" html += htmlfooter return html q.exposed = True def fetch(self): """ Fetch all feeds. """ feed_getter = feedgetter.FeedGetter() feed_getter.retrieve_feed() return self.index() fetch.exposed = True def article(self, param): """ Display the article in parameter in a new Web page. """ try: feed_id, article_id = param.split(':') feed, article = self.feeds[feed_id], self.feeds[feed_id].articles[article_id] except: return self.error_page("Bad URL. This article do not exists.") html = htmlheader() html += htmlnav html += """
""" # Generation of the QR Code for the current article try: os.makedirs("./var/qrcode/") except OSError: pass if not os.path.isfile("./var/qrcode/" + article_id + ".png"): # QR Code generation try: qr = PyQRNative.QRCode(7, PyQRNative.QRErrorCorrectLevel.L) qr.addData(article.article_link) qr.make() im = qr.makeImage() im.save("./var/qrcode/"+article_id+".png", format='png') except Exception, e: # Code length overflow print e if article.article_readed == "0": # if the current article is not yet readed, update the database self.mark_as_read("Article:"+article.article_link) html += '\n
\n' # Title of the article html += """

%s from %s

\n
\n""" % \ (article.article_title, feed_id, feed.feed_title) if article.like == "1": html += """""" % \ (feed_id, article.article_id) else: html += """""" % \ (feed_id, article.article_id) html += """  """ % \ (feed_id, article.article_id) html += "

" # Description (full content) of the article description = article.article_description if description: p = re.compile(r'<') q = re.compile(r'>') description = p.sub('<', description) description = q.sub('>', description) html += description + "\n


" else: html += "No description available.\n


" # Previous and following articles try: following = feed.articles.values()[feed.articles.keys().index(article_id) - 1] html += """
\n""" % \ (feed_id, following.article_id, following.article_title) except: pass try: previous = feed.articles.values()[feed.articles.keys().index(article_id) + 1] except: previous = feed.articles.values()[0] finally: html += """
\n""" % \ (feed_id, previous.article_id, previous.article_title) html += "\n
\n" # Footer menu html += "
\n" html += """\nPlain text\n""" % (feed_id, article.article_id) html += """ - Export to EPUB\n""" % (feed_id, article.article_id) html += """
\nComplete story\n
\n""" % (article.article_link,) # Share this article: html += "Share this article:
\n" # on Diaspora html += """\n\t \n""" % \ (utils.DIASPORA_POD, article.article_link, article.article_title, "via pyAggr3g470r") # on Identi.ca html += """\n\n""" % \ (article.article_title, article.article_link) # on Pinboard html += """\n\n\t\n """ % \ (article.article_link, article.article_title) # on delicious html += """\n\n\t\n """ % \ (article.article_link, article.article_title) # on Digg html += """\n\n\t\n """ % \ (article.article_link, article.article_title) # on reddit html += """\n\n\t\n """ % \ (article.article_link, article.article_title) # on Scoopeo html += """\n\n\t\n """ % \ (article.article_link, article.article_title) # on Blogmarks html += """\n\n\t\n """ % \ (article.article_link, article.article_title) # on Twitter html += """\n\n\t\n""" % \ (article.article_link, article.article_title) # on Google Buzz with counter html += """

\n\n""" % \ (article.article_link,) # Google +1 button html += """\n\n""" % \ (article.article_link,) # QRCode (for smartphone) html += """
\n""" % (article_id,) html += "
\n" + htmlfooter return html article.exposed = True def feed(self, feed_id, word_size=6): """ This page gives summary informations about a feed (number of articles, unread articles, average activity, tag cloud, e-mail notification and favourite articles for the current feed. """ try: feed = self.feeds[feed_id] except KeyError: return self.error_page("This feed do not exists.") html = htmlheader() html += htmlnav html += """
""" html += "

The feed " + feed.feed_title + " contains " + str(feed.nb_articles) + " articles. " html += "Representing " + str((round(float(feed.nb_articles) / self.nb_articles, 4)) * 100) + " % of the total " html += "(" + str(self.nb_articles) + ").

" if feed.articles.values() != []: html += "

" + (feed.nb_unread_articles == 0 and ["All articles are read"] or [str(feed.nb_unread_articles) + \ " unread article" + (feed.nb_unread_articles == 1 and [""] or ["s"])[0]])[0] + ".

" if feed.mail == "1": html += """

You are receiving articles from this feed to the address: %s. """ % \ (utils.mail_to, utils.mail_to) html += """Stop receiving articles from this feed.

""" % \ (feed.feed_id, ) if feed.articles.values() != []: last_article = utils.string_to_datetime(feed.articles.values()[0].article_date) first_article = utils.string_to_datetime(feed.articles.values()[-1].article_date) delta = last_article - first_article delta_today = datetime.datetime.fromordinal(datetime.date.today().toordinal()) - last_article html += "

The last article was posted " + str(abs(delta_today.days)) + " day(s) ago.

" if delta.days > 0: html += """

Daily average: %s,""" % (str(round(float(feed.nb_articles)/abs(delta.days), 2)),) html += """ between the %s and the %s.

\n""" % \ (feed.articles.values()[-1].article_date[:10], feed.articles.values()[0].article_date[:10]) html += "

Recent articles

" for article in feed.articles.values()[:10]: if article.article_readed == "0": # not readed articles are in bold not_read_begin, not_read_end = "", "" else: not_read_begin, not_read_end = "", "" # display a heart for faved articles if article.like == "1": like = """ """ else: like = "" # Descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # Title of the article article_title = article.article_title if len(article_title) >= 110: article_title = article_title[:110] + " ..." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s%s%s""" % \ (feed.feed_id, article.article_id, not_read_begin, \ article_title, not_read_end, description) + like + "
\n" html += "
\n" html += """All articles   """ % (feed.feed_id,) favs = [article for article in feed.articles.values() if article.like == "1"] if len(favs) != 0: html += "

Your favorite articles for this feed

" for article in favs: if article.like == "1": # descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s
\n""" % \ (feed.feed_id, article.article_id, article.article_title[:150], description) # This section enables the user to edit informations about # the current feed: # - feed logo; # - feed name; # - URL of the feed (not the site); html += "
\n

Edit this feed

\n" html += '\n\n
' + \ '' + \ """
\n""" % \ (feed.feed_link,) html += '\n\n
' + \ '' + \ """
\n""" % \ (feed.feed_link,) html += '\n\n
' + \ '' + \ """
\n""" % \ (feed.feed_link,) dic = {} dic[feed.feed_id] = self.feeds[feed.feed_id] top_words = utils.top_words(dic, n=50, size=int(word_size)) html += "

Tag cloud

\n
\n" # Tags cloud html += 'Minimum size of a word:' html += """
""" % (feed.feed_id,) html += """""" % (word_size,) html += '
\n' html += '
' + \ utils.tag_cloud(top_words) + '
' html += "
" html += "
" html += htmlfooter return html feed.exposed = True def articles(self, feed_id): """ This page displays all articles of a feed. """ try: feed = self.feeds[feed_id] except KeyError: return self.error_page("This feed do not exists.") html = htmlheader() html += htmlnav html += """
\n""" html += """Mark all articles from this feed as read""" % (feed_id,) html += """
\n
\n""" % ("Feed:"+feed_id,) html += "
\n" html += self.create_list_of_feeds() html += """
""" html += """

Articles of the feed %s


""" % (feed.feed_title) for article in feed.articles.values(): if article.article_readed == "0": # not readed articles are in bold not_read_begin, not_read_end = "", "" else: not_read_begin, not_read_end = "", "" if article.like == "1": like = """ """ else: like = "" # descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s%s%s""" % \ (feed.feed_id, article.article_id, not_read_begin, \ article.article_title[:150], not_read_end, description) + like + "
\n" html += """\n

All feeds

""" html += "
\n" html += htmlfooter return html articles.exposed = True def unread(self, feed_id=""): """ This page displays all unread articles of a feed. """ html = htmlheader() html += htmlnav html += """
""" if self.nb_unread_articles != 0: if feed_id == "": html += "

Unread article(s)

" html += """\n
\nMark articles as read\n
\n""" for feed in self.feeds.values(): new_feed_section = True nb_unread = 0 for article in feed.articles.values(): if article.article_readed == "0": nb_unread += 1 if new_feed_section is True: new_feed_section = False html += """

%s

\n""" % \ (feed.feed_id, feed.feed_site_link, feed.feed_title, feed.feed_link, feed.feed_image) # descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s
\n""" % \ (feed.feed_id, article.article_id, article.article_title[:150], description) if nb_unread == feed.nb_unread_articles: html += """
\nMark all articles from this feed as read\n""" % \ (feed.feed_id,) html += """
\nMark articles as read\n""" else: try: feed = self.feeds[feed_id] except: self.error_page("This feed do not exists.") html += """

Unread article(s) of the feed %s


""" % (feed.feed_id, feed.feed_title) for article in feed.articles.values(): if article.article_readed == "0": # descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s
\n""" % \ (feed.feed_id, article.article_id, article.article_title[:150], description) html += """
\nMark all as read""" % (feed.feed_id,) else: html += '

No unread article(s)

\n
\nWhy not check for news?' html += """\n

All feeds

""" html += "
\n" html += htmlfooter return html unread.exposed = True def history(self, querystring="all", m=""): """ This page enables to browse articles chronologically. """ html = htmlheader() html += htmlnav html += """
\n""" # Get the date from the tag cloud # Format: /history/?querystring=year:2011-month:06 to get the # list of articles of June, 2011. if m != "": querystring = """year:%s-month:%s""" % tuple(m.split('-')) if querystring == "all": html += "

Search with tags cloud

\n" html += "

Choose a year


\n" if "year" in querystring: the_year = querystring.split('-')[0].split(':')[1] if "month" not in querystring: html += "

Choose a month for " + the_year + "


\n" if "month" in querystring: the_month = querystring.split('-')[1].split(':')[1] html += "

Articles of "+ calendar.month_name[int(the_month)] + \ ", "+ the_year +".


\n" timeline = Counter() for feed in self.feeds.values(): new_feed_section = True for article in feed.articles.values(): if querystring == "all": timeline[article.article_date.split(' ')[0].split('-')[0]] += 1 elif querystring[:4] == "year": if article.article_date.split(' ')[0].split('-')[0] == the_year: timeline[article.article_date.split(' ')[0].split('-')[1]] += 1 if "month" in querystring: if article.article_date.split(' ')[0].split('-')[1] == the_month: if article.article_readed == "0": # not readed articles are in bold not_read_begin, not_read_end = "", "" else: not_read_begin, not_read_end = "", "" if article.like == "1": like = """ """ else: like = "" # Descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # Title of the article article_title = article.article_title if len(article_title) >= 110: article_title = article_title[:110] + " ..." if new_feed_section is True: new_feed_section = False html += """

%s

\n""" % \ (feed.feed_id, feed.feed_site_link, feed.feed_title, feed.feed_link, feed.feed_image) html += article.article_date.split(' ')[0][-2:] + " (" + \ article.article_date.split(' ')[1] + ") - " + \ """%s%s%s%s""" % \ (feed.feed_id, article.article_id, not_read_begin, \ article_title, not_read_end, description) + like + "
\n" if querystring == "all": query = "year" elif "year" in querystring: query = "year:" + the_year + "-month" if "month" not in querystring: html += '
' + \ utils.tag_cloud([(elem, timeline[elem]) for elem in timeline.keys()], query) + '
' html += '

Search with a month+year picker

\n' html += '
\n\t\n\t\n
' html += '
'
        html += htmlfooter
        return html
    history.exposed = True

    def plain_text(self, target):
        """ Display an article in plain text (without HTML tags). """
        try:
            feed_id, article_id = target.split(':')
            feed, article = self.feeds[feed_id], self.feeds[feed_id].articles[article_id]
        except:
            return self.error_page("Bad URL. This article does not exist.")
        html = htmlheader()
        html += htmlnav
        html += """
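# history() above tallies article dates with a collections.Counter, keyed by year
# ("2011") or, once a year is chosen, by month ("06"), from dates formatted like
# "2011-06-25 10:30:00". A minimal sketch of both tallies in one helper; the function
# name is illustrative.
def _timeline(feeds, year=None):
    """ Count articles per year, or per month of the given year. """
    timeline = Counter()
    for feed in feeds.values():
        for article in feed.articles.values():
            article_year, article_month = article.article_date.split(' ')[0].split('-')[:2]
            if year is None:
                timeline[article_year] += 1
            elif article_year == year:
                timeline[article_month] += 1
    return timeline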
""" html += """

%s from %s

\n
\n"""% \ (article.article_title, feed_id, feed.feed_title) description = utils.clear_string(article.article_description) if description: html += description else: html += "No description available." html += "\n
\n" + htmlfooter return html plain_text.exposed = True def error_page(self, message): """ Display a message (bad feed id, bad article id, etc.) """ html = htmlheader() html += htmlnav html += """
""" html += """%s""" % message html += "\n
\n" + htmlfooter return html error_page.exposed = True def mark_as_read(self, target=""): """ Mark one (or more) article(s) as read by setting the value of the field 'article_readed' of the SQLite database to 1. """ param, _, identifiant = target.partition(':') try: LOCKER.acquire() conn = sqlite3.connect(utils.sqlite_base, isolation_level = None) c = conn.cursor() # Mark all articles as read. if param == "": c.execute("UPDATE articles SET article_readed=1 WHERE article_readed='0'") # Mark all articles from a feed as read. elif param == "Feed" or param == "Feed_FromMainPage": c.execute("UPDATE articles SET article_readed=1 WHERE article_readed='0' AND feed_link='" + \ self.feeds[identifiant].feed_link + "'") # Mark an article as read. elif param == "Article": c.execute("UPDATE articles SET article_readed=1 WHERE article_link='" + identifiant + "'") conn.commit() c.close() except Exception: self.error_page("Impossible to mark this article as read.") finally: LOCKER.release() if param == "" or param == "Feed_FromMainPage": return self.index() elif param == "Feed": return self.articles(identifiant) mark_as_read.exposed = True def notifications(self): """ List all active e-mail notifications. """ html = htmlheader() html += htmlnav html += """
""" html += "

You are receiving e-mails for the following feeds:

\n" for feed in self.feeds.values(): if feed.mail == "1": html += """\t%s - Stop
\n""" % \ (feed.feed_id, feed.feed_title, feed.feed_id) html += """

Notifications are sent to: %s

""" % \ (utils.mail_to, utils.mail_to) html += "\n
\n" + htmlfooter return html notifications.exposed = True def mail_notification(self, param): """ Enable or disable to notifications of news for a feed. """ try: action, feed_id = param.split(':') feed = self.feeds[feed_id] except: return self.error_page("Bad URL. This feed do not exists.") conn = sqlite3.connect(utils.sqlite_base, isolation_level = None) try: c = conn.cursor() c.execute("""UPDATE feeds SET mail=%s WHERE feed_site_link='%s'""" % (action, self.feeds[feed_id].feed_site_link)) except: return self.error_page("Error") finally: conn.commit() c.close() return self.index() mail_notification.exposed = True def like(self, param): """ Mark or unmark an article as favorites. """ try: action, feed_id, article_id = param.split(':') article = self.feeds[feed_id].articles[article_id] except: return self.error_page("Bad URL. This article do not exists.") conn = sqlite3.connect(utils.sqlite_base, isolation_level = None) try: c = conn.cursor() c.execute("""UPDATE articles SET like=%s WHERE article_link='%s'""" % (action, article.article_link)) except Exception: self.error_page("Impossible to like/dislike this article (database error).") finally: conn.commit() c.close() return self.article(feed_id+":"+article_id) like.exposed = True def favorites(self): """ List of favorites articles """ html = htmlheader() html += htmlnav html += """
""" html += "

Your favorite articles

" for feed in self.feeds.values(): new_feed_section = True for article in feed.articles.values(): if article.like == "1": if new_feed_section is True: new_feed_section = False html += """

%s

\n""" % \ (feed.feed_id, feed.feed_site_link, feed.feed_title, feed.feed_link, feed.feed_image) # descrition for the CSS ToolTips article_content = utils.clear_string(article.article_description) if article_content: description = " ".join(article_content[:500].split(' ')[:-1]) else: description = "No description." # a description line per article (date, title of the article and # CSS description tooltips on mouse over) html += article.article_date + " - " + \ """%s%s
\n""" % \ (feed.feed_id, article.article_id, article.article_title[:150], description) html += "
\n" html += htmlfooter return html favorites.exposed = True def add_feed(self, url): """ Add a new feed with the URL of a page. """ html = htmlheader() html += htmlnav html += """
""" # search the feed in the HTML page with BeautifulSoup feed_url = utils.search_feed(url) if feed_url is None: return self.error_page("Impossible to find a feed at this URL.") # if a feed exists else: result = utils.add_feed(feed_url) # if the feed is not in the file feed.lst if result is False: html += "

You are already following this feed!

" else: html += """

Feed added. You can now fetch your feeds.

""" html += """\n
\nBack to the management page.
\n""" html += "
\n" html += htmlfooter return html add_feed.exposed = True def remove_feed(self, feed_id): """ Remove a feed from the file feed.lst and from the SQLite base. """ html = htmlheader() html += htmlnav html += """
""" try: utils.remove_feed(self.feeds[feed_id].feed_link) html += """

All articles from the feed %s have been removed from the database.


""" % \ (self.feeds[feed_id].feed_title,) except: return self.error_page("This feed do not exists.") html += """Back to the management page.
\n""" html += "
\n" html += htmlfooter return html remove_feed.exposed = True def change_feed_url(self, new_feed_url, old_feed_url): """ Enables to change the URL of a feed already present in the database. """ html = htmlheader() html += htmlnav html += """
""" utils.change_feed_url(old_feed_url, new_feed_url) html += "

The URL of the feed has been changed.

" html += "
\n" html += htmlfooter return html change_feed_url.exposed = True def change_feed_name(self, feed_url, new_feed_name): """ Enables to change the name of a feed. """ html = htmlheader() html += htmlnav html += """
""" utils.change_feed_name(feed_url, new_feed_name) html += "

The name of the feed has been changed.

" html += "
\n" html += htmlfooter return html change_feed_name.exposed = True def change_feed_logo(self, feed_url, new_feed_logo): """ Enables to change the name of a feed. """ html = htmlheader() html += htmlnav html += """
""" utils.change_feed_logo(feed_url, new_feed_logo) html += "

The logo of the feed has been changed.

" html += "
\n" html += htmlfooter return html change_feed_logo.exposed = True def set_max_articles(self, max_nb_articles=1): """ Enables to set the maximum of articles to be loaded per feed from the data base. """ if max_nb_articles < -1 or max_nb_articles == 0: max_nb_articles = 1 utils.MAX_NB_ARTICLES = int(max_nb_articles) self.update() return self.management() set_max_articles.exposed = True def delete_article(self, param): """ Delete an article. """ try: feed_id, article_id = param.split(':') article = self.feeds[feed_id].articles[article_id] except: return self.error_page("Bad URL. This article do not exists.") try: conn = sqlite3.connect(utils.sqlite_base, isolation_level = None) c = conn.cursor() c.execute("DELETE FROM articles WHERE article_link='" + article.article_link +"'") except Exception, e: return e finally: conn.commit() c.close() return self.index() delete_article.exposed = True def drop_base(self): """ Delete all articles. """ utils.drop_base() return self.management() drop_base.exposed = True # # Export functions # def export(self, export_method): """ Export articles stored in the SQLite database in text (raw or HTML) files. """ for feed in self.feeds.values(): # creates folder for each stream folder = utils.path + "/var/export/" + \ utils.normalize_filename(feed.feed_title.strip().replace(':', '').lower()) try: os.makedirs(folder) except OSError: # directories already exists (not a problem) pass for article in feed.articles.values(): name = article.article_date.strip().replace(' ', '_') # Export all articles in HTML format if export_method == "export_HTML": name = os.path.normpath(folder + "/" + name + ".html") content = htmlheader() content += '\n
\n' content += """

%s


""" % \ (article.article_link, article.article_title) content += article.article_description content += "
\n
\n" content += htmlfooter # Export for dokuwiki # example: http://wiki.cedricbonhomme.org/doku.php/news-archives elif export_method == "export_dokuwiki": name = os.path.normpath(folder + "/" + name.replace(':', '-') + ".txt") content = "" content += '\n
\n' content += """

%s


""" % \ (article.article_link, article.article_title) content += article.article_description content += '
\n
Generated with pyAggr3g470r\n' # Export all articles in raw text elif export_method == "export_TXT": content = "Title: " + article.article_title + "\n\n\n" content += utils.clear_string(article.article_description) name = os.path.normpath(folder + "/" + name + ".txt") with open(name, "w") as f: f.write(content) return self.management() export.exposed = True def epub(self, param): """ Export an article to EPUB. """ try: from epub import ez_epub except Exception, e: return self.error_page(e) try: feed_id, article_id = param.split(':') except: return self.error_page("Bad URL.") try: feed = self.feeds[feed_id] article = feed.articles[article_id] except: self.error_page("This article do not exists.") try: folder = utils.path + "/var/export/epub/" os.makedirs(folder) except OSError: # directories already exists (not a problem) pass section = ez_epub.Section() section.title = article.article_title.decode('utf-8') section.paragraphs = [utils.clear_string(article.article_description).decode('utf-8')] ez_epub.makeBook(article.article_title.decode('utf-8'), [feed.feed_title.decode('utf-8')], [section], \ os.path.normpath(folder) + "article.epub", lang='en-US', cover=None) return self.article(param) epub.exposed = True # # Monitoring functions # def update(self, path=None, event = None): """ Synchronizes transient objects (dictionary of feed and articles) with the database. Called when a changes in the database is detected. """ self.feeds, \ self.nb_articles, self.nb_unread_articles, \ self.nb_favorites, self.nb_mail_notifications = utils.load_feed() if self.feeds != {}: print "Base (%s) loaded" % utils.sqlite_base else: print "Base (%s) empty!" % utils.sqlite_base def watch_base(self): """Monitor a file. Detect the changes in base of feeds. When a change is detected, reload the base. """ mon = gamin.WatchMonitor() time.sleep(10) mon.watch_file(utils.sqlite_base, self.update) ret = mon.event_pending() try: print "Watching %s" % utils.sqlite_base while True: ret = mon.event_pending() if ret > 0: print "The base of feeds (%s) has changed.\nReloading..." % utils.sqlite_base ret = mon.handle_one_event() time.sleep(1) except KeyboardInterrupt: pass print "Stop watching", sqlite_base mon.stop_watch(sqlite_base) del mon def watch_base_classic(self): """ Monitor the base of feeds if the module gamin is not installed. """ time.sleep(10) old_time = os.path.getmtime(utils.sqlite_base) try: print "Watching %s" % utils.sqlite_base while True: time.sleep(1) # simple test (date of last modification: getmtime) if os.path.getmtime(utils.sqlite_base) != old_time: print "The base of feeds (%s) has changed.\nReloading..." % utils.sqlite_base self.update() old_time = os.path.getmtime(utils.sqlite_base) except KeyboardInterrupt: pass print "Stop watching", utils.sqlite_base if __name__ == '__main__': # Point of entry in execution mode print "Launching pyAggr3g470r..." LOCKER = threading.Lock() root = Root() root.favicon_ico = cherrypy.tools.staticfile.handler(filename=os.path.join(utils.path + "/img/favicon.png")) cherrypy.config.update({ 'server.socket_port': 12556, 'server.socket_host': "0.0.0.0"}) cherrypy.config.update({'error_page.404': error_page_404}) _cp_config = {'request.error_response': handle_error} if not os.path.isfile(utils.sqlite_base): # create the SQLite base if not exists print "Creating data base..." utils.create_base() # load the informations from base in memory print "Loading informations from data base..." 
root.update() # launch the available base monitoring method (gamin or classic) try: import gamin thread_watch_base = threading.Thread(None, root.watch_base, None, ()) except: print "The gamin module is not installed." print "The base of feeds will be monitored with the simple method." thread_watch_base = threading.Thread(None, root.watch_base_classic, None, ()) thread_watch_base.setDaemon(True) thread_watch_base.start() cherrypy.quickstart(root, "/" ,config=utils.path + "/cfg/cherrypy.cfg")
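# cherrypy.quickstart above publishes every method of Root that carries an
# "exposed = True" attribute, and the 'error_page.404' entry points CherryPy at
# error_page_404() for unknown URLs. A minimal sketch of that exposure pattern on a
# throwaway handler class (the class name is illustrative); mounting it would only
# take another cherrypy.quickstart(DemoPages(), "/") call.
class DemoPages:
    """ Smallest possible CherryPy page handler. """
    def index(self):
        return "Hello from an exposed CherryPy handler."
    index.exposed = True

    def hidden(self):
        # not exposed: requesting /hidden returns the configured 404 page
        return "Never served."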