#! /usr/bin/env python
# -*- coding: utf-8 -*-
# jarr - A Web based news aggregator.
# Copyright (C) 2010-2015 Cédric Bonhomme - https://www.JARR-aggregator.org
#
# For more information : https://github.com/JARR-aggregator/JARR/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import time
import shutil
import tarfile
from datetime import datetime

from flask import jsonify

import conf
from web import models  # ORM models (module path assumed from the project layout)


def HTML_HEADER(title="jarr", css="./style.css"):
    return """<!DOCTYPE html>
<html lang="en-US">
<head>
<title>%s</title>
<meta charset="utf-8" />
<link rel="stylesheet" href="%s" />
</head>
<body>""" % (title, css)


HTML_FOOTER = """<hr />
<p>This archive has been generated with
<a href="https://github.com/JARR-aggregator/JARR">jarr</a>.
A software under AGPLv3 license.
You are welcome to copy, modify or redistribute the source code
according to the
<a href="https://www.gnu.org/licenses/agpl-3.0.html">AGPLv3</a> license.</p>
</body>
</html>
"""

CSS = """body { font:normal medium 'Gill Sans','Gill Sans MT',Verdana,sans-serif; margin:1.20em auto; width:80%; line-height:1.75; }
blockquote { font-size:small; line-height:2.153846; margin:2.153846em 0; padding:0; font-style:oblique; border-left:1px dotted; margin-left:2.153846em; padding-left:2.153846em; }
blockquote p { margin:2.153846em 0; }
p+br { display:none; }
h1 { font-size:large; }
h2,h3 { font-size:medium; }
hr { border-style:dotted; height:1px; border-width:1px 0 0 0; margin:1.45em 0 1.4em; padding:0; }
a { text-decoration:none; color:#00008B; }
#footer { clear:both; text-align:center; font-size:small; }
img { border:0; }
.horizontal,.simple li { margin:0; padding:0; list-style:none; display:inline; }
.simple li:before { content:"+ "; }
.simple > li:first-child:before { content:""; }
.author { text-decoration:none; display:block; float:right; margin-left:2em; font-size:small; }
.content { margin:1.00em 1.00em; }
"""


def export_html(user):
    """
    Export all articles of 'user' in Web pages.
    """
    webzine_root = conf.WEBZINE_ROOT + "webzine/"
    nb_articles = format(len(models.Article.query.filter(
        models.Article.user_id == user.id).all()), ",d")

    # Index page listing all feeds of the user.
    index = HTML_HEADER("News archive")
    index += "<h1>List of feeds</h1>\n"
    index += """<p>%s articles.</p>\n<ul>\n""" % (nb_articles,)

    for feed in user.feeds:
        # Create a folder for each feed.
        feed_folder = webzine_root + str(feed.id)
        try:
            os.makedirs(feed_folder)
        except OSError:
            # The directory already exists: not a problem.
            pass

        index += """<li><a href="%s">%s</a></li>\n""" % (feed.id, feed.title)

        # Index page of the feed, listing its articles.
        posts = HTML_HEADER(feed.title, "../style.css")
        posts += """<h1>Articles of the feed <a href="%s">%s</a></h1>\n""" % \
            (feed.site_link, feed.title)
        posts += """<p>%s articles.</p>\n""" % \
            (format(len(feed.articles.all()), ",d"),)

        for article in feed.articles:
            post_file_name = os.path.normpath(feed_folder + "/" +
                                              str(article.id) + ".html")
            feed_index = os.path.normpath(feed_folder + "/index.html")

            posts += article.date.ctime() + " - " + \
                """<a href="./%s.html">%s</a>""" % \
                (article.id, article.title[:150]) + "<br />\n"

            # One page per article.
            a_post = HTML_HEADER(article.title, "../style.css")
            a_post += '<div class="content">\n'
            a_post += """<h1><a href="%s">%s</a></h1>\n""" % \
                (article.link, article.title)
            a_post += article.content
            a_post += "</div>\n<hr />\n"
            a_post += """<a href="%s">Complete story</a>\n<br />\n""" % \
                (article.link,)
            a_post += HTML_FOOTER

            with open(post_file_name, "w") as f:
                f.write(a_post)

        posts += HTML_FOOTER
        with open(feed_index, "w") as f:
            f.write(posts)

    index += "</ul>\n"
    index += "<p>" + time.strftime("Generated on %d %b %Y at %H:%M.") + "</p>\n"
    index += HTML_FOOTER
    with open(webzine_root + "index.html", "w") as f:
        f.write(index)
    with open(webzine_root + "style.css", "w") as f:
        f.write(CSS)

    # Pack the webzine into a dated tar.gz archive and remove the work tree.
    archive_file_name = datetime.now().strftime('%Y-%m-%d') + '.tar.gz'
    with tarfile.open(conf.WEBZINE_ROOT + archive_file_name, "w:gz") as tar:
        tar.add(webzine_root, arcname=os.path.basename(webzine_root))
    shutil.rmtree(webzine_root)

    with open(conf.WEBZINE_ROOT + archive_file_name, 'rb') as export_file:
        return export_file.read(), archive_file_name


def export_json(user):
    """
    Export all articles of 'user' in JSON.
    """
    result = []
    for feed in user.feeds:
        result.append({
            "title": feed.title,
            "description": feed.description,
            "link": feed.link,
            "site_link": feed.site_link,
            "enabled": feed.enabled,
            "created_date": feed.created_date.strftime('%s'),
            "articles": [{
                "title": article.title,
                "link": article.link,
                "content": article.content,
                "readed": article.readed,
                "like": article.like,
                "date": article.date.strftime('%s'),
                "retrieved_date": article.retrieved_date.strftime('%s')
            } for article in feed.articles]
        })
    return jsonify(result=result)