author     Cédric Bonhomme <cedric@cedricbonhomme.org>  2016-11-08 14:39:47 +0100
committer  Cédric Bonhomme <cedric@cedricbonhomme.org>  2016-11-08 14:39:47 +0100
commit     2d72f44a90a76fe7450e59fdfdf4d42f44b9cd96 (patch)
tree       39895c10f68cf0b13d957073268769d04aa924a0
parent     Closes section HTML tag. (diff)
Various improvements to the crawler (better use of coroutines, and a check of whether an article should be updated). Tags are now retrieved for the k-means clustering (previously achieved with the content of articles).
-rw-r--r--  CHANGELOG.rst                                                        |   6
-rw-r--r--  migrations/versions/2c5cc05216fa_adding_tag_handling_capacities.py  |  30
-rw-r--r--  src/conf.py                                                          |  18
-rw-r--r--  src/conf/conf.cfg-sample                                             |   2
-rw-r--r--  src/crawler/classic_crawler.py                                       |  42
-rw-r--r--  src/web/controllers/abstract.py                                      |   7
-rw-r--r--  src/web/controllers/article.py                                       |  29
-rw-r--r--  src/web/lib/article_utils.py                                         | 228
-rw-r--r--  src/web/lib/feed_utils.py                                            |   5
-rwxr-xr-x  src/web/lib/misc_utils.py                                            |   2
-rw-r--r--  src/web/lib/utils.py                                                 |  24
-rw-r--r--  src/web/models/__init__.py                                           |   3
-rw-r--r--  src/web/models/article.py                                            |   9
-rw-r--r--  src/web/models/tag.py                                                |  22
-rw-r--r--  src/web/models/user.py                                               |   2
15 files changed, 297 insertions, 132 deletions
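
The commit message explains the motivation: tags are now stored so that k-means clustering can run on tag sets rather than on full article content. No clustering code is part of this diff; the snippet below is only an illustrative sketch of how per-article tags could feed k-means, assuming scikit-learn is available (the function and variable names are made up, not taken from the Newspipe codebase).

# Illustrative sketch, not part of this commit: clustering articles by their
# tag sets with k-means, assuming scikit-learn is installed.
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer

def cluster_articles_by_tags(articles, n_clusters=5):
    # Each article exposes a collection of tag strings through the
    # Article.tags association proxy introduced further down in this commit.
    documents = [' '.join(article.tags) for article in articles]
    vectors = TfidfVectorizer().fit_transform(documents)
    model = KMeans(n_clusters=n_clusters, n_init=10, random_state=0)
    labels = model.fit_predict(vectors)
    return dict(zip(articles, labels))
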
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 711425bc..6994ec92 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -8,7 +8,11 @@ Release History
* the new name of JARR is now Newspipe;
* the user can now add its twitter link through the profile page;
* it is now possible to edit the visibility of a feed (if it should be
- listed in the list of the user's public profile).
+ listed in the list of the user's public profile);
+ * tags of articles are now retrieved in order to use k-means clustering
+ on tags (will be faster than on the article's content);
+ * various improvements to the crawler (test if an article should be
+ updated and better use of coroutines).
Improvements:
* improved the layout of the profile page;
* the React.js page now only lists the feeds with unread articles by
diff --git a/migrations/versions/2c5cc05216fa_adding_tag_handling_capacities.py b/migrations/versions/2c5cc05216fa_adding_tag_handling_capacities.py
new file mode 100644
index 00000000..f9559fe3
--- /dev/null
+++ b/migrations/versions/2c5cc05216fa_adding_tag_handling_capacities.py
@@ -0,0 +1,30 @@
+"""adding tag handling capacities
+
+Revision ID: 2c5cc05216fa
+Revises: be2b8b6f33dd
+Create Date: 2016-11-08 07:41:13.923531
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '2c5cc05216fa'
+down_revision = 'be2b8b6f33dd'
+branch_labels = None
+depends_on = None
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+ op.create_table('tag',
+ sa.Column('text', sa.String(), nullable=False),
+ sa.Column('article_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['article_id'], ['article.id'],
+ ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('text', 'article_id')
+ )
+
+
+def downgrade():
+ op.drop_table('tag')
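
The migration adds a tag table keyed on (text, article_id) with a cascading foreign key to article. It would normally be applied through the project's Flask-Migrate tooling; the snippet below is only a hypothetical way to apply it programmatically with Alembic's command API, and the configuration path is an assumption.

# Hypothetical sketch: applying the new revision with Alembic's Python API.
from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')                # assumed configuration file
cfg.set_main_option('script_location', 'migrations')
command.upgrade(cfg, '2c5cc05216fa')       # apply up to this revision
# command.downgrade(cfg, 'be2b8b6f33dd')   # revert to the previous revision
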
diff --git a/src/conf.py b/src/conf.py
index 9718f07c..807c97e5 100644
--- a/src/conf.py
+++ b/src/conf.py
@@ -35,7 +35,6 @@ DEFAULTS = {"platform_url": "https://www.newspipe.org/",
"default_max_error": "3",
"log_path": "newspipe.log",
"log_level": "info",
- "user_agent": "Newspipe (https://github.com/newspipe)",
"secret_key": "",
"security_password_salt": "",
"enabled": "false",
@@ -44,7 +43,10 @@ DEFAULTS = {"platform_url": "https://www.newspipe.org/",
"ssl": "true",
"host": "0.0.0.0",
"port": "5000",
- "crawling_method": "classic"
+ "crawling_method": "classic",
+ "crawler_user_agent": "Newspipe (https://github.com/newspipe)",
+ "crawler_timeout": "30",
+ "crawler_resolv": "false"
}
if not ON_HEROKU:
@@ -88,16 +90,14 @@ LOG_LEVEL = {'debug': logging.DEBUG,
SQLALCHEMY_DATABASE_URI = config.get('database', 'database_url')
+CRAWLING_METHOD = config.get('crawler', 'crawling_method')
API_LOGIN = config.get('crawler', 'api_login')
API_PASSWD = config.get('crawler', 'api_passwd')
-USER_AGENT = config.get('crawler', 'user_agent')
-DEFAULT_MAX_ERROR = config.getint('crawler',
- 'default_max_error')
+CRAWLER_USER_AGENT = config.get('crawler', 'user_agent')
+DEFAULT_MAX_ERROR = config.getint('crawler', 'default_max_error')
ERROR_THRESHOLD = int(DEFAULT_MAX_ERROR / 2)
-
-CRAWLING_METHOD = config.get('crawler', 'crawling_method')
-
-
+CRAWLER_TIMEOUT = config.get('crawler', 'timeout')
+CRAWLER_RESOLV = config.getboolean('crawler', 'resolv')
WEBSERVER_HOST = config.get('webserver', 'host')
WEBSERVER_PORT = config.getint('webserver', 'port')
diff --git a/src/conf/conf.cfg-sample b/src/conf/conf.cfg-sample
index 8c8f04bf..c3cce42d 100644
--- a/src/conf/conf.cfg-sample
+++ b/src/conf/conf.cfg-sample
@@ -20,6 +20,8 @@ default_max_error = 6
user_agent = Newspipe (https://github.com/Newspipe/Newspipe)
api_login =
api_passwd =
+timeout = 30
+resolv = true
[notification]
notification_email = Newspipe@no-reply.com
host = smtp.googlemail.com
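
Together, the conf.py and conf.cfg-sample changes turn the crawler's user agent, timeout and URL-resolving behaviour into configuration. The snippet below is a minimal sketch of reading the new [crawler] options with configparser; it mirrors, but does not copy, the pattern used in src/conf.py, and the config file path is an assumption.

# Minimal sketch (not the actual src/conf.py code) of reading the new
# [crawler] options shown in conf.cfg-sample above.
from configparser import ConfigParser

config = ConfigParser()
config.read('src/conf/conf.cfg')           # path is an assumption

CRAWLER_USER_AGENT = config.get('crawler', 'user_agent',
                                fallback='Newspipe (https://github.com/newspipe)')
CRAWLER_TIMEOUT = config.getint('crawler', 'timeout', fallback=30)
CRAWLER_RESOLV = config.getboolean('crawler', 'resolv', fallback=False)
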
diff --git a/src/crawler/classic_crawler.py b/src/crawler/classic_crawler.py
index dac34e8c..7d29d462 100644
--- a/src/crawler/classic_crawler.py
+++ b/src/crawler/classic_crawler.py
@@ -30,7 +30,7 @@ import asyncio
import logging
import feedparser
import dateutil.parser
-from datetime import datetime
+from datetime import datetime, timezone
from sqlalchemy import or_
import conf
@@ -111,7 +111,6 @@ async def parse_feed(user, feed):
async def insert_database(user, feed):
-
articles = await parse_feed(user, feed)
if None is articles:
return []
@@ -121,48 +120,41 @@ async def insert_database(user, feed):
new_articles = []
art_contr = ArticleController(user.id)
for article in articles:
+ new_article = await construct_article(article, feed)
+
try:
existing_article_req = art_contr.read(feed_id=feed.id,
- **extract_id(article))
+ entry_id=extract_id(article))
except Exception as e:
logger.exception("existing_article_req: " + str(e))
continue
-
exist = existing_article_req.count() != 0
if exist:
# if the article has been already retrieved, we only update
# the content or the title
- logger.debug('Article already in the database: '. \
- format(article['title']))
+ logger.info('Article already in the database: {}'. \
+ format(article['link']))
existing_article = existing_article_req.first()
- new_updated_date = None
- try:
- new_updated_date = dateutil.parser.parse(article['updated'])
- except Exception as e:
- new_updated_date = existing_article.date
- logger.exception('new_updated_date failed: {}'.format(e))
-
- if None is existing_article.updated_date:
- existing_article.updated_date = new_updated_date.replace(tzinfo=None)
- if existing_article.updated_date.strftime('%Y-%m-%dT%H:%M:%S') != \
- new_updated_date.strftime('%Y-%m-%dT%H:%M:%S'):
- logger.info('article updated')
- existing_article.updated_date = \
- new_updated_date.replace(tzinfo=None)
- if existing_article.title != article['title']:
- existing_article.title = article['title']
+
+ if new_article['date'].replace(tzinfo=None) != \
+ existing_article.date:
+ existing_article.date = new_article['date']
+ existing_article.updated_date = new_article['date']
+ if existing_article.title != new_article['title']:
+ existing_article.title = new_article['title']
content = get_article_content(article)
if existing_article.content != content:
existing_article.content = content
existing_article.readed = False
art_contr.update({'entry_id': existing_article.entry_id},
existing_article.dump())
+ logger.info('Article updated: {}'.format(article['link']))
continue
+
# insertion of the new article
- article = construct_article(article, feed)
try:
- new_articles.append(art_contr.create(**article))
- logger.info('New article added: {}'.format(article['link']))
+ new_articles.append(art_contr.create(**new_article))
+ logger.info('New article added: {}'.format(new_article['link']))
except Exception:
logger.exception('Error when inserting article in database:')
continue
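
insert_database now awaits construct_article for every entry before deciding whether to update the stored row or insert a new one. The sketch below only illustrates the asyncio pattern the crawler relies on, fanning out one coroutine per feed; the actual scheduling code in this module may differ.

# Illustrative sketch of running insert_database concurrently for several
# feeds; insert_database is the coroutine defined in this module.
import asyncio

async def crawl(user, feeds):
    results = await asyncio.gather(
        *(insert_database(user, feed) for feed in feeds),
        return_exceptions=True)
    # Keep only the lists of new articles, dropping feeds that failed.
    return [r for r in results if not isinstance(r, Exception)]

# asyncio.get_event_loop().run_until_complete(crawl(user, feeds))
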
diff --git a/src/web/controllers/abstract.py b/src/web/controllers/abstract.py
index 58532660..3c91e08a 100644
--- a/src/web/controllers/abstract.py
+++ b/src/web/controllers/abstract.py
@@ -91,11 +91,8 @@ class AbstractController:
obj = self._db_cls(**attrs)
db.session.add(obj)
- try:
- db.session.commit()
- except Exception as e:
- db.session.rollback()
- logger.exception(str(e))
+ db.session.flush()
+ db.session.commit()
return obj
def read(self, **filters):
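
create() no longer swallows database errors: the session is flushed and committed, so a constraint violation surfaces to the caller, which now owns the error handling. A small hypothetical sketch of the new failure mode:

# Hypothetical sketch: with flush() + commit(), an IntegrityError (for example
# a duplicate primary key) now reaches the caller instead of being logged away.
from sqlalchemy.exc import IntegrityError

try:
    art_contr.create(**new_article)        # controller/data assumed to exist
except IntegrityError:
    db.session.rollback()                  # the caller decides how to recover
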
diff --git a/src/web/controllers/article.py b/src/web/controllers/article.py
index 02c8fc75..4607b225 100644
--- a/src/web/controllers/article.py
+++ b/src/web/controllers/article.py
@@ -6,6 +6,7 @@ from collections import Counter
from bootstrap import db
from .abstract import AbstractController
+from web.lib.article_utils import process_filters
from web.controllers import CategoryController, FeedController
from web.models import Article
@@ -43,29 +44,11 @@ class ArticleController(AbstractController):
"no right on feed %r" % feed.id
attrs['user_id'], attrs['category_id'] = feed.user_id, feed.category_id
- # handling feed's filters
- for filter_ in feed.filters or []:
- match = False
- if filter_.get('type') == 'regex':
- match = re.match(filter_['pattern'], attrs.get('title', ''))
- elif filter_.get('type') == 'simple match':
- match = filter_['pattern'] in attrs.get('title', '')
- take_action = match and filter_.get('action on') == 'match' \
- or not match and filter_.get('action on') == 'no match'
-
- if not take_action:
- continue
-
- if filter_.get('action') == 'mark as read':
- attrs['readed'] = True
- logger.warn("article %s will be created as read",
- attrs['link'])
- elif filter_.get('action') == 'mark as favorite':
- attrs['like'] = True
- logger.warn("article %s will be created as liked",
- attrs['link'])
-
- return super().create(**attrs)
+ skipped, read, liked = process_filters(feed.filters, attrs)
+ if skipped:
+ return None
+ article = super().create(**attrs)
+ return article
def update(self, filters, attrs):
user_id = attrs.get('user_id', self.user_id)
diff --git a/src/web/lib/article_utils.py b/src/web/lib/article_utils.py
index f79c1b30..81a0b145 100644
--- a/src/web/lib/article_utils.py
+++ b/src/web/lib/article_utils.py
@@ -1,74 +1,60 @@
+import html
import logging
-import dateutil.parser
+import re
from datetime import datetime, timezone
+from enum import Enum
+from urllib.parse import SplitResult, urlsplit, urlunsplit
+
+import dateutil.parser
+from bs4 import BeautifulSoup, SoupStrainer
+from requests.exceptions import MissingSchema
import conf
-from web.lib.utils import to_hash
+from web.lib.utils import jarr_get
logger = logging.getLogger(__name__)
+PROCESSED_DATE_KEYS = {'published', 'created', 'updated'}
-def extract_id(entry, keys=[('link', 'link'), ('published', 'date'),
- ('updated', 'date')], force_id=False):
- """For a given entry will return a dict that allows to identify it. The
- dict will be constructed on the uid of the entry. if that identifier is
- absent, the dict will be constructed upon the values of "keys".
- """
- entry_id = entry.get('entry_id') or entry.get('id')
- if entry_id:
- return {'entry_id': entry_id}
- if not entry_id and force_id:
- return to_hash("".join(entry[entry_key] for _, entry_key in keys
- if entry_key in entry).encode('utf8'))
- else:
- ids = {}
- for entry_key, pyagg_key in keys:
- if entry_key in entry and pyagg_key not in ids:
- ids[pyagg_key] = entry[entry_key]
- if 'date' in pyagg_key:
- try:
- ids[pyagg_key] = dateutil.parser.parse(ids[pyagg_key])\
- .isoformat()
- except ValueError as e:
- logger.exception("extract_id: " + str(e))
- ids[pyagg_key] = datetime.now().isoformat()
- return ids
-
-
-def construct_article(entry, feed):
- if hasattr(feed, 'dump'): # this way can be a sqlalchemy obj or a dict
- feed = feed.dump()
- "Safe method to transorm a feedparser entry into an article"
- now = datetime.now()
- date = None
- for date_key in ('published', 'created', 'date'):
- if entry.get(date_key):
- try:
- date = dateutil.parser.parse(entry[date_key])\
- .astimezone(timezone.utc)
- except Exception as e:
- logger.exception(str(e))
- else:
- break
+def extract_id(entry):
+ """ extract a value from an entry that will identify it among the other of
+ that feed"""
+ return entry.get('entry_id') or entry.get('id') or entry['link']
- updated_date = None
- try:
- updated_date = dateutil.parser.parse(entry['updated'])
- except Exception:
- pass
- content = get_article_content(entry)
- article_link = entry.get('link')
- return {'feed_id': feed['id'],
- 'user_id': feed['user_id'],
- 'entry_id': extract_id(entry).get('entry_id', None),
- 'link': entry.get('link', feed['site_link']),
- 'title': entry.get('title', 'No title'),
- 'readed': False, 'like': False,
- 'content': content,
- 'retrieved_date': now,
- 'date': date or now,
- 'updated_date': updated_date or date or now}
+async def construct_article(entry, feed, fields=None, fetch=True):
+ "Safe method to transorm a feedparser entry into an article"
+ now = datetime.utcnow()
+ article = {}
+ def push_in_article(key, value):
+ if not fields or key in fields:
+ article[key] = value
+ push_in_article('feed_id', feed.id)
+ push_in_article('user_id', feed.user_id)
+ push_in_article('entry_id', extract_id(entry))
+ push_in_article('retrieved_date', now)
+ if not fields or 'date' in fields:
+ for date_key in PROCESSED_DATE_KEYS:
+ if entry.get(date_key):
+ try:
+ article['date'] = dateutil.parser.parse(entry[date_key])\
+ .astimezone(timezone.utc)
+ except Exception as e:
+ logger.exception(e)
+ else:
+ break
+ push_in_article('content', get_article_content(entry))
+ if fields is None or {'link', 'title'}.intersection(fields):
+ link, title = await get_article_details(entry, fetch)
+ push_in_article('link', link)
+ push_in_article('title', title)
+ if 'content' in article:
+ #push_in_article('content', clean_urls(article['content'], link))
+ push_in_article('content', article['content'])
+ push_in_article('tags', {tag.get('term').strip()
+ for tag in entry.get('tags', [])})
+ return article
+
def get_article_content(entry):
content = ''
@@ -77,3 +63,123 @@ def get_article_content(entry):
elif entry.get('summary'):
content = entry['summary']
return content
+
+
+async def get_article_details(entry, fetch=True):
+ article_link = entry.get('link')
+ article_title = html.unescape(entry.get('title', ''))
+ if fetch and conf.CRAWLER_RESOLV and article_link or not article_title:
+ try:
+ # resolves URL behind proxies (like feedproxy.google.com)
+ response = await jarr_get(article_link, timeout=5)
+ except MissingSchema:
+ split, failed = urlsplit(article_link), False
+ for scheme in 'https', 'http':
+ new_link = urlunsplit(SplitResult(scheme, *split[1:]))
+ try:
+ response = await jarr_get(new_link, timeout=5)
+ except Exception as error:
+ failed = True
+ continue
+ failed = False
+ article_link = new_link
+ break
+ if failed:
+ return article_link, article_title or 'No title'
+ except Exception as error:
+ logger.info("Unable to get the real URL of %s. Won't fix "
+ "link or title. Error: %s", article_link, error)
+ return article_link, article_title or 'No title'
+ article_link = response.url
+ if not article_title:
+ bs_parsed = BeautifulSoup(response.content, 'html.parser',
+ parse_only=SoupStrainer('head'))
+ try:
+ article_title = bs_parsed.find_all('title')[0].text
+ except IndexError: # no title
+ pass
+ return article_link, article_title or 'No title'
+
+
+class FiltersAction(Enum):
+ READ = 'mark as read'
+ LIKED = 'mark as favorite'
+ SKIP = 'skipped'
+
+
+class FiltersType(Enum):
+ REGEX = 'regex'
+ MATCH = 'simple match'
+ EXACT_MATCH = 'exact match'
+ TAG_MATCH = 'tag match'
+ TAG_CONTAINS = 'tag contains'
+
+
+class FiltersTrigger(Enum):
+ MATCH = 'match'
+ NO_MATCH = 'no match'
+
+
+def process_filters(filters, article, only_actions=None):
+ skipped, read, liked = False, None, False
+ filters = filters or []
+ if only_actions is None:
+ only_actions = set(FiltersAction)
+ for filter_ in filters:
+ match = False
+ try:
+ pattern = filter_.get('pattern', '')
+ filter_type = FiltersType(filter_.get('type'))
+ filter_action = FiltersAction(filter_.get('action'))
+ filter_trigger = FiltersTrigger(filter_.get('action on'))
+ if filter_type is not FiltersType.REGEX:
+ pattern = pattern.lower()
+ except ValueError:
+ continue
+ if filter_action not in only_actions:
+ logger.debug('ignoring filter %r' % filter_)
+ continue
+ if filter_action in {FiltersType.REGEX, FiltersType.MATCH,
+ FiltersType.EXACT_MATCH} and 'title' not in article:
+ continue
+ if filter_action in {FiltersType.TAG_MATCH, FiltersType.TAG_CONTAINS} \
+ and 'tags' not in article:
+ continue
+ title = article.get('title', '').lower()
+ tags = [tag.lower() for tag in article.get('tags', [])]
+ if filter_type is FiltersType.REGEX:
+ match = re.match(pattern, title)
+ elif filter_type is FiltersType.MATCH:
+ match = pattern in title
+ elif filter_type is FiltersType.EXACT_MATCH:
+ match = pattern == title
+ elif filter_type is FiltersType.TAG_MATCH:
+ match = pattern in tags
+ elif filter_type is FiltersType.TAG_CONTAINS:
+ match = any(pattern in tag for tag in tags)
+ take_action = match and filter_trigger is FiltersTrigger.MATCH \
+ or not match and filter_trigger is FiltersTrigger.NO_MATCH
+
+ if not take_action:
+ continue
+
+ if filter_action is FiltersAction.READ:
+ read = True
+ elif filter_action is FiltersAction.LIKED:
+ liked = True
+ elif filter_action is FiltersAction.SKIP:
+ skipped = True
+
+ if skipped or read or liked:
+ logger.info("%r applied on %r", filter_action.value,
+ article.get('link') or article.get('title'))
+ return skipped, read, liked
+
+
+def get_skip_and_ids(entry, feed):
+ entry_ids = construct_article(entry, feed,
+ {'entry_id', 'feed_id', 'user_id'}, fetch=False)
+ skipped, _, _ = process_filters(feed.filters,
+ construct_article(entry, feed, {'title', 'tags'}, fetch=False),
+ {FiltersAction.SKIP})
+ return skipped, entry_ids
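
process_filters now covers title- and tag-based rules and returns three flags instead of mutating the article. A usage sketch with a made-up filter definition follows; the dict keys match what the function reads above ('type', 'pattern', 'action', 'action on').

# Usage sketch for process_filters with a hypothetical feed filter.
filters = [{'type': 'tag contains',
            'pattern': 'Python',
            'action on': 'match',
            'action': 'mark as read'}]
article = {'title': 'Release notes',
           'link': 'https://example.org/notes',
           'tags': {'python 3.6', 'release'}}

skipped, read, liked = process_filters(filters, article)
# -> skipped=False, read=True, liked=False: the tag rule matched, so a caller
#    can decide to store the article as already read.
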
diff --git a/src/web/lib/feed_utils.py b/src/web/lib/feed_utils.py
index 9925613f..94ae6e53 100644
--- a/src/web/lib/feed_utils.py
+++ b/src/web/lib/feed_utils.py
@@ -3,7 +3,7 @@ import urllib
import logging
import requests
import feedparser
-from conf import USER_AGENT
+from conf import CRAWLER_USER_AGENT
from bs4 import BeautifulSoup, SoupStrainer
from web.lib.utils import try_keys, try_get_icon_url, rebuild_url
@@ -32,7 +32,8 @@ def escape_keys(*keys):
@escape_keys('title', 'description')
def construct_feed_from(url=None, fp_parsed=None, feed=None, query_site=True):
- requests_kwargs = {'headers': {'User-Agent': USER_AGENT}, 'verify': False}
+ requests_kwargs = {'headers': {'User-Agent': CRAWLER_USER_AGENT},
+ 'verify': False}
if url is None and fp_parsed is not None:
url = fp_parsed.get('url')
if url is not None and fp_parsed is None:
diff --git a/src/web/lib/misc_utils.py b/src/web/lib/misc_utils.py
index 22d52b70..1359c798 100755
--- a/src/web/lib/misc_utils.py
+++ b/src/web/lib/misc_utils.py
@@ -109,6 +109,8 @@ def fetch(id, feed_id=None):
"""
cmd = [sys.executable, conf.BASE_DIR + '/manager.py', 'fetch_asyncio',
'--user_id='+str(id)]
+ if feed_id:
+ cmd.append('--feed_id='+str(feed_id))
return subprocess.Popen(cmd, stdout=subprocess.PIPE)
def history(user_id, year=None, month=None):
diff --git a/src/web/lib/utils.py b/src/web/lib/utils.py
index f2bed3ff..d206b769 100644
--- a/src/web/lib/utils.py
+++ b/src/web/lib/utils.py
@@ -6,6 +6,8 @@ import requests
from hashlib import md5
from flask import request, url_for
+import conf
+
logger = logging.getLogger(__name__)
@@ -46,11 +48,17 @@ def try_get_icon_url(url, *splits):
if split is None:
continue
rb_url = rebuild_url(url, split)
- response = requests.get(rb_url, verify=False, timeout=10)
+ response = None
# if html in content-type, we assume it's a fancy 404 page
- content_type = response.headers.get('content-type', '')
- if response.ok and 'html' not in content_type and response.content:
- return response.url
+ try:
+ response = jarr_get(rb_url)
+ content_type = response.headers.get('content-type', '')
+ except Exception:
+ pass
+ else:
+ if response is not None and response.ok \
+ and 'html' not in content_type and response.content:
+ return response.url
return None
@@ -71,3 +79,11 @@ def clear_string(data):
def redirect_url(default='home'):
return request.args.get('next') or request.referrer or url_for(default)
+
+
+async def jarr_get(url, **kwargs):
+ request_kwargs = {'verify': False, 'allow_redirects': True,
+ 'timeout': conf.CRAWLER_TIMEOUT,
+ 'headers': {'User-Agent': conf.CRAWLER_USER_AGENT}}
+ request_kwargs.update(kwargs)
+ return requests.get(url, **request_kwargs)
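
jarr_get centralises the crawler's HTTP defaults (no TLS verification, redirects followed, configured timeout and user agent) behind a coroutine. A short usage sketch, with an illustrative URL and assuming src/ is on the import path:

# Usage sketch; keyword arguments override the defaults taken from conf.
import asyncio
from web.lib.utils import jarr_get

async def probe():
    response = await jarr_get('https://example.org/feed', timeout=5)
    return response.status_code, response.url

# asyncio.get_event_loop().run_until_complete(probe())
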
diff --git a/src/web/models/__init__.py b/src/web/models/__init__.py
index 53b692e2..1fc0c3e0 100644
--- a/src/web/models/__init__.py
+++ b/src/web/models/__init__.py
@@ -32,8 +32,9 @@ from .user import User
from .article import Article
from .icon import Icon
from .category import Category
+from .tag import Tag
-__all__ = ['Feed', 'Role', 'User', 'Article', 'Icon', 'Category']
+__all__ = ['Feed', 'Role', 'User', 'Article', 'Icon', 'Category', 'Tag']
import os
diff --git a/src/web/models/article.py b/src/web/models/article.py
index 23708f6b..5261cb0d 100644
--- a/src/web/models/article.py
+++ b/src/web/models/article.py
@@ -29,6 +29,8 @@ __license__ = "GPLv3"
from bootstrap import db
from datetime import datetime
from sqlalchemy import asc, desc, Index
+from sqlalchemy.ext.associationproxy import association_proxy
+
from web.models.right_mixin import RightMixin
@@ -49,6 +51,13 @@ class Article(db.Model, RightMixin):
feed_id = db.Column(db.Integer(), db.ForeignKey('feed.id'))
category_id = db.Column(db.Integer(), db.ForeignKey('category.id'))
+ # relationships
+ tag_objs = db.relationship('Tag', back_populates='article',
+ cascade='all,delete-orphan',
+ lazy=False,
+ foreign_keys='[Tag.article_id]')
+ tags = association_proxy('tag_objs', 'text')
+
# index
idx_article_uid = Index('user_id')
idx_article_uid_cid = Index('user_id', 'category_id')
diff --git a/src/web/models/tag.py b/src/web/models/tag.py
new file mode 100644
index 00000000..8d7fe4d4
--- /dev/null
+++ b/src/web/models/tag.py
@@ -0,0 +1,22 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from sqlalchemy import Column, ForeignKey, Integer, String
+from sqlalchemy.orm import relationship
+
+from bootstrap import db
+
+
+class Tag(db.Model):
+ text = Column(String, primary_key=True, unique=False)
+
+ # foreign keys
+ article_id = Column(Integer, ForeignKey('article.id', ondelete='CASCADE'),
+ primary_key=True)
+
+ # relationships
+ article = relationship('Article', back_populates='tag_objs',
+ foreign_keys=[article_id])
+
+ def __init__(self, text):
+ self.text = text
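
With the Tag model and the association_proxy declared on Article, tag strings can be read and written directly on an article while SQLAlchemy creates and deletes the underlying Tag rows. A usage sketch follows; the session and article objects are assumed to come from the surrounding Flask-SQLAlchemy setup.

# Usage sketch of the tags association proxy; `session` and `article` are
# assumed to already exist.
article.tags = {'python', 'rss'}     # creates two Tag rows via Tag(text)
'python' in article.tags             # -> True
len(article.tag_objs)                # -> 2
session.commit()

session.delete(article)              # cascade='all,delete-orphan' removes the
session.commit()                     # related Tag rows as well
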
diff --git a/src/web/models/user.py b/src/web/models/user.py
index 37bc78fd..460958e0 100644
--- a/src/web/models/user.py
+++ b/src/web/models/user.py
@@ -64,7 +64,7 @@ class User(db.Model, UserMixin, RightMixin):
is_admin = db.Column(db.Boolean(), default=False)
is_api = db.Column(db.Boolean(), default=False)
- # relationship
+ # relationships
categories = db.relationship('Category', backref='user',
cascade='all, delete-orphan',
foreign_keys=[Category.user_id])