#!/usr/bin/env python3
# File: libraries/polaris.py
# Author: bgstack15
# Startdate: 2024-07-06-7 19:11
# SPDX-License-Identifier: GPL-3.0-only
# Title: Library Plugin for Polaris
# Purpose: plugin for polaris-based library websites
# History:
# Usage:
# Reference:
# Improve:
# Dependencies:
#    dep-devuan: python3-bs4
from .base import *
import requests, dateutil, base64, os, urllib.parse, sys
# for python3.6 on AlmaLinux 8:
try:
    import dateutil.parser
except ImportError:
    pass
from bs4 import BeautifulSoup
class Library(BaseLibrary):
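    """Plugin for Polaris-based library catalog websites, which use an ASP.NET frontend."""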
    def __init__(self, config_obj = None, alias = None, username = None, password = None, baseurl = None, session = None):
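        """Prefer settings from config_obj, falling back to the individual keyword arguments, then log in immediately."""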
if config_obj and "username" in config_obj:
self.username = config_obj["username"]
else:
self.username = username
if config_obj and "password" in config_obj:
self.password = config_obj["password"]
else:
self.password = password
if config_obj and "baseurl" in config_obj:
self.baseurl = config_obj["baseurl"]
else:
self.baseurl = baseurl if baseurl else "https://catalog.example.org/polaris"
#/logon.aspx?src=http%3a%2f%2fcatalog.example.org%2fpolaris%2fpatronaccount%2fdefault.aspx%3fctx%3d1.1033.0.0.3&ctx=1.1033.0.0.3"
self.baseurl = self.baseurl.rstrip("/")
if session and type(session) == requests.sessions.Session:
self.session = session
else:
self.session = requests.Session()
if config_obj and "alias" in config_obj:
self.alias = config_obj["alias"]
else:
self.alias = alias if alias else "Polaris-based library"
# asp/M$ components
self.language_code = "1.1033.0.0.3"
self.baseurl_http = self.baseurl.replace("https://","http://")
self.src = f"{self.baseurl_http}/patronaccount/default.aspx?ctx={self.language_code}&ctx={self.language_code}"
self.src2 = urllib.parse.unquote_plus(self.src)
self.card_expires = None
# log in now. Why would we not?
self.login()
    def get_reservations(self, verbose = False):
        """Return the list of hold requests. Pending and ready holds are lumped together in one list; use the status field to tell them apart."""
        reservations = []
        b = self.baseurl
        s = self.session
        output = s.get(f"{b}/PatronAccount/requests.aspx",headers={"Referer":f"{b}/PatronAccount/itemsout.aspx"}).content
        soup = BeautifulSoup(output, "html.parser")
        all_reservations = soup.find_all("tr",class_=["patron-account__grid-row","patron-account__grid-alternating-row"])
        for item in all_reservations:
            images_hrefs = [i["src"] for i in item.find_all("img",attrs={"aria-label":"Cover Image"})]
            titles = [i.text for i in item.find_all("a",class_="patron-account__grid-link")]
            labels = [i.text for i in item.find_all("label",class_="label-xs")]
            #print(f"Labels = {labels}",file=sys.stderr)
            values = [i.next_sibling.next_sibling for i in item.find_all("label",class_="label-xs")]
            values2 = []
            for i in values:
                try:
                    values2.append(i.text)
                except AttributeError:
                    # if status = "Transferred for hold" then the position value will be blank.
                    values2.append("")
            values_dict = dict(zip(labels,values2))
            dates = [i.text.replace("(as of ","").replace(")","").replace("(","") for i in item.find_all("span",class_="patron-account__holds-date")]
            formats = [i["title"] for i in item.find_all("img") if "aria-label" not in i.attrs]
            img_b64, img_type = self.get_image(images_hrefs[0])
            # it is a happy accident that "date placed" for a ready reservation will indicate "until 7/17/2024". No work is required to capture this separately.
            obj = {
                "patron": self.alias,
                "position": values_dict["Hold Request Position"],
                "status": values_dict["Status"],
                "date_placed": dates[0],
                "format": formats[0],
                "location": values_dict["Pickup Library"],
                "title": titles[0],
                "img_href": images_hrefs[0],
                "img50": img_b64[:50],
                "img": img_b64,
                "img_type": img_type,
            }
            reservations.append(obj)
        return reservations
    def get_checkouts(self, verbose=False):
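        """Return the list of items currently checked out, fetching the per-item details view for barcode and checkout date."""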
        checked_out_objects = []
        b = self.baseurl
        s = self.session
        # step 1: visit the checked out webpage
        # curl 'https://catalog.example.org/polaris/patronaccount/itemsout.aspx' -H 'Referer: https://catalog.example.org/polaris/patronaccount/default.aspx?ctx=1.1033.0.0.3' -H 'Cookie: ASP.NET_SessionId=f4gn4iqzma4ftv3i3x3qo4k3; OrgID=1'
        headers = {
            "Referer": self.src2,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
            "Priority": "u=1",
        }
        output = s.get(f"{b}/patronaccount/itemsout.aspx", headers=headers).content.decode()
        soup = BeautifulSoup(output, "html.parser")
        all_checkouts = soup.find_all("tr",class_=["patron-account__grid-alternating-row","patron-account__grid-row"])
        for item in all_checkouts:
            images_hrefs = [i["src"] for i in item.find_all("img",attrs={"aria-label":"Cover Image"})]
            titles = [i.text for i in item.find_all("span",id="labelTitle")]
            renewals_lefts = [i.text for i in item.find_all("span",id="labelRenewalsLeft")]
            due_dates = [i.text for i in item.find_all("span",id="labelDueDate")]
            formats = item.find_all("img")
            formats2 = []
            for i in formats:
                try:
                    # only the format icon carries a title attribute; its alt text names the format
                    i["title"]
                    formats2.append(i["alt"])
                except KeyError:
                    pass
            # for each item we must run the info link:
            # curl 'https://catalog.example.org/polaris/patronaccount/components/ajaxiteminfo.aspx?RecID=1296014&VendorObjID=&VendorID=0' -H 'Referer: https://catalog.example.org/polaris/patronaccount/itemsout.aspx' -H 'Cookie: ASP.NET_SessionId=b0ssctcysdrbcfy3jlwwdvr0; OrgID=1'
            info_links = [i["href"].replace("javascript:showModalBasic('","").replace("')","") for i in item.find_all("a",title="Item Details")]
            # obj MUST HAVE patron, title, format, barcode, due, img
            # obj SHOULD HAVE img50, img_href
            if verbose:
                print(f"DEBUG: got images_hrefs {images_hrefs}",file=sys.stderr)
                print(f"DEBUG: got titles {titles}",file=sys.stderr)
                print(f"DEBUG: got renewals_lefts {renewals_lefts}",file=sys.stderr)
                print(f"DEBUG: got due_dates {due_dates}",file=sys.stderr)
                print(f"DEBUG: got formats {formats}",file=sys.stderr)
                print(f"DEBUG: got formats2 {formats2}",file=sys.stderr)
            for x, title in enumerate(titles):
                img_b64, img_type = self.get_image(images_hrefs[x])
                # strip the &nbsp; entities so labels like "Item Barcode" parse cleanly below
                details_response = s.get(info_links[x]).content.decode().replace("&nbsp;","")
                soup2 = BeautifulSoup(details_response,"html.parser")
                # each detail row is three <td> cells: label, spacer, value
                details = [i.text for i in soup2.find_all("td")]
                details_labels = details[::3]
                details_texts = details[2::3]
                details_dict = dict(zip(details_labels,details_texts))
                if verbose:
                    print(f"DEBUG: labels {details_labels}",file=sys.stderr)
                    print(f"DEBUG: texts {details_texts}",file=sys.stderr)
                    print(f"DEBUG: details_dict {details_dict}",file=sys.stderr)
                obj = {
                    "patron": self.alias,
                    "title": title,
                    "format": formats2[x],
                    "barcode": details_dict["Item Barcode"],
                    "due": dateutil.parser.parse(due_dates[x]),
                    "img_href": images_hrefs[x],
                    "renewals_left": renewals_lefts[x],
                    "img_type": img_type,
                    "img50": img_b64[:50],
                    "img": img_b64,
                    "checkout_date": dateutil.parser.parse(details_dict["Date of Checkout"]),
                    # details_dict["Renewals Permitted"] is also available if the total allowed is ever needed
                }
                checked_out_objects.append(obj)
        return checked_out_objects
    # Boilerplate
    def get_class_name(self):
        return os.path.basename(__file__).replace(".py","")
    def login(self):
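        """Perform the ASP.NET form logon, raise if the credentials are rejected, and record the card expiration date when it can be found."""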
        b = self.baseurl
        s = self.session
        # step 1: visit logon page
        response = s.get(f"{b}/logon.aspx?src={self.src2}").content
        # we need the __VIEWSTATE, __VIEWSTATEGENERATOR, and __EVENTVALIDATION hidden inputs
        soup = BeautifulSoup(response, "html.parser")
        form = soup.find("form", id = "formMain")
        viewstate = form.find("input", id = "__VIEWSTATE")["value"]
        viewstategenerator = form.find("input", id = "__VIEWSTATEGENERATOR")["value"]
        eventvalidation = form.find("input", id = "__EVENTVALIDATION")["value"]
        # step 2: submit logon
        # curl 'https://catalog.example.org/polaris/logon.aspx?src=http%3a%2f%2fcatalog.example.org%2fpolaris%2fpatronaccount%2fdefault.aspx%3fctx%3d1.1033.0.0.3&ctx=1.1033.0.0.3' -X POST -H 'Content-Type: application/x-www-form-urlencoded' -H 'Referer: https://catalog.example.org/polaris/logon.aspx?src=http%3a%2f%2fcatalog.example.org%2fpolaris%2fpatronaccount%2fdefault.aspx%3fctx%3d1.1033.0.0.3&ctx=1.1033.0.0.3' -H 'Cookie: ASP.NET_SessionId=vxcsdp1cj0hx4pw5xzyvjzmv; OrgID=1' --data-raw '__VIEWSTATE=%2FwEPDwUTRUNCATED_TRUNCATEDAFFNraXAgdG8gbWFpbiBjb250ZW50EVNraXAgdG8gbWFpbiBtZW51ZGR1k%2BAxg5Y9OX3bD7t2P%2FT5kMtk3%2F5W7qyJnA%2B8VzrtGg%3D%3D&__VIEWSTATEGENERATOR=ADF38500&__EVENTVALIDATION=%2FwEdAAR9XqzqHC%2FBfgLuDOYb7iPsH5Q20m6JqSZMIYtkRxbfSedJDH80kUkzbS%2FLyzKWHn1t7yJCbczH%2Bwz7aZL%2F8kJqv109lw0hamEH0qk8Qgc0RXgof%2BWcR4FnrI1R3xMIkD4%3D&ctl00%24BodyMainContent%24textboxBarcodeUsername=userNameHere&ctl00%24BodyMainContent%24textboxPassword=12Pass34&ctl00%24BodyMainContent%24buttonSubmit=Log+In'
        headers = {
            "Content-Type": "application/x-www-form-urlencoded",
            "Referer": f"{b}/logon.aspx?src={self.src2}",
            #"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:127.0) Gecko/20100101 Firefox/127.0",
        }
        data = {
            "__VIEWSTATE": viewstate,
            "__VIEWSTATEGENERATOR": viewstategenerator,
            "__EVENTVALIDATION": eventvalidation,
            "ctl00$BodyMainContent$textboxBarcodeUsername": self.username,
            "ctl00$BodyMainContent$textboxPassword": self.password,
            "ctl00$BodyMainContent$buttonSubmit": "Log In",
        }
        # this always returns a 200, even if the user login failed
        url = f"{b}/logon.aspx?src={self.src2}"
        response = s.post(url, headers=headers, data=data).content.decode()
        # success is determined by absence of "invalid Library Card" or "Please try again"
        for msg in ["invalid Library Card", "Please enter your Library", "Please try again"]:
            if msg in response:
                raise Exception(f"Failed to log in to {self.alias}")
        # step 3: learn card expiration date
        # curl 'https://catalog.example.org/polaris/PatronAccount/default.aspx' -H 'Referer: https://catalog.example.org/polaris/PatronAccount/requests.aspx' -H 'Cookie: ASP.NET_SessionId=umyzdtvpkv5mo45axo3fny20; OrgID=1'
        headers = {
            "Referer": f"{b}/PatronAccount/requests.aspx"
        }
        output = s.get(f"{b}/PatronAccount/default.aspx",headers=headers).content
        soup = BeautifulSoup(output, "html.parser")
        alldivs = soup.find_all("div",class_="row")
        labels = []
        values = []
        for i in alldivs:
            j = i.find_all("span")
            try:
                # replace colon-nbsp from labels with empty
                labels.append(j[0].text.replace(":\xa0",""))
            except IndexError:
                labels.append("empty")
            try:
                values.append(j[1].text)
            except IndexError:
                values.append("empty")
        values_dict = dict(zip(labels,values))
        if "Expiration date" not in values_dict:
            print(f"Warning! Cannot determine expiration date for {self.alias}. Continuing.",file=sys.stderr)
        else:
            self.card_expires = dateutil.parser.parse(values_dict["Expiration date"])
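
# A minimal usage sketch with hypothetical values. Run it in package context
# (e.g. python3 -m libraries.polaris from the project root) because of the
# relative import above; BaseLibrary is assumed to supply get_image(), which
# returns a (base64_string, mime_type) tuple as used in the methods above.
if __name__ == "__main__":
    lib = Library(config_obj={
        "alias": "My Library",                             # hypothetical
        "username": "12345678901234",                      # hypothetical card number
        "password": "hunter2",                             # hypothetical PIN
        "baseurl": "https://catalog.example.org/polaris",  # the default example host
    })
    for c in lib.get_checkouts():
        print(f"{c['title']} due {c['due']:%Y-%m-%d}")
    for r in lib.get_reservations():
        print(f"{r['title']}: {r['status']}")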