-import urllib.request, urllib.parse, http.cookiejar, re, bs4, os
+import urllib.request, urllib.parse, http.cookiejar, re, bs4, os, time
from . import profile, lib, htcache
soup = bs4.BeautifulSoup
soupify = lambda cont: soup(cont, "html.parser")
super().__init__(message)
self.page = page
+def iterlast(itr, default=None):
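+    # Return the last value ITR yields, or DEFAULT if it yields nothing.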
+    ret = default
+ try:
+ while True:
+ ret = next(itr)
+ except StopIteration:
+ return ret
+
+def find1(el, *args, **kwargs):
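+    # Like el.find(), but raise pageerror, with the document root attached
+    # for diagnostics, instead of returning None when nothing matches.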
+ ret = el.find(*args, **kwargs)
+ if ret is None:
+ raise pageerror("could not find expected element", iterlast(el.parents, el))
+ return ret
+
def byclass(el, name, cl):
    for ch in el.findAll(name):
        if not isinstance(ch, bs4.Tag): continue
        if cl in ch.get("class", []): return ch

def nextel(el):
    while True:
        el = el.nextSibling
        if isinstance(el, bs4.Tag): return el
+def fetchreader(lib, readerid, page):
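+    # Fetch one page of a chapter through the AJAX reader endpoint. The
+    # Referer header mimics a visit from the in-browser reader, and
+    # "supress_webtoon" (sic) is spelled the way the site expects it.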
+ pg = soupify(lib.sess.fetch(lib.base + "areader?" + urllib.parse.urlencode({"id": readerid,
+ "p": str(page),
+ "supress_webtoon": "t"}),
+ headers={"Referer": "http://bato.to/reader"}))
+ return pg
+
class page(lib.page):
- def __init__(self, chapter, stack, n, url):
+ def __init__(self, chapter, stack, readerid, n):
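+        # READERID names the chapter in the AJAX reader; N is the page's
+        # 1-based number within it.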
self.stack = stack
+ self.lib = chapter.lib
self.chapter = chapter
self.n = n
self.id = str(n)
self.name = "Page %s" % n
- self.url = url
+ self.readerid = readerid
self.ciurl = None
def iurl(self):
if self.ciurl is None:
- page = soupify(htcache.fetch(self.url))
- img = nextel(page.find("div", id="full_image")).img
+ page = fetchreader(self.lib, self.readerid, self.n)
+ img = find1(page, "img", id="comic_page")
self.ciurl = img["src"]
return self.ciurl
return self.name
    def __repr__(self):
- return "<batoto.page %r.%r.%r>" % (self.chapter.manga.name, self.chapter.name, self.name)
+ return "<batoto.page %r.%r.%r.%r>" % (self.chapter.manga.name, self.chapter.group.name, self.chapter.name, self.name)
class chapter(lib.pagelist):
- def __init__(self, manga, stack, id, name, url):
+ def __init__(self, group, stack, id, name, readerid):
self.stack = stack
- self.manga = manga
+ self.group = group
+ self.manga = group.manga
+ self.lib = self.manga.lib
self.id = id
self.name = name
- self.url = url
+ self.readerid = readerid
self.cpag = None
def __getitem__(self, i):
pnre = re.compile(r"page (\d+)")
def pages(self):
if self.cpag is None:
- pg = soupify(htcache.fetch(self.url))
+ pg = fetchreader(self.lib, self.readerid, 1)
cpag = []
- for opt in pg.find("select", id="page_select").findAll("option"):
- url = opt["value"]
+ for opt in find1(pg, "select", id="page_select").findAll("option"):
n = int(self.pnre.match(opt.string).group(1))
- cpag.append(page(self, self.stack + [(self, len(cpag))], n, url))
+ cpag.append(page(self, self.stack + [(self, len(cpag))], self.readerid, n))
self.cpag = cpag
return self.cpag
return self.name
def __repr__(self):
- return "<batoto.chapter %r.%r>" % (self.manga.name, self.name)
+ return "<batoto.chapter %r.%r.%r>" % (self.manga.name, self.group.name, self.name)
+
+class group(lib.pagelist):
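+    # The chapters of a manga released by one scanlation group; manga.ch()
+    # returns one such list per group.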
+ def __init__(self, manga, stack, id, name):
+ self.stack = stack
+ self.manga = manga
+ self.id = id
+ self.name = name
+ self.ch = []
+
+ def __getitem__(self, i):
+ return self.ch[i]
+
+ def __len__(self):
+ return len(self.ch)
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+        return "<batoto.group %r.%r>" % (self.manga.name, self.name)
class manga(lib.manga):
def __init__(self, lib, id, name, url):
return False
return True
- cure = re.compile(r"/read/_/(\d+)/[^/]*")
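+    # Chapter links now point into the HTML5 reader; the fragment after
+    # the hash is the ID that the areader endpoint expects.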
+ cure = re.compile(r"/reader#([a-z0-9]+)")
def ch(self):
if self.cch is None:
page = self.sess.lfetch(self.url, self.vfylogin)
url = ch.td.a["href"]
m = self.cure.search(url)
if m is None: raise pageerror("Got weird chapter URL: %r" % url, page)
- cid = m.group(1)
- url = self.lib.base + "read/_/" + cid
+ readerid = m.group(1)
name = ch.td.a.text
- cch.append((cid, name, url))
+ gname = nextel(nextel(ch.td)).text.strip()
+ cch.append((readerid, name, gname))
cch.reverse()
- rch = []
- for n, (cid, name, url) in enumerate(cch):
- rch.append(chapter(self, [(self, n)], cid, name, url))
- self.cch = rch
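+            # Bucket the chapters by scanlation group, ordering the groups
+            # by the first chapter each of them released.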
+ groups = {}
+ for n, (readerid, name, gname) in enumerate(cch):
+ groups.setdefault(gname, [n, []])[1].append((readerid, name))
+ groups = sorted(groups.items(), key=lambda o: o[1][0])
+ rgrp = []
+ for n, (gname, (_, gch)) in enumerate(groups):
+ ngrp = group(self, [(self, n)], gname, gname)
+ for m, (readerid, name) in enumerate(gch):
+ ngrp.ch.append(chapter(ngrp, ngrp.stack + [(ngrp, m)], readerid, name, readerid))
+ rgrp.append(ngrp)
+ self.cch = rgrp
return self.cch
def altnames(self):
self.creds = credentials
self.jar = http.cookiejar.CookieJar()
self.web = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(self.jar))
- self.loggedin = False
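+        # Time of the last login attempt, used to keep a broken login from
+        # being retried in a tight loop.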
+ self.lastlogin = 0
rlre = re.compile(r"Welcome, (.*) ")
- def dologin(self):
- with self.web.open(self.base) as hs:
- page = soupify(hs.read())
+ def dologin(self, pre=None):
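+        # PRE, if given, is an already soupified page to inspect for login
+        # status, saving a refetch when the caller has one at hand.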
+ now = time.time()
+ if now - self.lastlogin < 60:
+ raise Exception("Too soon since last login attempt")
+ if pre is None:
+ with self.web.open(self.base) as hs:
+ page = soupify(hs.read())
+ else:
+ page = pre
cur = page.find("a", id="user_link")
- print(0)
if cur:
- m = self.rlre.search(cur.get_text())
+ m = self.rlre.search(cur.text)
if not m or m.group(1) != self.creds.username:
- print(1)
outurl = None
nav = page.find("div", id="user_navigation")
if nav:
with self.web.open(self.base) as hs:
page = soupify(hs.read())
else:
- print(2)
return
else:
- print(3)
+ pass
form = page.find("form", id="login")
+        if form is None:
+            if pre is not None:
+                return self.dologin()
+            raise pageerror("Could not find login form", page)
values = {}
for el in form.findAll("input", type="hidden"):
values[el["name"]] = el["value"]
values["ips_username"] = self.creds.username
values["ips_password"] = self.creds.password
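+        # Ask for a persistent session; "anonymous" keeps the login out of
+        # the public online-users list.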
+ values["rememberMe"] = "1"
values["anonymous"] = "1"
req = urllib.request.Request(form["action"], urllib.parse.urlencode(values).encode("ascii"))
+ req.add_header("User-Agent", self.useragent)
with self.web.open(req) as hs:
page = soupify(hs.read())
for resp in page.findAll("p", attrs={"class": "message"}):
break
else:
raise pageerror("Could not log in", page)
-
- def login(self):
- if not self.loggedin:
- if self.creds:
- self.dologin()
- self.loggedin = True
+ self.lastlogin = now
def open(self, url):
return self.web.open(url)
- def fetch(self, url):
- with self.open(url) as hs:
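+    # The site appears to turn away urllib's default User-Agent, so every
+    # request presents a browser-like one instead.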
+ useragent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.22 (KHTML, like Gecko) Chrome/25.0.1364.160 Safari/537.22"
+ def fetch(self, url, headers=None):
+ req = urllib.request.Request(url)
+ req.add_header("User-Agent", self.useragent)
+ if headers is not None:
+ for k, v in headers.items():
+ req.add_header(k, v)
+ with self.open(req) as hs:
return hs.read()
def lfetch(self, url, ck):
page = soupify(self.fetch(url))
if not ck(page):
- self.login()
+ self.dologin(pre=page)
page = soupify(self.fetch(url))
if not ck(page):
raise pageerror("Could not verify login status despite having logged in", page)
while True:
_pars = dict(pars)
_pars["p"] = str(p)
- resp = urllib.request.urlopen(self.base + "search?" + urllib.parse.urlencode(_pars).encode("ascii"))
+ req = urllib.request.Request(self.base + "search?" + urllib.parse.urlencode(_pars))
+ req.add_header("User-Agent", session.useragent)
+ resp = urllib.request.urlopen(req)
try:
page = soupify(resp.read())
finally: