-import urllib, re
-import BeautifulSoup, json
-import lib, htcache
-soup = BeautifulSoup.BeautifulSoup
-
-class imgstream(lib.imgstream):
- def __init__(self, url):
- self.bk = urllib.urlopen(url)
- ok = False
- try:
- if self.bk.getcode() != 200:
- raise IOError("Server error: " + str(self.bk.getcode()))
- self.ctype = self.bk.info()["Content-Type"]
- self.clen = int(self.bk.info()["Content-Length"])
- ok = True
- finally:
- if not ok:
- self.bk.close()
-
- def fileno(self):
- return self.bk.fileno()
-
- def close(self):
- self.bk.close()
-
- def read(self, sz = None):
- if sz is None:
- return self.bk.read()
- else:
- return self.bk.read(sz)
+import urllib.request, urllib.parse, re
+import bs4, json
+from . import lib, htcache
+soup = bs4.BeautifulSoup
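+# Pin the parser to the stdlib html.parser so parsing does not depend on which optional backends (e.g. lxml) are installed.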
+soupify = lambda cont: soup(cont, "html.parser")
class page(lib.page):
def __init__(self, chapter, stack, n, url):
self.manga = self.volume.manga
self.n = n
self.id = str(n)
- self.name = u"Page %s" % n
+ self.name = "Page %s" % n
self.url = url
self.ciurl = None
def iurl(self):
if self.ciurl is None:
- page = soup(htcache.fetch(self.url))
+ page = soupify(htcache.fetch(self.url))
self.ciurl = page.find("div", id="viewer").find("img", id="image")["src"]
return self.ciurl
def open(self):
- return imgstream(self.iurl())
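+ # The module-local imgstream class is gone; use the shared image-stream helper from lib instead.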
+ return lib.stdimgstream(self.iurl())
def __str__(self):
return self.name
def pages(self):
if self.cpag is None:
- pg = soup(htcache.fetch(self.url + "1.html"))
+ pg = soupify(htcache.fetch(self.url + "1.html"))
l = pg.find("form", id="top_bar").find("div", attrs={"class": "l"})
if len(l.contents) != 3:
raise Exception("parse error: weird page list for %r" % self)
m = l.contents[2].strip()
- if m[:3] != u"of ":
+ if m[:3] != "of ":
raise Exception("parse error: weird page list for %r" % self)
- self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
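+ # The toolbar text reads "of N"; create one page object per page, using 1-based page numbers in the URLs.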
+ self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in range(int(m[3:]))]
return self.cpag
def __str__(self):
def nextel(el):
while True:
el = el.nextSibling
- if isinstance(el, BeautifulSoup.Tag):
+ if isinstance(el, bs4.Tag):
return el
class manga(lib.manga):
def vols(self):
if self.cvol is None:
- page = soup(htcache.fetch(self.url))
+ page = soupify(htcache.fetch(self.url))
vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
cvol = []
for i, vn in enumerate(reversed(vls)):
name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
- vid = name.encode("utf8")
- vol = volume(self, [(self, i)], vid, name)
+ vol = volume(self, [(self, i)], name, name)
cls = nextel(vn)
- if cls.name != u"ul" or cls["class"] != u"chlist":
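+ # bs4 exposes the class attribute as a list of class names, so test for membership rather than string equality.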
+ if cls.name != "ul" or "chlist" not in cls["class"]:
raise Exception("parse error: weird volume list for %r" % self)
for o, ch in enumerate(reversed(cls.findAll("li"))):
n = ch.div.h3 or ch.div.h4
- name = n.a.string
- chid = name.encode("utf8")
+ chid = name = str(n.a.string)
for span in ch("span"):
try:
- if u" title " in (u" " + span["class"] + u" "):
+ if "title" in span["class"]:
name += " " + span.string
except KeyError:
pass
- url = n.a["href"].encode("us-ascii")
+ url = n.a["href"]
if url[-7:] == "/1.html":
url = url[:-6]
elif self.cure.search(url) is not None:
return "<mangafox.manga %r>" % self.name
def libalphacmp(a, b):
- return cmp(a.upper(), b.upper())
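+ # Python 3 has no cmp(); emulate it for case-insensitive ordering.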
+ au, bu = a.upper(), b.upper()
+ return (au > bu) - (au < bu)
class library(lib.library):
def __init__(self):
self.base = "http://mangafox.me/"
def alphapage(self, pno):
- page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
+ page = soupify(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
ret = []
ubase = self.base + "manga/"
for m in ls:
t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
name = t.string
- url = t["href"].encode("us-ascii")
+ url = t["href"]
if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
ret.append(manga(self, url[len(ubase):-1], name, url))
return ret
def alphapages(self):
- page = soup(htcache.fetch(self.base + "directory/?az"))
+ page = soupify(htcache.fetch(self.base + "directory/?az"))
ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
return int(ls[-2].find("a").string)
def byname(self, prefix):
- if not isinstance(prefix, unicode):
- prefix = prefix.decode("utf8")
l = 1
r = self.alphapages()
while True:
i = 0
def search(self, expr):
- resp = urllib.urlopen(self.base + ("ajax/search.php?term=%s" % urllib.quote(expr)))
- try:
- rc = json.load(resp)
- finally:
- resp.close()
- return [manga(self, id.encode("utf8"), name, self.base + ("manga/%s/" % id.encode("utf8"))) for num, name, id, genres, author in rc]
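+ # Quote the search term into the query string and send an explicit User-Agent with the AJAX search request.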
+ req = urllib.request.Request(self.base + ("ajax/search.php?term=%s" % urllib.parse.quote(expr)),
+ headers={"User-Agent": "automanga/1"})
+ with urllib.request.urlopen(req) as resp:
+ rc = json.loads(resp.read().decode("utf-8"))
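+ # Each result is expected to be a (num, name, id, genres, author) tuple; the id doubles as the manga URL slug.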
+ return [manga(self, id, name, self.base + ("manga/%s/" % id)) for num, name, id, genres, author in rc]
def byid(self, id):
url = self.base + ("manga/%s/" % id)
- page = soup(htcache.fetch(url))
+ page = soupify(htcache.fetch(url))
if page.find("div", id="title") is None:
# Assume we got the search page
raise KeyError(id)