import lib, htcache
soup = BeautifulSoup.BeautifulSoup
class imgstream(lib.imgstream):
    """Streaming reader for a single page image.

    Wraps the HTTP response for an image URL, exposing the declared MIME
    type (ctype) and byte length (clen) alongside a file-like read API.
    Context-manager behaviour is inherited from lib.imgstream.
    """
    # NOTE(review): this span contained unresolved "-"/"+" diff markers;
    # resolved to the "+" (new) side of the hunk, which drops the local
    # __enter__/__exit__ in favour of the lib.imgstream base class.

    def __init__(self, url):
        self.bk = urllib.urlopen(url)
        self.ctype = self.bk.info()["Content-Type"]
        self.clen = int(self.bk.info()["Content-Length"])

    def fileno(self):
        # Expose the socket fd so callers can select() on the stream.
        return self.bk.fileno()

    def close(self):
        self.bk.close()

    def read(self, sz=None):
        # None means "read to EOF", mirroring the file protocol.
        if sz is None:
            return self.bk.read()
        return self.bk.read(sz)
class page(lib.page):
    """A single image page within a chapter."""
    # NOTE(review): resolved unresolved "-"/"+" diff markers to the "+"
    # side: the constructor gains a traversal `stack` plus stable
    # `id`/`name` attributes. Sibling methods (iurl, open, ...) live in
    # later hunks and are not part of this span.

    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = self.chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.id = str(n)
        self.name = u"Page %s" % n
        self.url = url
        self.ciurl = None  # cached image URL, filled in lazily
def open(self):
return imgstream(self.iurl())
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "<mangafox.page %r.%r.%r.%r>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)
+
class chapter(lib.pagelist):
    """A chapter: an ordered list of pages within a volume."""
    # NOTE(review): resolved unresolved "-"/"+" diff markers to the "+"
    # side: the constructor gains a traversal `stack` and a stable `id`.
    # The page-list builder method belongs to a later, truncated hunk.

    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None  # lazily-built page list cache
        # NOTE(review): truncated diff hunk — the enclosing method's "def"
        # line (presumably the lazy page-list builder) and its earlier
        # statements are omitted from this view, so only comments are added
        # here. The unresolved "-"/"+" pair below threads self.stack into
        # each constructed page; resolving the patch keeps the "+" line.
        m = l.contents[2].strip()
        if m[:3] != u"of ":
            raise Exception("parse error: weird page list for %r" % self)
-        self.cpag = [page(self, n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
+        self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
        return self.cpag
def __str__(self):
return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)
class volume(lib.pagelist):
    """A volume: an ordered list of chapters within a manga."""
    # NOTE(review): resolved unresolved "-"/"+" diff markers to the "+"
    # side: the constructor gains a traversal `stack` and a stable `id`.

    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.ch = []  # chapters, appended in reading order by the parser
return el
class manga(lib.manga):
    """A manga title hosted on MangaFox."""
    # NOTE(review): resolved unresolved "-"/"+" diff markers to the "+"
    # side: the constructor gains a stable `id` and an empty traversal
    # stack (this object is the root of the stack chain).

    def __init__(self, lib, id, name, url):
        # `lib` here is the owning library instance; the parameter name
        # shadows the module import of the same name but is kept for
        # interface compatibility with callers.
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cvol = None  # lazily-built volume list cache
        self.stack = []
def __getitem__(self, i):
return self.vols()[i]
        # NOTE(review): truncated diff hunk — the enclosing method's "def"
        # line (the lazy volume-list builder) is omitted from this view,
        # and interior lines are elided by hunk boundaries (e.g. the
        # "try:" below has no visible "except"). Code is left with its
        # unresolved "-"/"+" markers; the "+" side adds stable utf8 ids,
        # threads traversal stacks, and builds the lists in reverse site
        # order with append() instead of insert(0, ...).
        if self.cvol is None:
            page = soup(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
-            self.cvol = []
-            for i in xrange(len(vls)):
-                vol = volume(self, vls[i].find("h3", attrs={"class": "volume"}).contents[0].strip())
-                cls = nextel(vls[i])
+            cvol = []
+            for i, vn in enumerate(reversed(vls)):
+                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
+                vid = name.encode("utf8")
+                vol = volume(self, [(self, i)], vid, name)
+                cls = nextel(vn)
                if cls.name != u"ul" or cls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
-                for ch in cls.findAll("li"):
+                for o, ch in enumerate(reversed(cls.findAll("li"))):
                    n = ch.div.h3 or ch.div.h4
                    name = n.a.string
+                    chid = name.encode("utf8")
                    for span in ch("span"):
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                    # NOTE(review): lines omitted here by the hunk boundary
                    # (the body of the try and its except clause).
                    url = n.a["href"].encode("us-ascii")
                    if url[-7:] != "/1.html":
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
-                    vol.ch.insert(0, chapter(vol, name, url[:-6]))
-                self.cvol.insert(0, vol)
+                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url[:-6]))
+                cvol.append(vol)
+            self.cvol = cvol
        return self.cvol
    def __str__(self):
        # NOTE(review): method body omitted by the hunk boundary.
class library(lib.library):
    """Scraper front-end for the MangaFox site."""

    def __init__(self):
        # Resolved from unresolved "-"/"+" diff markers: the site moved
        # from www.mangafox.com to mangafox.me; keep the "+" base URL.
        self.base = "http://mangafox.me/"
def alphapage(self, pno):
page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
ret = []
+ ubase = self.base + "manga/"
for m in ls:
t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
name = t.string
url = t["href"].encode("us-ascii")
- ret.append(manga(self, name, url))
+ if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
+ raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
+ ret.append(manga(self, url[len(ubase):-1], name, url))
return ret
    def alphapages(self):
        # NOTE(review): truncated diff hunk — the statements between the
        # "def" line and "pno += 1" (the initialisation of pno and the
        # loop header, presumably) are omitted from this view; the
        # fragment is left untouched rather than reconstructed.
        pno += 1
        ls = self.alphapage(pno)
        i = 0
+
+ def byid(self, id):
+ url = self.base + ("manga/%s/" % id)
+ page = soup(htcache.fetch(url))
+ if page.find("div", id="title") is None:
+ # Assume we got the search page
+ raise KeyError(id)
+ name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
+ return manga(self, id, name, url)
+
+ def __iter__(self):
+ raise NotImplementedError("mangafox iterator")