1 import BeautifulSoup, urlparse
# Shorthand for the BeautifulSoup 3.x parser class imported above.
soup = BeautifulSoup.BeautifulSoup

def soupify(cont):
    """Parse *cont* into a BeautifulSoup tree with HTML entities decoded.

    PEP 8 (E731): a named callable should be a ``def``, not a lambda
    bound to a name — behavior is unchanged.
    """
    return soup(cont, convertEntities=soup.HTML_ENTITIES)
def __init__(self, chapter, stack, n, url):
    """Initialize a single page belonging to *chapter*.

    NOTE(review): several assignments of this constructor (presumably
    ``self.chapter``, ``self.stack``, ``self.url`` and a cache field)
    are not visible in this view; only the visible statements are
    documented.
    """
    # Manga this page ultimately belongs to, taken from its chapter.
    self.manga = chapter.manga
    # Human-readable label derived from the page number *n*.
    self.name = u"Page %s" % n
18 if self.ciurl is None:
19 page = soupify(htcache.fetch(self.url))
20 self.ciurl = page.find("div", id="imgholder").find("img", id="img")["src"].encode("us-ascii")
24 return lib.stdimgstream(self.iurl())
30 return "<mrnet.page %r.%r.%r>" % (self.manga.name, self.chapter.name, self.name)
32 class chapter(lib.pagelist):
33 def __init__(self, manga, stack, id, name, url):
def __getitem__(self, i):
    """Return page *i* of this chapter, resolving the page list lazily."""
    pagelist = self.pages()
    return pagelist[i]
45 return len(self.pages())
49 pg = soupify(htcache.fetch(self.url))
51 for opt in pg.find("div", id="selectpage").find("select", id="pageMenu").findAll("option"):
52 url = urlparse.urljoin(self.url, opt["value"].encode("us-ascii"))
54 pag.append(page(self, self.stack + [(self, len(pag))], n, url))
62 return "<mrnet.chapter %r.%r>" % (self.manga.name, self.name)
64 class manga(lib.manga):
65 def __init__(self, lib, id, name, url):
73 def __getitem__(self, i):
81 page = soupify(htcache.fetch(self.url))
82 cls = page.find("div", id="chapterlist").find("table", id="listing")
85 for tr in cls.findAll("tr"):
87 if td is None: continue
89 url = urlparse.urljoin(self.url, cla["href"].encode("us-ascii"))
91 cid = name.encode("utf8")
92 if isinstance(cla.nextSibling, unicode):
93 ncont = unicode(cla.nextSibling)
94 if len(ncont) > 3 and ncont[:3] == u" : ":
95 name += u": " + ncont[3:]
96 cch.append(chapter(self, [(self, len(cch))], cid, name, url))
104 return "<mrnet.manga %r>" % self.name
106 class library(lib.library):
108 self.base = "http://www.mangareader.net/"
112 page = soupify(htcache.fetch(url))
113 if page.find("h2", attrs={"class": "aname"}) is None:
115 name = page.find("h2", attrs={"class": "aname"}).string
116 return manga(self, id, name, url)
119 page = soupify(htcache.fetch(self.base + "alphabetical"))
120 for sec in page.findAll("div", attrs={"class": "series_alpha"}):
121 for li in sec.find("ul", attrs={"class": "series_alpha"}).findAll("li"):
122 url = li.a["href"].encode("us-ascii")
124 if url[:1] != "/": continue
127 # Does this distinction mean something?
128 id = id[id.rindex('/') + 1:]
129 if id[-5:] != ".html":
132 yield manga(self, id, name, urlparse.urljoin(self.base, url))
def byname(self, prefix):
    """Find known mangas whose name starts with *prefix*.

    Byte strings are decoded as UTF-8 and the comparison is made on the
    lower-cased prefix, so matching is case-insensitive.

    NOTE(review): the loop that walks the library and produces matches
    is not visible in this view.
    """
    # Normalize to a unicode, lower-cased prefix before comparing.
    if not isinstance(prefix, unicode):
        prefix = prefix.decode("utf8")
    prefix = prefix.lower()
139 if manga.name.lower()[:len(prefix)] == prefix:
def search(self, expr):
    """Find mangas whose name contains *expr*.

    Byte strings are decoded as UTF-8 first.  Matching appears to be
    case-insensitive (the visible comparison lower-cases the manga
    name; presumably *expr* is lower-cased in a line not visible here —
    confirm against the full source).
    """
    # Accept both byte and unicode strings; work in unicode internally.
    if not isinstance(expr, unicode):
        expr = expr.decode("utf8")
147 if expr in manga.name.lower():