import urllib
import BeautifulSoup
import lib, htcache
# Shorthand: parse an HTML document with BeautifulSoup 3 (returns the soup tree).
soup = BeautifulSoup.BeautifulSoup
| 5 | |
class imgstream(lib.imgstream):
    """A file-like stream over a single page image, fetched over HTTP.

    Opens the URL eagerly in the constructor and exposes the response's
    Content-Type and Content-Length as `ctype` and `clen`.
    """

    def __init__(self, url):
        self.bk = urllib.urlopen(url)
        headers = self.bk.info()
        self.ctype = headers["Content-Type"]
        # NOTE(review): raises if the server omits Content-Length — TODO confirm
        # all image responses carry it.
        self.clen = int(headers["Content-Length"])

    def fileno(self):
        """Return the underlying response's file descriptor."""
        return self.bk.fileno()

    def close(self):
        """Close the backing HTTP response."""
        self.bk.close()

    def read(self, sz=None):
        """Read *sz* bytes, or everything remaining when *sz* is None."""
        return self.bk.read() if sz is None else self.bk.read(sz)
| 23 | |
class page(lib.page):
    """One page of a chapter; the actual image URL is resolved lazily."""

    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.id = str(n)
        self.url = url
        self.ciurl = None  # cached image URL, filled in by iurl()

    def iurl(self):
        """Return this page's image URL, scraping and caching it on first use."""
        if self.ciurl is None:
            doc = soup(htcache.fetch(self.url))
            viewer = doc.find("div", id="viewer")
            self.ciurl = viewer.find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        """Open an imgstream for this page's image."""
        return imgstream(self.iurl())
| 43 | |
class chapter(lib.pagelist):
    """A chapter of a volume; its page list is scraped on demand."""

    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None  # cached list of page objects

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        """Parse the page count from the chapter's first page and build pages.

        The "1.html" page's top bar contains a label of the form "of N";
        anything else is treated as a parse error.
        """
        if self.cpag is None:
            doc = soup(htcache.fetch(self.url + "1.html"))
            bar = doc.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(bar.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            label = bar.contents[2].strip()
            if label[:3] != u"of ":
                raise Exception("parse error: weird page list for %r" % self)
            npages = int(label[3:])
            self.cpag = [page(self, self.stack + [(self, n)], n + 1,
                              self.url + ("%i.html" % (n + 1)))
                         for n in xrange(npages)]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)
| 77 | |
class volume(lib.pagelist):
    """A named volume holding an ordered list of chapters."""

    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.ch = []  # chapter objects; populated by manga.vols()

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)
| 97 | |
def nextel(el):
    """Return the next sibling of *el* that is a tag, skipping text nodes."""
    sib = el.nextSibling
    while not isinstance(sib, BeautifulSoup.Tag):
        sib = sib.nextSibling
    return sib
| 103 | |
class manga(lib.manga):
    # A manga title on Mangafox. The volume/chapter tree is scraped lazily
    # from the title's overview page and cached in self.cvol.
    def __init__(self, lib, id, name, url):
        # NOTE(review): the parameter `lib` shadows the module-level `lib`
        # import inside this method; it is stored as the owning library.
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cvol = None  # cached list of volume objects
        self.stack = []

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())

    def vols(self):
        # Scrape and cache the volume list (each with its chapters) from the
        # overview page. Volumes and chapters appear newest-first in the HTML,
        # hence the reversed() calls to restore reading order.
        if self.cvol is None:
            page = soup(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            self.cvol = []
            for i, vn in enumerate(reversed(vls)):
                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
                vid = name.encode("utf8")
                vol = volume(self, [(self, i)], vid, name)
                # The chapter list is the <ul class="chlist"> element that
                # follows the volume header; anything else is a parse error.
                cls = nextel(vn)
                if cls.name != u"ul" or cls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, ch in enumerate(reversed(cls.findAll("li"))):
                    # Chapter heading may be an <h3> or an <h4>.
                    n = ch.div.h3 or ch.div.h4
                    name = n.a.string
                    chid = name.encode("utf8")
                    # Append any <span class="... title ..."> text to the name;
                    # spans without a class attribute raise KeyError and are skipped.
                    for span in ch("span"):
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                                name += " " + span.string
                        except KeyError:
                            pass
                    url = n.a["href"].encode("us-ascii")
                    # Chapter links are expected to point at ".../1.html"; the
                    # stored URL is the directory prefix (trailing "1.html" cut).
                    if url[-7:] != "/1.html":
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url[:-6]))
                self.cvol.append(vol)
        return self.cvol

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<mangafox.manga %r>" % self.name
| 153 | |
def libalphacmp(a, b):
    """Compare two titles case-insensitively, cmp()-style.

    Returns a negative, zero or positive integer as *a* sorts before,
    equal to or after *b* when compared case-insensitively.  Implemented
    with the `(x > y) - (x < y)` idiom instead of the Python-2-only
    `cmp()` builtin; the result is identical (-1/0/1) under Python 2 and
    the function stays portable.
    """
    au = a.upper()
    bu = b.upper()
    return (au > bu) - (au < bu)
| 156 | |
class library(lib.library):
    # The Mangafox site backend: locates mangas via the alphabetically
    # sorted directory pages or directly by ID.
    def __init__(self):
        self.base = "http://mangafox.me/"

    def alphapage(self, pno):
        # Fetch page *pno* of the alphabetical directory and return the
        # manga objects listed on it, in page order.
        page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        ubase = self.base + "manga/"
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = t["href"].encode("us-ascii")
            # Every entry must look like <base>/manga/<id>/ — the ID is the
            # final path component.
            if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
            ret.append(manga(self, url[len(ubase):-1], name, url))
        return ret

    def alphapages(self):
        # Return the total number of directory pages, read from the
        # second-to-last entry of the pagination bar (the last is "next").
        page = soup(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(ls[-2].find("a").string)

    def byname(self, prefix):
        # Generator yielding every manga whose name starts with *prefix*
        # (case-insensitively). Binary-searches the directory pages by
        # comparing the prefix against each page's first and last title,
        # then scans forward from the first match, crossing page
        # boundaries as needed.
        if not isinstance(prefix, unicode):
            prefix = prefix.decode("utf8")
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                # Prefix falls outside the directory: empty generator.
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                pno = c
                break
        # Skip entries on the found page that sort before the prefix.
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        # Yield matches until a name no longer starts with the prefix,
        # fetching the next directory page whenever this one is exhausted.
        # NOTE(review): running past the final page presumably fails inside
        # alphapage() — TODO confirm behavior when matches end exactly at
        # the last page.
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0

    def byid(self, id):
        # Fetch a manga directly by its URL ID; raises KeyError when the
        # site responds with something other than a title page.
        url = self.base + ("manga/%s/" % id)
        page = soup(htcache.fetch(url))
        if page.find("div", id="title") is None:
            # Assume we got the search page
            raise KeyError(id)
        # The cover image's alt text carries the canonical title.
        name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
        return manga(self, id, name, url)

    def __iter__(self):
        # Full-library iteration is deliberately unsupported.
        raise NotImplementedError("mangafox iterator")