| 1 | import bs4 |
| 2 | from urllib.parse import urljoin |
| 3 | from . import lib, htcache |
soup = bs4.BeautifulSoup

def soupify(cont):
    """Parse *cont* into a BeautifulSoup tree using the html.parser backend."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name,
    # so tracebacks and repr() show a useful function name.
    return soup(cont, "html.parser")
| 6 | |
class page(lib.page):
    """A single page image inside a chapter, identified by zero-based index."""

    def __init__(self, chapter, stack, n, url):
        self.chapter = chapter
        self.manga = chapter.manga
        self.stack = stack
        self.n = n
        self.id = str(n)
        # Human-readable name is one-based.
        self.name = "Page %s" % (n + 1,)
        self.iurl = url

    def open(self):
        # Delegate streaming of the image to the library's standard helper.
        return lib.stdimgstream(self.iurl)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<kakalot.page %r.%r.%r>" % (self.manga.name, self.chapter.name, self.name)
| 25 | |
class chapter(lib.pagelist):
    """One chapter of a manga; its page list is fetched lazily and cached."""

    def __init__(self, manga, stack, id, name, url):
        self.manga = manga
        self.stack = stack
        self.id = id
        self.name = name
        self.url = url
        # Cached page list; None until pages() is first called.
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        """Fetch, build and cache the list of page objects for this chapter."""
        if self.cpag is None:
            doc = soupify(htcache.fetch(self.url))
            imgs = doc.find("div", id="vungdoc").findAll("img")
            self.cpag = [
                page(self, self.stack + [(self, n)], n, urljoin(self.url, img["src"]))
                for n, img in enumerate(imgs)
            ]
        return self.cpag

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<kakalot.chapter %r.%r>" % (self.manga.name, self.name)
| 56 | |
class manga(lib.manga):
    """A manga on Mangakakalot; its chapter list is fetched lazily and cached."""

    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        # Cached chapter list; None until ch() is first called.
        self.cch = None
        self.stack = []

    def __getitem__(self, i):
        return self.ch()[i]

    def __len__(self):
        return len(self.ch())

    def ch(self):
        """Fetch, validate and cache the chapter list, oldest chapter first."""
        if self.cch is None:
            doc = soupify(htcache.fetch(self.url))
            listing = doc.find("div", attrs={"class": "chapter-list"})
            chapters = []
            # The site lists newest chapters first; reverse for reading order.
            for row in reversed(listing.findAll("div", attrs={"class": "row"})):
                link = row.find("a")
                url = urljoin(self.url, link["href"])
                # The URL must end ".../<manga-id>/<chapter-id>"; locate the
                # last two slashes and check the manga-id segment matches.
                p1 = url.rfind("/")
                p2 = url.rfind("/", 0, p1 - 1)
                cid = url[p1 + 1:]
                if p1 < 0 or p2 < 0 or url[p2 + 1 : p1] != self.id or not cid:
                    raise Exception("unexpected chapter url: %s" % (url,))
                chapters.append(chapter(self, [(self, len(chapters))], cid, link.string, url))
            self.cch = chapters
        return self.cch

    def __str__(self):
        return self.name

    def __repr__(self):
        return "<kakalot.manga %r>" % self.name
| 97 | |
class library(lib.library):
    """Entry point for browsing the Mangakakalot site."""

    def __init__(self):
        self.base = "https://mangakakalot.com/"

    def byid(self, id):
        """Resolve a site id to a manga object; raise KeyError when unknown."""
        url = urljoin(self.base + "manga/", id)
        doc = soupify(htcache.fetch(url))
        info = doc.find("ul", attrs={"class": "manga-info-text"})
        if info is None:
            raise KeyError(id)
        # The title lives in the first <li>'s <h1>; its absence means the
        # page is not a real manga page.
        title = info.li.h1
        if title is None:
            raise KeyError(id)
        return manga(self, id, title.string, url)