import os, hashlib, urllib.request, time, re, weakref
from urllib.parse import urljoin, urlencode
import bs4

soup = lambda cont: bs4.BeautifulSoup(cont, "html.parser")

__all__ = ["anime", "getlist",
           "error", "incompatible"]

base = "http://www.animenewsnetwork.com/encyclopedia/"

class error(Exception):
    pass

class incompatible(error):
    def __init__(self):
        super().__init__("ANN HTML has changed")

cachedir = os.path.join(os.getenv("HOME"), ".ann", "cache")
if not os.path.isdir(cachedir):
    os.makedirs(cachedir)

def cachename(url):
    # Cache files are keyed by the MD5 of the URL, giving flat filenames.
    d = hashlib.md5()
    d.update(url.encode("ascii"))
    return os.path.join(cachedir, d.hexdigest())

def get(url):
    # Fetch a URL, serving from the local cache when a fresh copy exists.
    cachefile = cachename(url)
    if cachefile and os.path.exists(cachefile):
        if time.time() - os.stat(cachefile).st_mtime < 86400:
            with open(cachefile, "rb") as fp:
                return soup(fp.read())
    with urllib.request.urlopen(url) as fp:
        data = fp.read()
    if cachefile:
        with open(cachefile, "wb") as co:
            co.write(data)
    return soup(data)
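
# Note that cached pages are reused for up to one day (86400 seconds) before
# being re-fetched from ANN.
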
def s(s, rx, rep):
    # Replace the first case-insensitive match of rx in s with rep.
    m = re.search(rx, s, re.I)
    if m is None:
        return s
    return s[:m.start()] + rep + s[m.end():]
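
# For example, s("The Melancholy", r"^(the|a)\s+", "") returns "Melancholy".
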
def afind(soup, *args, **kwargs):
    # find() wrapper that treats a missing element as a scraper/site mismatch.
    ret = soup.find(*args, **kwargs)
    if ret is None:
        raise incompatible()
    return ret

def cstr(soup):
    # Recursively flatten a tag, a list of nodes or a bare string into text.
    if isinstance(soup, (bs4.Tag, list)):
        ret = ""
        for el in soup:
            ret += cstr(el)
        return ret
    elif isinstance(soup, str):
        return soup
    else:
        return ""

class cproperty(object):
    # A property-like descriptor that computes its value once per instance
    # and caches it; the cache is weak, so instances remain collectable.
    _default = object()

    def __init__(self, bk):
        self.bk = bk
        self.cache = weakref.WeakKeyDictionary()

    def __get__(self, ins, cls):
        if ins is None: return self
        ret = self.cache.get(ins, self._default)
        if ret is self._default:
            ret = self.bk(ins)
            self.cache[ins] = ret
        return ret

    def __set__(self, ins, val):
        self.cache[ins] = val

    def __delete__(self, ins):
        del self.cache[ins]
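
# Usage sketch (the names below are illustrative, not part of this module):
#
#     class page(object):
#         @cproperty
#         def body(self):
#             return expensive_fetch()  # evaluated once per instance
#
# Later reads return the cached value; assigning to .body overrides it, and
# deleting it forces recomputation on the next access.
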
class anime(object):
    def __init__(self, id):
        self.id = id
        self.url = urljoin(base, "anime.php?id=%i" % self.id)

    @cproperty
    def _page(self):
        return get(self.url)

    @cproperty
    def _main(self):
        return afind(self._page, "div", id="maincontent")

    def _info(self, nm):
        # Return the contents of the info row labeled nm (the [:-1] strips
        # the trailing colon from the label), or None if no such row exists.
        for t in afind(self._main, "div", id="content-zone")("div", "encyc-info-type"):
            if t.strong and t.strong.text.lower().strip()[:-1] == nm:
                return t.contents[t.contents.index(t.strong) + 1:]
        return None

    @cproperty
    def rawname(self):
        return afind(self._main, "h1", id="page_header").text

    _nre = re.compile(r"^(.*\S)\s+\(([^\)]+)\)$")

    @cproperty
    def _sname(self):
        # Split a header like "Title (TYPE)" into its title and type parts.
        m = self._nre.search(self.rawname)
        if m is None:
            return (self.rawname, None)
        return m.groups()[0:2]

    @property
    def name(self): return self._sname[0]

    @property
    def type(self): return self._sname[1]
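
    # For a page header like "Cowboy Bebop (TV)", name is "Cowboy Bebop" and
    # type is "TV"; a header without a parenthesized suffix gives type None.
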
    @cproperty
    def names(self):
        # All titles as (title, qualifier) tuples, primary name first.
        ret = []
        for el in self._info("alternative title") or []:
            if isinstance(el, bs4.Tag) and el.name == "div" and "tab" in el.get("class", []):
                m = self._nre.search(el.text)
                if m:
                    ret.append((m.groups()[0], m.groups()[1]))
                else:
                    ret.append((el.text, None))
        if (self.name, None) in ret:
            ret.remove((self.name, None))
        ret.insert(0, (self.name, None))
        return ret

    @cproperty
    def eps(self):
        # Episode count; assumed to parse as a plain integer when present.
        ret = cstr(self._info("number of episodes"))
        if not ret:
            return None
        return int(ret.strip())

    @cproperty
    def vintage(self):
        return cstr(self._info("vintage")).strip()

    @cproperty
    def genres(self):
        return [cstr(el) for x in (self._info("genres") or [])
                if isinstance(x, bs4.Tag) for el in x.find_all("a")]

    @cproperty
    def themes(self):
        return [cstr(el) for x in (self._info("themes") or [])
                if isinstance(x, bs4.Tag) for el in x.find_all("a")]

    def __repr__(self):
        return "<ann.anime: %r (%i)>" % (self.name, self.id)

    @classmethod
    def byid(cls, id):
        # Assumed to be a plain constructor wrapper; getlist() below resolves
        # its matches through it.
        return cls(id)

linkpat = re.compile(r"^/encyclopedia/anime\.php\?id=(\d+)$")

def getlist(name):
    # Find shows whose title starts with name, via ANN's alphabetical index.
    name = s(name, r"^(the|a)\s+", "")
    if len(name) < 1:
        raise error("getlist() needs a prefix of at least one character")
    fc = name[0]
    if 'a' <= fc <= 'z' or 'A' <= fc <= 'Z':
        fc = fc.upper()
    else:
        fc = "9"  # assumed: ANN files non-alphabetic titles under "9"
    d = get(urljoin(base, "anime.php?" + urlencode({"list": fc})))
    ret = []
    ldiv = afind(afind(d, "div", id="maincontent"), "div", "lst")
    for link in ldiv("a", "HOVERLINE"):
        rawname = ""
        for el in link.descendants:
            if isinstance(el, str):
                rawname += el.strip()
        mn = rawname
        mn = s(mn, r"^a\s+", "")
        mn = mn.replace("\u014d", "ou")  # fold macrons: ō -> ou
        mn = mn.replace("\u016b", "uu")  # and ū -> uu
        if mn.lower().startswith(name.lower()):
            m = linkpat.match(link["href"])
            if m is None:
                raise incompatible()
            found = anime.byid(int(m.groups()[0]))
            found.rawname = rawname  # prime the cproperty cache for free
            ret.append(found)
    return ret
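
# A minimal usage sketch, assuming network access and that ANN's current
# markup still matches the selectors above:
if __name__ == "__main__":
    import sys
    for show in getlist(sys.argv[1] if len(sys.argv) > 1 else "cowboy bebop"):
        print("%s (%s): vintage %s" % (show.name, show.type, show.vintage))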