1 import os, hashlib, urllib.request, time, re, weakref
2 from urllib.parse import urljoin, urlencode
# Convenience parser: wrap raw HTML (str/bytes) in a BeautifulSoup tree using
# the stdlib "html.parser" backend.
# NOTE(review): `bs4` is not imported on any visible line — presumably imported
# on an elided line near the top of the file; confirm.
4 soup = lambda cont: bs4.BeautifulSoup(cont, "html.parser")
# Root URL of ANN's encyclopedia; page URLs below are built from it via urljoin().
6 base = "http://www.animenewsnetwork.com/encyclopedia/"
# Base exception for this module; module-specific errors derive from it.
# (Class body elided from this view.)
8 class error(Exception):
# Raised when ANN's HTML no longer matches the structure this scraper expects
# (see afind() below, which presumably raises it — the raise site is elided).
11 class incompatible(error):
# NOTE(review): the enclosing `def __init__` line is elided from this view;
# this call fixes the exception message to "ANN HTML has changed".
13 super().__init__("ANN HTML has changed")
# On-disk HTTP cache lives under $HOME/.ann/cache; created below if absent
# (the creation call itself is on an elided line).
# NOTE(review): os.getenv("HOME") returns None on Windows, which would make
# os.path.join raise TypeError — confirm whether Windows support matters.
16 cachedir = os.path.join(os.getenv("HOME"), ".ann", "cache")
17 if not os.path.isdir(cachedir):
# The two lines below are the tail of an elided `cachename(url)` helper (called
# at the `cachefile = cachename(url)` line further down): `d` is presumably a
# hashlib digest object created on an elided line; the URL is hashed to produce
# a stable cache filename inside cachedir.
26 d.update(url.encode("ascii"))
27 return os.path.join(cachedir, d.hexdigest())
# Interior of an elided fetch-with-cache function (presumably `get(url)`, used
# by list() below): serve from the on-disk cache when the entry is fresh,
# otherwise fetch over HTTP and repopulate the cache.
31 cachefile = cachename(url)
32 if cachefile and os.path.exists(cachefile):
# Cache entries are considered fresh for 86400 s (24 h), judged by file mtime.
33 if time.time() - os.stat(cachefile).st_mtime < 86400:
# Cache hit: the read/return lines are elided from this view.
34 with open(cachefile, "rb") as fp:
# Cache miss or stale: fetch the live page.
37 with urllib.request.urlopen(url) as fp:
# NOTE(review): `co` is opened without a `with`; whether it is closed (or the
# cache file removed on write failure) happens on elided lines — confirm there
# is no handle leak / truncated-cache-entry window.
40 co = open(cachefile, "wb")
# Interior of an elided helper, presumably `s(s, rx, rep)` (see the call sites
# `s(name, "^(the|a)\s+", "")` below): replace the FIRST case-insensitive
# match of `rx` in string `s` with `rep`.
48 m = re.search(rx, s, re.I)
# Splice the replacement over the matched span. The no-match branch
# (presumably returning `s` unchanged) is on an elided line — confirm.
50 return s[:m.start()] + rep + s[m.end():]
# find-or-fail wrapper around BeautifulSoup's .find(): forwards all arguments,
# and (on elided lines) presumably raises `incompatible` when nothing matches,
# so layout changes on ANN surface as a single clear error.
# NOTE(review): the parameter `soup` shadows the module-level `soup` lambda.
54 def afind(soup, *args, **kwargs):
55 ret = soup.find(*args, **kwargs)
# Sanity/type check on the argument or result; the branch bodies and the
# return statement are elided from this view.
61 if isinstance(soup, bs4.Tag) or isinstance(soup, list):
# Caching property descriptor: computes a value once per instance via the
# wrapped getter and memoizes it. Uses WeakKeyDictionary keyed on the instance
# so cached values do not keep instances alive.
71 class cproperty(object):
# `bk` is presumably the backing getter function; its storage, and the
# `_default` miss-sentinel used below, are assigned on elided lines.
74 def __init__(self, bk):
76 self.cache = weakref.WeakKeyDictionary()
78 def __get__(self, ins, cls):
# Class-level access (Anime.attr) returns the descriptor itself, as usual.
79 if ins is None: return self
80 ret = self.cache.get(ins, self._default)
# Cache miss: the compute-and-store lines are elided from this view.
81 if ret is self._default:
# Explicit set/delete override or invalidate the cached value; bodies elided.
86 def __set__(self, ins, val):
89 def __delete__(self, ins):
# Interior of an elided `anime` class (header not visible). Each fragment below
# is annotated with its apparent role; surrounding lines are elided.
# Constructor: stores the ANN id (assignment elided) and builds the page URL.
# NOTE(review): parameter `id` shadows the builtin.
94 def __init__(self, id):
96 self.url = urljoin(base, "anime.php?id=%i" % self.id)
# Body of an elided `_main` property: the page's main content container.
104 return afind(self._page, "div", id="maincontent")
# Body of an elided `_info(nm)` helper: scan the "encyc-info-type" rows in the
# content zone for one whose <strong> label (lowercased, trailing colon dropped
# via [:-1]) equals `nm`, and return everything after that label tag.
107 for t in afind(self._main, "div", id="content-zone")("div", "encyc-info-type"):
108 if t.strong and t.strong.text.lower().strip()[:-1] == nm:
109 return t.contents[t.contents.index(t.strong) + 1:]
# Body of an elided `rawname` property: the page's <h1> header text.
113 return afind(self._main, "h1", id="page_header").text
# Splits "Title (type)" — e.g. "Foo (TV)" — into title and parenthesized type.
114 _nre = re.compile(r"^(.*\S)\s+\(([^\)]+)\)$")
# Body of an elided `_sname` property: (title, type-or-None) parsed from rawname.
117 m = self._nre.search(self.rawname)
119 return (self.rawname, None)
120 return m.groups()[0:2]
# Convenience accessors over the parsed pair (decorator lines elided —
# presumably @property or cproperty).
122 def name(self): return self._sname[0]
124 def type(self): return self._sname[1]
# Interior of an elided `altnames`-style property on the `anime` class:
# collect (title, type-or-None) pairs from the "alternative title" info row.
129 for el in self._info("alternative title"):
# Alternative titles rendered as <div class="tab"> are parsed with _nre into
# (title, type); anything else is kept verbatim with no type.
130 if isinstance(el, bs4.Tag) and el.name == "div" and "tab" in el.get("class", []):
131 m = self._nre.search(el.text)
133 ret.append((m.groups()[0], m.groups()[1]))
135 ret.append((el.text, None))
# Ensure the primary name appears exactly once, at the front of the list.
136 if (self.name, None) in ret:
137 ret.remove((self.name, None))
138 ret.insert(0, (self.name, None))
# Body of an elided `episodes`-style property; `cstr` is an elided helper that
# presumably flattens the _info() node list to text — confirm.
143 ret = cstr(self._info("number of episodes"))
# Body of an elided __repr__: e.g. <ann.anime: 'Name' (1234)>.
149 return "<ann.anime: %r (%i)>" % (self.name, self.id)
# Interior of an elided title-search function (presumably `list(name)`):
# fetch ANN's alphabetical index page for the query's first letter and yield
# matches whose normalized title starts with the query.
# Extracts the numeric id from encyclopedia anime links.
# NOTE(review): "\d" in this non-raw string is an invalid escape sequence
# (DeprecationWarning; a SyntaxWarning on modern Pythons) — the pattern should
# be a raw string like r"^/encyclopedia/anime\.php\?id=(\d+)$". Same for the
# "\s" escapes in the two s() calls below. Not changed here because the
# surrounding elided lines make a safe edit unverifiable.
158 linkpat = re.compile("^/encyclopedia/anime\\.php\\?id=(\d+)$")
# Drop a leading article ("the"/"a") from the query before matching.
160 name = s(name, "^(the|a)\s+", "")
162 raise error("list() needs a prefix of at least one character")
# `fc` is presumably the query's first character (assignment elided); ASCII
# letters select a letter page, anything else presumably the "other" page.
164 if 'a' <= fc <= 'z' or 'A' <= fc <= 'Z':
# Fetch (through the cache) the index page for that letter.
168 d = get(urljoin(base, "anime.php?" + urlencode({"list": fc})))
170 ldiv = afind(afind(d, "div", id="maincontent"), "div", "lst")
171 for link in ldiv("a", "HOVERLINE"):
# Accumulate the anchor's direct string children into the raw title
# (initialization of `rawname` and the inner loop header are elided).
174 if isinstance(el, str):
175 rawname += el.strip()
# Normalize for matching: drop a leading article, fold macron vowels to their
# romaji digraphs (ō -> ou, ū -> uu).
177 mn = s(mn, "^a\s+", "")
178 mn = mn.replace("\u014d", "ou")
179 mn = mn.replace("\u016b", "uu")
# Case-insensitive prefix match against the normalized query.
180 if mn.lower().startswith(name.lower()):
181 m = linkpat.match(link["href"])
# Build the anime object by id and attach the title we already scraped,
# presumably pre-seeding the rawname cache to avoid a page fetch — confirm.
184 found = anime.byid(int(m.groups()[0]))
185 found.rawname = rawname