import urllib.request, urllib.parse, re, json
import bs4
from . import lib, htcache
soup = bs4.BeautifulSoup
soupify = lambda cont: soup(cont, "html.parser")
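
# Backend for mangafox.me. HTML pages are fetched through htcache.fetch() and
# parsed with BeautifulSoup; parsed results are cached on the objects
# themselves (ciurl, cpag, cvol), so repeated indexing does not re-parse.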

class page(lib.page):
    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = self.chapter.volume
        self.manga = self.volume.manga
        self.name = "Page %s" % n
        self.url = url
        self.ciurl = None
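
    # Fetch the HTML page holding this image and extract the URL of the image
    # itself; the result is cached in self.ciurl after the first call.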
    def iurl(self):
        if self.ciurl is None:
            page = soupify(htcache.fetch(self.url))
            self.ciurl = page.find("div", id="viewer").find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        return lib.stdimgstream(self.iurl())
32 return "<mangafox.page %r.%r.%r.%r>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)

class chapter(lib.pagelist):
    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())
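
    # The page count is parsed out of the "of N" text in the top bar of the
    # chapter's first page; one page object is created per page.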
    def pages(self):
        if self.cpag is None:
            pg = soupify(htcache.fetch(self.url + "1.html"))
            l = pg.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(l.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            m = l.contents[2].strip()
            if m[:3] != "of ":
                raise Exception("parse error: weird page list for %r" % self)
            self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in range(int(m[3:]))]
        return self.cpag
66 return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)

class volume(lib.pagelist):
    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)
def nextel(el):
    while True:
        el = el.next_sibling
        if isinstance(el, bs4.Tag):
            return el

class manga(lib.manga):
    cure = re.compile(r"/c[\d.]+/$")

    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cvol = None
        self.stack = []

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())
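
    # The chapter index lists volumes and their chapters newest-first, so both
    # lists are reversed to get chronological order. Chapter URLs are
    # normalised to end in ".../" rather than ".../1.html".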
    def vols(self):
        if self.cvol is None:
            page = soupify(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            cvol = []
            for i, vn in enumerate(reversed(vls)):
                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
                vol = volume(self, [(self, i)], name, name)
                cls = nextel(vn)
                if cls.name != "ul" or "chlist" not in cls["class"]:
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, ch in enumerate(reversed(cls.findAll("li"))):
                    n = ch.div.h3 or ch.div.h4
                    chid = name = n.a.string
                    for span in ch("span"):
                        if "title" in span.get("class", []):
                            name += " " + span.string
                    url = n.a["href"]
                    if url[-7:] == "/1.html":
                        url = url[:-6]
                    elif self.cure.search(url) is not None:
                        pass
                    else:
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url))
                cvol.append(vol)
            self.cvol = cvol
        return self.cvol
147 return "<mangafox.manga %r>" % self.name
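
# Case-insensitive three-way comparison used to order names the same way the
# site's alphabetical directory does.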
def libalphacmp(a, b):
    if a.upper() < b.upper():
        return -1
    elif a.upper() > b.upper():
        return 1
    return 0

class library(lib.library):
    def __init__(self):
        self.base = "http://mangafox.me/"
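
    # Fetch one page of the alphabetical directory and return the manga
    # objects listed on it.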
    def alphapage(self, pno):
        page = soupify(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        ubase = self.base + "manga/"
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = t["href"]
            if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
            ret.append(manga(self, url[len(ubase):-1], name, url))
        return ret
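
    # Total number of directory pages, read from the second-to-last entry of
    # the directory's pagination list.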
    def alphapages(self):
        page = soupify(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(ls[-2].find("a").string)
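
    # Generate all mangas whose names start with the given prefix: binary
    # search the alphabetically sorted directory for a page covering the
    # prefix, then scan forward and yield matches until the prefix no longer
    # matches.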
    def byname(self, prefix):
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                pno = c
                break
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0
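
    # Free-text search through the site's AJAX search endpoint, which returns
    # a JSON list of [num, name, id, genres, author] entries.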
    def search(self, expr):
        req = urllib.request.Request(self.base + ("ajax/search.php?term=%s" % urllib.parse.quote(expr)),
                                     headers={"User-Agent": "automanga/1"})
        with urllib.request.urlopen(req) as resp:
            rc = json.loads(resp.read().decode("utf-8"))
        return [manga(self, id, name, self.base + ("manga/%s/" % id)) for num, name, id, genres, author in rc]
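
    # Look a manga up directly by its id in the manga/ URL namespace.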
    def byid(self, id):
        url = self.base + ("manga/%s/" % id)
        page = soupify(htcache.fetch(url))
        if page.find("div", id="title") is None:
            # Assume we got the search page
            raise KeyError(id)
        name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
        return manga(self, id, name, url)

    def __iter__(self):
        raise NotImplementedError("mangafox iterator")
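
# A minimal usage sketch (assuming the lib.* base interfaces behave the way
# the classes above suggest; names in quotes are hypothetical examples):
#
#   lb = library()
#   for mng in lb.byname("a"):      # every title whose name starts with "a"
#       print(mng.name)
#   mng = lb.byid("some_manga_id")  # hypothetical directory id
#   img = mng[0][0][0]              # first volume -> chapter -> page
#   data = img.open()               # image stream via lib.stdimgstream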