import urllib.request, urllib.parse, re, json
import bs4
from . import lib, htcache

soup = bs4.BeautifulSoup
soupify = lambda cont: soup(cont, "html.parser")
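# The classes below mirror the usual automanga object model: a library yields
# mangas, a manga yields volumes, a volume yields chapters, and a chapter
# yields pages. Everything is resolved lazily by fetching mangafox.me HTML
# through htcache and parsing it with BeautifulSoup.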
class page(lib.page):
    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = self.chapter.volume
        self.manga = self.volume.manga
        self.n = n
        self.id = str(n)
        self.name = "Page %s" % n
        self.url = url
        self.ciurl = None

    def iurl(self):
        # Fetch the reader page once and cache the image URL it embeds.
        if self.ciurl is None:
            page = soupify(htcache.fetch(self.url))
            self.ciurl = page.find("div", id="viewer").find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        return lib.stdimgstream(self.iurl())

    def __repr__(self):
        return "<mangafox.page %r.%r.%r.%r>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)

class chapter(lib.pagelist):
    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        # The reader's top bar ends with an "of N" counter; parse N and
        # build one page object per numbered reader URL.
        if self.cpag is None:
            pg = soupify(htcache.fetch(self.url + "1.html"))
            l = pg.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(l.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            m = l.contents[2].strip()
            if m[:3] != "of ":
                raise Exception("parse error: weird page list for %r" % self)
            self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in range(int(m[3:]))]
        return self.cpag

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)

class volume(lib.pagelist):
    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __len__(self):
        return len(self.ch)

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)

def nextel(el):
    # Return the next sibling that is an actual tag, skipping text nodes.
    while True:
        el = el.next_sibling
        if isinstance(el, bs4.Tag):
            return el

class manga(lib.manga):
    cure = re.compile(r"/c[\d.]+/$")

    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cvol = None
        self.stack = []

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())

    def vols(self):
        if self.cvol is None:
            page = soupify(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            cvol = []
            # The site lists volumes and chapters newest-first; reverse both
            # so indices follow reading order.
            for i, vn in enumerate(reversed(vls)):
                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
                vol = volume(self, [(self, i)], name, name)
                cls = nextel(vn)
                if cls.name != "ul" or "chlist" not in cls["class"]:
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, ch in enumerate(reversed(cls.findAll("li"))):
                    n = ch.div.h3 or ch.div.h4
                    chid = name = n.a.string
                    for span in ch("span"):
                        try:
                            if "title" in span["class"]:
                                name += " " + span.string
                        except KeyError:
                            pass
                    url = urllib.parse.urljoin(self.url, n.a["href"])
                    if url[-7:] == "/1.html":
                        url = url[:-6]
                    elif self.cure.search(url) is not None:
                        pass
                    else:
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url))
                cvol.append(vol)
            self.cvol = cvol
        return self.cvol

    def __repr__(self):
        return "<mangafox.manga %r>" % self.name

def libalphacmp(a, b):
    if a.upper() < b.upper():
        return -1
    elif a.upper() > b.upper():
        return 1
    return 0

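# The site's alphabetical directory (directory/N.htm?az) spreads its entries
# over numbered pages in sorted order. byname() below binary-searches those
# pages for the one that can contain the requested prefix, then scans forward
# across pages, yielding entries for as long as the prefix still matches.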
class library(lib.library):
    def __init__(self):
        self.base = "http://mangafox.me/"

    def alphapage(self, pno):
        abase = self.base + ("directory/%i.htm?az" % pno)
        page = soupify(htcache.fetch(abase))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = urllib.parse.urljoin(abase, t["href"])
            p = url.find("/manga/")
            if p < 0 or url.find('/', p + 7) != (len(url) - 1):
                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
            ret.append(manga(self, url[p + 7:-1], name, url))
        return ret

    def alphapages(self):
        page = soupify(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(ls[-2].find("a").string)

    def byname(self, prefix):
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                pno = c
                break
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0

    def search(self, expr):
        req = urllib.request.Request(self.base + ("ajax/search.php?term=%s" % urllib.parse.quote(expr)),
                                     headers={"User-Agent": "automanga/1"})
        with urllib.request.urlopen(req) as resp:
            rc = json.loads(resp.read().decode("utf-8"))
        return [manga(self, id, name, self.base + ("manga/%s/" % id)) for num, name, id, genres, author in rc]

    def byid(self, id):
        url = self.base + ("manga/%s/" % id)
        page = soupify(htcache.fetch(url))
        if page.find("div", id="title") is None:
            # Assume we got the search page
            raise KeyError(id)
        name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
        return manga(self, id, name, url)

    def __iter__(self):
        raise NotImplementedError("mangafox iterator")
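
# Minimal usage sketch (hypothetical: assumes this module is importable as
# part of its package, e.g. automanga.mangafox, that network access and
# htcache's cache are available, and that the stream returned by page.open()
# exposes read(); "some-id" is a placeholder, not a real manga id):
#
#     from automanga import mangafox
#     mlib = mangafox.library()
#     for m in mlib.byname("ber"):        # every manga whose name starts with "ber"
#         print(m.name)
#     m = mlib.byid("some-id")            # direct lookup; raises KeyError if unknown
#     pg = m[0][0][0]                     # first volume, first chapter, first page
#     data = pg.open().read()             # image bytes via lib.stdimgstream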