import urllib, re
import BeautifulSoup
import lib, htcache   # local helper modules (base classes and the HTML fetch cache)

soup = BeautifulSoup.BeautifulSoup
# Streams a single page image over plain HTTP via urllib.
class imgstream(lib.imgstream):
    def __init__(self, url):
        self.bk = urllib.urlopen(url)
        if self.bk.getcode() != 200:
            raise IOError("Server error: " + str(self.bk.getcode()))
        self.ctype = self.bk.info()["Content-Type"]
        self.clen = int(self.bk.info()["Content-Length"])

    def fileno(self):
        return self.bk.fileno()

    def read(self, sz = None):
        if sz is None:
            return self.bk.read()
        return self.bk.read(sz)
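# A minimal usage sketch (not part of the module): copying one image to disk
# through imgstream.  The URL is a made-up placeholder; the loop relies only
# on the read() method defined above.
#
#   st = imgstream("http://example.invalid/some-page.jpg")
#   with open("page.jpg", "wb") as out:
#       while True:
#           data = st.read(65536)
#           if not data:
#               break
#           out.write(data)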
class page(lib.page):
    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.volume = self.chapter.volume
        self.manga = self.volume.manga
        self.name = u"Page %s" % n
        self.url = url
        self.ciurl = None

    def iurl(self):
        # The image URL is fetched lazily and cached.
        if self.ciurl is None:
            page = soup(htcache.fetch(self.url))
            self.ciurl = page.find("div", id="viewer").find("img", id="image")["src"]
        return self.ciurl

    def open(self):
        return imgstream(self.iurl())

    def __repr__(self):
        return "<mangafox.page %r.%r.%r.%r>" % (self.manga.name, self.volume.name, self.chapter.name, self.name)
class chapter(lib.pagelist):
    def __init__(self, volume, stack, id, name, url):
        self.stack = stack
        self.volume = volume
        self.manga = volume.manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        if self.cpag is None:
            pg = soup(htcache.fetch(self.url + "1.html"))
            l = pg.find("form", id="top_bar").find("div", attrs={"class": "l"})
            if len(l.contents) != 3:
                raise Exception("parse error: weird page list for %r" % self)
            # The page counter reads "of N"; parse N out of it.
            m = l.contents[2].strip()
            if m[:3] != u"of ":
                raise Exception("parse error: weird page list for %r" % self)
            self.cpag = [page(self, self.stack + [(self, n)], n + 1, self.url + ("%i.html" % (n + 1))) for n in xrange(int(m[3:]))]
        return self.cpag

    def __repr__(self):
        return "<mangafox.chapter %r.%r.%r>" % (self.manga.name, self.volume.name, self.name)
class volume(lib.pagelist):
    def __init__(self, manga, stack, id, name):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.ch = []

    def __getitem__(self, i):
        return self.ch[i]

    def __repr__(self):
        return "<mangafox.volume %r.%r>" % (self.manga.name, self.name)
def nextel(el):
    # Step to the next sibling that is an actual tag, skipping text nodes.
    while True:
        el = el.nextSibling
        if isinstance(el, BeautifulSoup.Tag):
            return el
class manga(lib.manga):
    cure = re.compile(r"/v\d+/c[\d.]+/$")

    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cvol = None

    def __getitem__(self, i):
        return self.vols()[i]

    def __len__(self):
        return len(self.vols())
    def vols(self):
        if self.cvol is None:
            page = soup(htcache.fetch(self.url))
            vls = page.find("div", id="chapters").findAll("div", attrs={"class": "slide"})
            cvol = []
            for i, vn in enumerate(reversed(vls)):
                name = vn.find("h3", attrs={"class": "volume"}).contents[0].strip()
                vid = name.encode("utf8")
                vol = volume(self, [(self, i)], vid, name)
                cls = nextel(vn)
                if cls.name != u"ul" or cls["class"] != u"chlist":
                    raise Exception("parse error: weird volume list for %r" % self)
                for o, ch in enumerate(reversed(cls.findAll("li"))):
                    n = ch.div.h3 or ch.div.h4
                    name = n.a.string
                    chid = name.encode("utf8")
                    for span in ch("span"):
                        # Spans without a class attribute make BeautifulSoup raise KeyError.
                        try:
                            if u" title " in (u" " + span["class"] + u" "):
                                name += " " + span.string
                        except KeyError:
                            pass
                    url = n.a["href"].encode("us-ascii")
                    if url[-7:] == "/1.html":
                        url = url[:-6]
                    elif self.cure.search(url) is not None:
                        pass
                    else:
                        raise Exception("parse error: unexpected chapter URL for %r: %s" % (self, url))
                    vol.ch.append(chapter(vol, vol.stack + [(vol, o)], chid, name, url))
                cvol.append(vol)
            self.cvol = cvol
        return self.cvol
    def __repr__(self):
        return "<mangafox.manga %r>" % self.name
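# A hedged traversal sketch (not part of the module): given a manga object m
# (e.g. from the library class below), volumes, chapters and pages are reached
# through the cached vols()/ch/pages() containers defined above.  len(ch)
# triggers a fetch of the chapter's first page to count its pages.
#
#   for vol in m.vols():
#       for ch in vol.ch:
#           print vol.name, ch.name, len(ch)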
def libalphacmp(a, b):
    return cmp(a.upper(), b.upper())
class library(lib.library):
    def __init__(self):
        self.base = "http://mangafox.me/"

    def alphapage(self, pno):
        page = soup(htcache.fetch(self.base + ("directory/%i.htm?az" % pno)))
        ls = page.find("div", id="mangalist").find("ul", attrs={"class": "list"}).findAll("li")
        ret = []
        ubase = self.base + "manga/"
        for m in ls:
            t = m.find("div", attrs={"class": "manga_text"}).find("a", attrs={"class": "title"})
            name = t.string
            url = t["href"].encode("us-ascii")
            if url[:len(ubase)] != ubase or url.find('/', len(ubase)) != (len(url) - 1):
                raise Exception("parse error: unexpected manga URL for %r: %s" % (name, url))
            ret.append(manga(self, url[len(ubase):-1], name, url))
        return ret
    def alphapages(self):
        # The highest directory page number is the second-to-last pager entry.
        page = soup(htcache.fetch(self.base + "directory/?az"))
        ls = page.find("div", id="mangalist").find("div", id="nav").find("ul").findAll("li")
        return int(ls[-2].find("a").string)
    def byname(self, prefix):
        if not isinstance(prefix, unicode):
            prefix = prefix.decode("utf8")
        # Binary-search the alphabetically sorted directory for a page that
        # covers the prefix.
        l = 1
        r = self.alphapages()
        while True:
            if l > r:
                return
            c = l + ((r + 1 - l) // 2)
            ls = self.alphapage(c)
            if libalphacmp(ls[0].name, prefix) > 0:
                r = c - 1
            elif libalphacmp(ls[-1].name, prefix) < 0:
                l = c + 1
            else:
                pno = c
                break
        # Skip forward to the first name not less than the prefix.
        i = 0
        while i < len(ls):
            m = ls[i]
            if libalphacmp(m.name, prefix) >= 0:
                break
            i += 1
        # Yield matches, continuing onto following directory pages for as long
        # as the prefix still matches.
        while True:
            while i < len(ls):
                m = ls[i]
                if not m.name[:len(prefix)].upper() == prefix.upper():
                    return
                yield m
                i += 1
            pno += 1
            ls = self.alphapage(pno)
            i = 0
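    # Usage sketch for byname (not part of the module): byname is a generator,
    # so matches arrive lazily as directory pages are fetched.  The prefix is
    # an arbitrary example.
    #
    #   mlib = library()
    #   for m in mlib.byname("berserk"):
    #       print m.name, m.url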
    def byid(self, id):
        url = self.base + ("manga/%s/" % id)
        page = soup(htcache.fetch(url))
        if page.find("div", id="title") is None:
            # Assume we got the search page
            raise KeyError(id)
        name = page.find("div", id="series_info").find("div", attrs={"class": "cover"}).img["alt"]
        return manga(self, id, name, url)

    def __iter__(self):
        raise NotImplementedError("mangafox iterator")
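# byid sketch (not part of the module): the id is the path component under
# /manga/ on the site; "naruto" below is only a hypothetical example.  byid
# raises KeyError when no title page is found for the id.
#
#   mlib = library()
#   m = mlib.byid("naruto")
#   print m.name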