import bs4
from urllib.parse import urljoin
from . import lib, htcache

soup = bs4.BeautifulSoup
soupify = lambda cont: soup(cont, "html.parser")
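
# Each page resolves its image URL lazily: the chapter page is fetched and
# parsed only when iurl() is first called, and the result is cached.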
class page(lib.page):
    def __init__(self, chapter, stack, n, url):
        self.stack = stack
        self.chapter = chapter
        self.manga = chapter.manga
        self.n = n
        self.name = "Page %s" % n
        self.url = url
        self.ciurl = None

    def iurl(self):
        if self.ciurl is None:
            page = soupify(htcache.fetch(self.url))
            self.ciurl = page.find("div", id="imgholder").find("img", id="img")["src"]
        return self.ciurl

    def open(self):
        return lib.stdimgstream(self.iurl())

    def __repr__(self):
        return "<mrnet.page %r.%r.%r>" % (self.manga.name, self.chapter.name, self.name)
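
# A chapter is a list-like collection of pages, built lazily from the page
# drop-down on the chapter's first page and cached thereafter.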
class chapter(lib.pagelist):
    def __init__(self, manga, stack, id, name, url):
        self.stack = stack
        self.manga = manga
        self.id = id
        self.name = name
        self.url = url
        self.cpag = None

    def __getitem__(self, i):
        return self.pages()[i]

    def __len__(self):
        return len(self.pages())

    def pages(self):
        if self.cpag is None:
            pg = soupify(htcache.fetch(self.url))
            pag = []
            for opt in pg.find("div", id="selectpage").find("select", id="pageMenu").findAll("option"):
                url = urljoin(self.url, opt["value"])
                # The option text is the page number.
                n = int(opt.string)
                pag.append(page(self, self.stack + [(self, len(pag))], n, url))
            self.cpag = pag
        return self.cpag

    def __repr__(self):
        return "<mrnet.chapter %r.%r>" % (self.manga.name, self.name)
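
# A manga aggregates its chapters; the chapter list is scraped from the
# series page's "listing" table and cached.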
class manga(lib.manga):
    def __init__(self, lib, id, name, url):
        self.lib = lib
        self.id = id
        self.name = name
        self.url = url
        self.cch = None

    def __getitem__(self, i):
        return self.ch()[i]

    def __len__(self):
        return len(self.ch())
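
    # Chapter ids and names come from the chapter links; when a link is
    # followed by a " : subtitle" text node, the subtitle is folded into
    # the displayed name.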
    def ch(self):
        if self.cch is None:
            page = soupify(htcache.fetch(self.url))
            cls = page.find("div", id="chapterlist").find("table", id="listing")
            cch = []
            for tr in cls.findAll("tr"):
                td = tr.find("td")
                if td is None: continue
                cla = td.find("a")
                url = urljoin(self.url, cla["href"])
                cid = name = cla.string
                if isinstance(cla.nextSibling, str):
                    ncont = str(cla.nextSibling)
                    if len(ncont) > 3 and ncont[:3] == " : ":
                        name += ": " + ncont[3:]
                cch.append(chapter(self, [(self, len(cch))], cid, name, url))
            self.cch = cch
        return self.cch

    def __repr__(self):
        return "<mrnet.manga %r>" % self.name
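
# The library enumerates manga by scraping the site's alphabetical index and
# looks series up by the id used in their URL path.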
class library(lib.library):
    def __init__(self):
        self.base = "http://www.mangareader.net/"

    def byid(self, id):
        url = self.base + id
        page = soupify(htcache.fetch(url))
        if page.find("h2", attrs={"class": "aname"}) is None:
            raise KeyError(id)
        name = page.find("h2", attrs={"class": "aname"}).string
        return manga(self, id, name, url)

    def __iter__(self):
        page = soupify(htcache.fetch(self.base + "alphabetical"))
        for sec in page.findAll("div", attrs={"class": "series_alpha"}):
            for li in sec.find("ul", attrs={"class": "series_alpha"}).findAll("li"):
                url = li.a["href"]
                name = li.a.string
                if url[:1] != "/": continue
                id = url[1:]
                if '/' in id:
                    # Does this distinction mean something?
                    id = id[id.rindex('/') + 1:]
                    if id[-5:] != ".html":
                        continue
                    id = id[:-5]
                yield manga(self, id, name, urljoin(self.base, url))
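
    # byname() and search() filter the full listing client-side, so each
    # call iterates the alphabetical index.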
    def byname(self, prefix):
        prefix = prefix.lower()
        for manga in self:
            if manga.name.lower()[:len(prefix)] == prefix:
                yield manga

    def search(self, expr):
        expr = expr.lower()
        for manga in self:
            if expr in manga.name.lower():
                yield manga