From 2fe9bb38a62153db6c53f51f450b04136a85268b Mon Sep 17 00:00:00 2001
From: sh1nobu
Date: Sat, 18 Sep 2021 14:04:18 +0800
Subject: [PATCH] Made backend.py into a class-based file for better access to variables

---
 src/backend.py | 170 ++++++++++++++++++++++------------------------------
 1 file changed, 71 insertions(+), 99 deletions(-)

diff --git a/src/backend.py b/src/backend.py
index c21b7e6..f821abb 100644
--- a/src/backend.py
+++ b/src/backend.py
@@ -1,92 +1,64 @@
-import requests
-import os
+# Dependencies
+import requests as req
 import shutil
-
+import re
+import os
 from bs4 import BeautifulSoup
-
-folder_path = ""
+from dataclasses import dataclass
 
-def get_path(folder):
-    global folder_path
-    folder_path = folder
-
-
-def get_links(name, episode_number, source=None):
-    if source is not None:
-        source_ep = f"https://gogoanime.pe/{name}-episode-"
-        episode_links = [f"{source_ep}{i}" for i in range(1, int(episode_number) + 1)]
-        episode_links.insert(0, source)
-    else:
-        source_ep = f"https://gogoanime.pe/{name}-episode-"
-        episode_links = [f"{source_ep}{i}" for i in range(1, int(episode_number) + 1)]
-    return episode_links
-
-
-def get_download_links(episode_link):
-    episode_link_resp = requests.get(episode_link)
-    soup = BeautifulSoup(episode_link_resp.content, "html.parser")
-    exist = soup.find("h1", {"class": "entry-title"})
-    if exist is None:
-        # 200
-        link = soup.find("li", {"class": "dowloads"})
-        return link.a.get("href")
-    else:
-        # 404
-        episode_link = f"{episode_link}-"
-        episode_link_resp = requests.get(episode_link)
-        soup = BeautifulSoup(episode_link_resp.content, "html.parser")
-        exist = soup.find("h1", {"class": "entry-title"})
-        if exist is None:
-            link = soup.find("li", {"class": "dowloads"})
-            return link.a.get("href")
-        else:
-            return None
-
-
-def get_download_urls(download_link):
-    link = requests.get(download_link[1])
-    soup = BeautifulSoup(link.content, "html.parser")
-    link = soup.find("div", {"class": "mirror_link"}).find_all(
-        "div", {"class": "dowload"}
-    )
-    try:
-        if len(link) > 5:
-            return [
-                download_link[1].split("+")[-1],
-                link[int(download_link[0]) + 1].a.get("href"),
-            ]
-        else:
-            return [
-                download_link[1].split("+")[-1],
-                link[int(download_link[0])].a.get("href"),
-            ]
-    except IndexError:
-        return [download_link[1].split("+")[-1], link[0].a.get("href")]
-
-
-def download_episodes(url):
-    if "cdn" in url[1]:
-        header = {
-            "Host": "gogo-cdn.com",
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:91.0) Gecko/20100101 Firefox/91.0",
-            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
-            "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate",
-            "Referer": "https://streamani.io/",
-            "Upgrade-Insecure-Requests": "1",
-            "Sec-Fetch-Dest": "document",
-            "Sec-Fetch-Mode": "navigate",
-            "Sec-Fetch-Site": "cross-site",
-            "Sec-Fetch-User": "?1",
-            "Te": "trailers",
-            "Connection": "close",
-        }
-        url_resp = requests.get(url[1], headers=header, stream=True)
-        episode_name = f"{url[0]}.mp4"
-        file_name = os.path.join(folder_path, episode_name)
-        with open(file_name, "wb") as file:
-            shutil.copyfileobj(url_resp.raw, file)
-    else:
+
+@dataclass
+class Download:
+    name: str
+    episode_quality: str
+    episode_number: int
+    folder: str
+
+    def get_links(self, source=None):
+        # Build one URL per episode from the shared slug pattern
+        source_ep = f"https://gogoanime.pe/{self.name}-episode-"
+        episode_links = [
+            f"{source_ep}{i}" for i in range(1, self.episode_number + 1)
+        ]
+        if source is not None:
+            episode_links.insert(0, source)
+        return episode_links
+
+    def get_download_links(self, episode_link):
+        with req.get(episode_link) as res:
+            soup = BeautifulSoup(res.content, "html.parser")
+            exist = soup.find("h1", {"class": "entry-title"})
+            if exist is None:
+                # Page exists (HTTP 200): grab the download-page link
+                episode_link = soup.find("li", {"class": "dowloads"})
+                return episode_link.a.get("href")
+            else:
+                # 404 page: retry with a trailing "-" in the slug
+                episode_link = f"{episode_link}-"
+                with req.get(episode_link) as res:
+                    soup = BeautifulSoup(res.content, "html.parser")
+                    exist = soup.find("h1", {"class": "entry-title"})
+                    if exist is None:
+                        episode_link = soup.find("li", {"class": "dowloads"})
+                        return episode_link.a.get("href")
+                    else:
+                        return None
+
+    def get_download_urls(self, download_link):
+        with req.get(download_link) as res:
+            soup = BeautifulSoup(res.content, "html.parser")
+            link = soup.find("div", {"class": "mirror_link"}).find(
+                "div",
+                text=re.compile(fr"\b{re.escape(self.episode_quality)}\b"),
+                attrs={"class": "dowload"},
+            )
+            if link is None:  # quality not found; fall back to the first mirror
+                link = soup.find("div", {"class": "mirror_link"}).find(
+                    "div", {"class": "dowload"}
+                )
+            return [download_link.split("+")[-1], link.a.get("href")]
+
+    def download_episodes(self, url):
         header = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0",
             "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
@@ -94,16 +66,16 @@ def download_episodes(url):
             "Accept-Encoding": "gzip, deflate",
             "Connection": "close",
         }
-        url_resp = requests.get(url, headers=header, stream=True)
-        episode_name = f"{url[0]}.mp4"
-        file_name = os.path.join(folder_path, episode_name)
-        with open(file_name, "wb") as file:
-            shutil.copyfileobj(url_resp.raw, file)
-        # url_resp = requests.get(url, headers=header, stream=True)
-        # try:
-        #     episode_name = f"{url.split('/')[-1].split('-')[-2]}.mp4"
-        # except IndexError:
-        #     episode_name = f"{url_resp.url.split('/')[-1].split('.')[-3]}.mp4"
-        # file_name = os.path.join(folder_path, episode_name)
-        # with open(file_name, "wb") as file:
-        #     shutil.copyfileobj(url_resp.raw, file)
+        with req.get(url[1], headers=header, stream=True) as res:
+            # The raw stream may be gzip/deflate-encoded; decode before writing
+            res.raw.decode_content = True
+            episode_name = f"EP.{url[0]}.mp4"
+            file_loc = os.path.join(self.folder, episode_name)
+            with open(file_loc, "wb") as file:
+                shutil.copyfileobj(res.raw, file, 8192)
+
+
+class InvalidInputValue(Exception):
+    """Raised when custom_episode_number is 0 or greater than episode_number."""
+
+    pass
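
Note: the methods above are meant to be called in sequence by a front end.
A minimal usage sketch follows (hypothetical driver, not part of this patch;
the show slug, quality tag, and folder are illustrative values):

    from backend import Download

    d = Download(
        name="one-piece",        # slug as it appears in the gogoanime URL
        episode_quality="360",   # matched against the mirror labels via regex
        episode_number=2,
        folder="downloads",      # target folder; must already exist
    )
    for link in d.get_links():
        page = d.get_download_links(link)   # episode page -> download page, or None
        if page is not None:
            d.download_episodes(d.get_download_urls(page))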