Changed the code so that the script downloads all episodes

This commit is contained in:
sh1nobu 2021-09-06 18:55:20 +08:00
parent 01c7a60133
commit 8657080d34
2 changed files with 46 additions and 42 deletions

View File

@ -25,9 +25,23 @@ def get_links(name, episode_number, source=None):
def get_download_links(episode_link):
    """Return the direct download href for an episode page, or None.

    Fetches *episode_link* and looks for the download anchor inside
    <li class="dowloads"> (sic — the site's own class name). The site's
    404 page is detected by the presence of an <h1 class="entry-title">
    element; in that case the request is retried once with a trailing
    "-" appended to the URL, since some episode slugs end with a dash.

    Parameters
    ----------
    episode_link : str
        URL of the episode page to scrape.

    Returns
    -------
    str or None
        The href of the download link, or None if both the original URL
        and the dash-suffixed fallback resolve to the 404 page.
    """
    # Try the URL as given, then once more with a trailing "-" appended.
    for candidate in (episode_link, f"{episode_link}-"):
        resp = requests.get(candidate, stream=True)
        soup = BeautifulSoup(resp.content, "html.parser")
        # <h1 class="entry-title"> only appears on the site's 404 page,
        # so its absence means we landed on a real episode page.
        if soup.find("h1", {"class": "entry-title"}) is None:
            link = soup.find("li", {"class": "dowloads"})
            return link.a.get("href")
    # Both attempts hit the 404 page; mirror the original implicit None.
    return None
def get_download_urls(download_link):
link = requests.get(download_link, stream=True)
@ -44,7 +58,8 @@ def download_episodes(url):
"Accept-Encoding": "gzip, deflate",
"Connection": "close",
}
url_resp = requests.get(url[1], headers=header, stream=True)
file_name = os.path.join(folder_path, f"{url[0]}.mp4")
url_resp = requests.get(url, headers=header, stream=True)
episode_name = f"{url.split('-')[-2]}.mp4"
file_name = os.path.join(folder_path, episode_name)
with open(file_name, "wb") as file:
shutil.copyfileobj(url_resp.raw, file)
shutil.copyfileobj(url_resp.raw, file)

View File

@ -81,55 +81,44 @@ def bitanime():
if episode_zero is None:
# Episode 0 does exist
exec = concurrent.futures.ThreadPoolExecutor()
episode_links = bd.get_links(name, episode_number, source)
episode_links = bd.get_links(name, episode_number)
download_links = list(exec.map(bd.get_download_links, episode_links))
download_urls = list(exec.map(bd.get_download_urls, download_links))
conv_download_urls = {
episode_title: url for episode_title, url in enumerate(download_urls)
}
download_urls = sorted(set(conv_download_urls.items()))
filtered_download_links = [download_link for download_link in download_links if download_link]
download_urls = list(exec.map(bd.get_download_urls, filtered_download_links))
print(f"Downloading {Fore.LIGHTCYAN_EX}{len(download_urls)} episode/s")
print(f"{Fore.LIGHTGREEN_EX}====================================")
print(download_urls)
print(len(download_urls))
# bd.get_path(folder)
# thread_map(
# bd.download_episodes, download_urls, ncols=75, total=len(download_urls)
# )
# try:
# os.startfile(folder)
# except (AttributeError):
# import sys, subprocess
bd.get_path(folder)
thread_map(
bd.download_episodes, download_urls, ncols=75, total=len(download_urls)
)
try:
os.startfile(folder)
except (AttributeError):
import sys, subprocess
# opener = "open" if sys.platform == "darwin" else "xdg-open"
# subprocess.call([opener, folder])
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, folder])
else:
# Episode 0 does not exist
exec = concurrent.futures.ThreadPoolExecutor()
episode_links = bd.get_links(name, episode_number)
download_links = list(exec.map(bd.get_download_links, episode_links))
download_urls = list(exec.map(bd.get_download_urls, download_links))
conv_download_urls = {
episode_title + 1: url
for episode_title, url in enumerate(download_urls)
}
download_urls = sorted(set(conv_download_urls.items()))
filtered_download_links = [download_link for download_link in download_links if download_link]
download_urls = list(exec.map(bd.get_download_urls, filtered_download_links))
print(f"Downloading {Fore.LIGHTCYAN_EX}{len(download_urls)} episode/s")
print(f"{Fore.LIGHTGREEN_EX}====================================")
print(download_urls)
print(len(download_urls))
# bd.get_path(folder)
# thread_map(
# bd.download_episodes, download_urls, ncols=75, total=len(download_urls)
# )
# try:
# os.startfile(folder)
# except (AttributeError):
# import sys, subprocess
bd.get_path(folder)
thread_map(
bd.download_episodes, download_urls, ncols=75, total=len(download_urls)
)
try:
os.startfile(folder)
except (AttributeError):
import sys, subprocess
# opener = "open" if sys.platform == "darwin" else "xdg-open"
# subprocess.call([opener, folder])
opener = "open" if sys.platform == "darwin" else "xdg-open"
subprocess.call([opener, folder])
use_again = input("Do you want to download other anime? (y|n) >> ").lower()
if use_again == "y":