1
0
mirror of https://github.com/hanxi/xiaomusic.git synced 2025-12-06 14:52:50 +08:00

feat: 支持b站合集和收藏下载 (#487)

* feat:支持收藏夹和合集下载

* feat:支持收藏夹和合集下载

* feat:支持收藏夹和合集下载
This commit is contained in:
nfzsh
2025-06-09 00:37:26 +08:00
committed by GitHub
parent acab694adc
commit c72b19ffc0
2 changed files with 92 additions and 2 deletions

View File

@@ -47,6 +47,7 @@ from xiaomusic.utils import (
deepcopy_data_no_sensitive_info,
download_one_music,
download_playlist,
check_bili_fav_list,
downloadfile,
get_latest_version,
is_mp3,
@@ -548,7 +549,23 @@ class DownloadPlayList(BaseModel):
@app.post("/downloadplaylist")
async def downloadplaylist(data: DownloadPlayList, Verifcation=Depends(verification)):
try:
download_proc = await download_playlist(config, data.url, data.dirname)
bili_fav_list = await check_bili_fav_list(data.url)
download_proc_list = []
if bili_fav_list:
for bvid, title in bili_fav_list.items():
bvurl = f"https://www.bilibili.com/video/{bvid}"
download_proc_list[title] = await download_one_music(config, bvurl, os.path.join(data.dirname, title))
for title, download_proc_sigle in download_proc_list.items():
exit_code = await download_proc_sigle.wait()
log.info(f"Download completed {title} with exit code {exit_code}")
dir_path = os.path.join(config.download_path, data.dirname)
log.debug(f"Download dir_path: {dir_path}")
# 可能只是部分失败,都需要整理下载目录
remove_common_prefix(dir_path)
chmoddir(dir_path)
return {"ret": "OK"}
else:
download_proc = await download_playlist(config, data.url, data.dirname)
async def check_download_proc():
# 等待子进程完成

View File

@@ -22,7 +22,8 @@ import urllib.parse
from collections.abc import AsyncIterator
from dataclasses import asdict, dataclass
from http.cookies import SimpleCookie
from urllib.parse import urlparse
from time import sleep
from urllib.parse import urlparse, parse_qs
import aiohttp
import mutagen
@@ -940,6 +941,78 @@ def _set_wave_tags(audio, info):
audio["Title"] = info.title
audio["Artist"] = info.artist
async def check_bili_fav_list(url):
    """Resolve a Bilibili favlist/collection URL into a ``{bvid: title}`` map.

    Supported URLs live on ``space.bilibili.com``:
      * ``.../favlist?fid=<id>&ctype=11|21`` — a favorites folder (ctype=11)
        or a collected list (ctype=21);
      * ``.../lists/<lid>?type=...`` — a creator's season/series list.

    Any other URL yields an empty dict, so callers can treat a falsy result
    as "not a Bilibili list, fall back to the normal playlist download".

    Args:
        url: the page URL supplied by the user.

    Returns:
        dict mapping each video's bvid to its title (possibly empty).

    Raises:
        ValueError: the URL is on space.bilibili.com but is not a supported
            collection/favlist form.
        Exception: a Bilibili API request returned a non-200 status.
    """
    # Local import keeps this fix self-contained (asyncio is stdlib).
    import asyncio

    bvid_info = {}
    parsed_url = urlparse(url)
    path = parsed_url.path
    # Extract query parameters (fid / ctype / type)
    query_params = parse_qs(parsed_url.query)

    if 'space.bilibili.com' not in url:
        return bvid_info

    lid = None
    fav_type = None  # renamed from `type` to avoid shadowing the builtin
    if '/favlist' in path:
        lid = query_params.get('fid', [None])[0]
        ctype = query_params.get('ctype', [None])[0]
        if ctype == '11':
            fav_type = 'create'
        elif ctype == '21':
            fav_type = 'collect'
        else:
            raise ValueError("当前只支持合集和收藏夹")
    elif '/lists/' in path:
        parts = path.split('/')
        if len(parts) >= 4 and '?' in url:
            lid = parts[3]  # list id from the path
            fav_type = query_params.get('type', [None])[0]

    # Guard: previously falling through with lid/fav_type unset raised an
    # opaque UnboundLocalError instead of a clear, catchable error.
    if lid is None or fav_type is None:
        raise ValueError("当前只支持合集和收藏夹")

    page_size = 100
    page_num = 1
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0 Safari/537.36",
        "Accept": "application/json, text/plain, */*",
        "Referer": url,
        "Origin": "https://space.bilibili.com",
    }
    async with aiohttp.ClientSession(headers=headers) as session:
        if fav_type in ('season', 'collect'):
            # Season / collected-list API, paged by page_num.
            while True:
                list_url = f"https://api.bilibili.com/x/polymer/web-space/seasons_archives_list?season_id={lid}&page_size={page_size}&page_num={page_num}"
                async with session.get(list_url) as response:
                    if response.status != 200:
                        raise Exception(f"Failed to fetch data from {list_url}")
                    data = await response.json()
                    archives = data.get('data', {}).get('archives', [])
                    if not archives:
                        break
                    for archive in archives:
                        bvid_info[archive.get('bvid', None)] = archive.get('title', None)
                    if len(archives) < page_size:
                        break
                    page_num += 1
                # Non-blocking pause between pages to stay under rate limits;
                # the original time.sleep(1) blocked the whole event loop.
                await asyncio.sleep(1)
        elif fav_type == 'create':
            # User-created favorites folder API, paged by pn/ps.
            while True:
                list_url = f"https://api.bilibili.com/x/v3/fav/resource/list?media_id={lid}&pn={page_num}&ps={page_size}&order=mtime"
                async with session.get(list_url) as response:
                    if response.status != 200:
                        raise Exception(f"Failed to fetch data from {list_url}")
                    data = await response.json()
                    medias = data.get('data', {}).get('medias', [])
                    if not medias:
                        break
                    for media in medias:
                        # Key by bvid, not the full video URL: callers build
                        # "https://www.bilibili.com/video/{key}" from the key,
                        # so URL keys produced a doubled, invalid link.
                        bvid_info[media.get('bvid', None)] = media.get('title', None)
                    if len(medias) < page_size:
                        break
                    page_num += 1
                # Same rate-limit pause as the season/collect branch.
                await asyncio.sleep(1)
    return bvid_info
# 下载播放列表
async def download_playlist(config, url, dirname):