soup.select('.pc_temp_songlist > ul > li > a')
同样对歌曲时长的分析方法也如上,对 select 的具体用法可参考如下🔗:
Python中BeautifulSoup库的find_all、select用法
三、爬取酷狗Top500的歌曲
import requests
from bs4 import BeautifulSoup
import json
def getText(url):
    """Fetch the HTML source of *url* and return it as text.

    Returns an empty string on any request failure so the caller can
    treat the crawl as best-effort and simply skip bad pages.
    """
    try:
        # 30-second timeout so a hung server does not stall the whole crawl.
        r = requests.get(url, timeout=30)
        # Raise HTTPError when the status code is not a success (2xx).
        r.raise_for_status()
        # Force UTF-8 decoding; the target pages are served as utf-8.
        r.encoding = 'utf-8'
        return r.text
    # Narrowed from a bare `except:` — only swallow request-level failures
    # (connection errors, timeouts, bad status), not e.g. KeyboardInterrupt.
    except requests.RequestException:
        return ""
# Parse one chart page and collect song entries into the global SONGS list.
def getInfo(url):
    """Extract (singer, title, duration) from one Kugou chart page.

    Appends one dict per song to the module-level SONGS list and prints
    each entry as it is found.
    """
    soup = BeautifulSoup(getText(url), 'html.parser')
    # Each anchor's text looks like "Singer - Title".
    songs = soup.select('.pc_temp_songlist > ul > li > a')
    # Duration lives in a nested element under the row's <span>.
    times = soup.select('.pc_temp_songlist > ul > li > span .pc_temp_time')
    for song, time in zip(songs, times):
        # partition() is safer than split('-')[1]: it never raises when the
        # separator is missing, and keeps titles that contain '-' intact.
        singer, _, title = song.get_text().partition('-')
        data = {
            "singer": singer.strip(),
            "song title": title.strip(),
            "time": time.get_text().strip()
        }
        print(data)
        SONGS.append(data)
# Global accumulator shared with getInfo(): one dict per scraped song.
SONGS = []

if __name__ == '__main__':
    # The Top500 chart is paginated across pages 1..23 (22 songs per page);
    # crawl each page in order instead of pre-building a URL list.
    for page in range(1, 24):
        getInfo('https://www.kugou.com/yy/rank/home/{}-8888.html'.format(page))
    print("歌曲数量:", len(SONGS))
    # Persist everything as pretty-printed JSON, keeping CJK characters
    # readable (ensure_ascii=False).
    with open('songs.json', 'w', encoding='utf-8') as json_file:
        json_file.write(json.dumps(SONGS, ensure_ascii=False, indent=4))
最终生成的 JSON 文件格式非常简单,如下所示: