I am trying to scrape all the track links from my playlist.
This is my code:
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
playlist = 'minimal_house'
url = 'https://www.youtube.com/channel/UCt2GxiTBN_RiE-cbP0cmk5Q/playlists'
html = urlopen(url)
soup = BeautifulSoup(html , 'html.parser')
tracks = soup.find(title = playlist).get('href')
print(tracks)
url = url + tracks
print(url)
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('a',attrs={'class':'yt-simple-endpoint style-scope ytd-playlist-panel-video-renderer'})
print(links)
I couldn't scrape the 'a' tags; not by tag name, nor by id, nor by class name.
Here is my messy code that works for me:
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
playlist = 'minimal_house'
url = 'https://www.youtube.com/channel/UCt2GxiTBN_RiE-cbP0cmk5Q/playlists'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tracks = soup.find('a', attrs={'title': playlist}).get('href')
print(tracks)
url = 'https://www.youtube.com' + str(tracks)
print(url)
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('a')
links = set(link.get('href') for link in links if link.get('href') and 'watch' in link.get('href'))  # skip <a> tags with no href
print(links)
Since class names change based on the device making the request, it's better to collect all the links in this case.
You also need to use Selenium to scroll down and fetch the full list, as sketched below.
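A rough sketch of that scrolling step (assuming Chrome with chromedriver on your PATH; the 2-second sleep is a guess at load time, so tune it for your connection):

from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup

driver = webdriver.Chrome()  # assumes chromedriver is on your PATH
driver.get(url)  # the playlist url built above
last_height = 0
while True:
    # YouTube keeps its scroll height on document.documentElement, not document.body
    driver.execute_script('window.scrollTo(0, document.documentElement.scrollHeight);')
    sleep(2)  # give the next batch of videos time to load
    new_height = driver.execute_script('return document.documentElement.scrollHeight;')
    if new_height == last_height:
        break
    last_height = new_height
soup = BeautifulSoup(driver.page_source, 'html.parser')
driver.quit()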
import requests
from bs4 import BeautifulSoup
url = 'https://www.worldometers.info/world-population/population-by-country/'
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
urls = []
for link in soup.find_all('a'):
    urls = link.get('href')
    print(urls)
I want the URL to be printed as "https://www.worldometers.info/world-population/china-population/", but it is just printed as "world-population/china-population".
After that, I need to fetch one particular table from each URL.
Those are relative URLs, so you have to make them absolute, as follows:
import requests
from bs4 import BeautifulSoup
url = 'https://www.worldometers.info/world-population/population-by-country/'
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
for link in soup.find_all('a', href=True):  # href=True skips <a> tags that have no href
    full_url = 'https://www.worldometers.info' + link['href']
    print(full_url)
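Plain concatenation only works while every href is root-relative; urllib.parse.urljoin is a safer option since it resolves relative paths against the page URL and leaves already-absolute links untouched. A minimal sketch:

import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

url = 'https://www.worldometers.info/world-population/population-by-country/'
reqs = requests.get(url)
soup = BeautifulSoup(reqs.text, 'html.parser')
for link in soup.find_all('a', href=True):
    print(urljoin(url, link['href']))  # relative paths become absolute; absolute ones pass through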
I'm trying to make a program to read the price of bitcoin from a website. I used bs4 and was able to get the section I was looking for, but it's surrounded by the HTML tags.
output: <div class="priceValue___11gHJ">$52,693.18</div>
I just want the price, and I have tried the regex and lxml methods, but I keep getting errors.
import requests
from bs4 import BeautifulSoup
#get url
url = "https://coinmarketcap.com/currencies/bitcoin/"
r = requests.get(url)
#parse html
soup = BeautifulSoup(r.content, 'html5lib')
#find div
find_div = soup.find('div', {"class": "priceValue___11gHJ"})
print(find_div)
You need to do .text:
import requests
from bs4 import BeautifulSoup
#get url
url = "https://coinmarketcap.com/currencies/bitcoin/"
r = requests.get(url)
#parse html
soup = BeautifulSoup(r.content, 'html5lib')
#find div
find_div = soup.find('div', {"class": "priceValue___11gHJ"})
print(find_div.text) # $52,693.18
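One caveat: class names like priceValue___11gHJ are generated by the site's build tooling and can change without notice, so expect to update the selector from time to time. And if you want the price as a number rather than a string, you can strip the currency formatting (assuming the $52,693.18 format shown above):

price_text = find_div.text  # e.g. '$52,693.18'
price = float(price_text.replace('$', '').replace(',', ''))
print(price)  # 52693.18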
I am having issues trying to exclude results from my Beautiful Soup program. This is my code:
from bs4 import BeautifulSoup
import requests
URL = 'https://en.wikipedia.org/wiki/List_of_Wikipedia_mobile_applications'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
for link in soup.find_all('a'):
    print(link.get('href'))
I don't want to get the results that start with a "#", for example #cite_ref-18.
I have tried using for loops, but I get this error message: KeyError: 0.
You can use the str.startswith() method:
from bs4 import BeautifulSoup
import requests
URL = 'https://en.wikipedia.org/wiki/List_of_Wikipedia_mobile_applications'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
for tag in soup.find_all('a'):
    link = tag.get('href')
    if not str(link).startswith('#'):
        print(link)
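As an aside, passing href=True to find_all() makes BeautifulSoup skip <a> tags that have no href at all, so the str() cast around possible None values is no longer needed:

for tag in soup.find_all('a', href=True):
    if not tag['href'].startswith('#'):
        print(tag['href'])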
You can use the CSS selector a[href]:not([href^="#"]). This will select all <a> tags that have an href= attribute, but not those whose href starts with a # character:
import requests
from bs4 import BeautifulSoup
URL = 'https://en.wikipedia.org/wiki/List_of_Wikipedia_mobile_applications'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
for link in soup.select('a[href]:not([href^="#"])'):
    print(link['href'])
I am new to BeautifulSoup and I am practicing with little tasks. Here I try to get the "previous" link on this site. The HTML is
here
My code is:
import requests, bs4
from bs4 import BeautifulSoup
url = 'https://www.xkcd.com/'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.find('div', id="comic")
url2 = result.find('ul', class_='comicNav').find('a', rel='prev').find('href')
But it shows NoneType. I have read some posts about child elements in HTML and tried some different things, but it still does not work. Thank you for your help in advance.
You could use a CSS selector instead.
import requests, bs4
from bs4 import BeautifulSoup
url = 'https://www.xkcd.com/'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
result = soup.select('.comicNav a[rel~="prev"]')[0]
print(result)
If you want just the href, change it to:
result = soup.select('.comicNav a[rel~="prev"]')[0]["href"]
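BeautifulSoup also provides select_one(), which returns the first match directly (or None when nothing matches), so the [0] indexing can be dropped:

result = soup.select_one('.comicNav a[rel~="prev"]')['href']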
To get the prev link, find the ul tag and then find the a tag inside it. Try the code below.
import requests, bs4
from bs4 import BeautifulSoup
url = 'https://www.xkcd.com/'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
url2 = soup.find('ul', class_='comicNav').find('a',rel='prev')['href']
print(url2)
Output:
/2254/
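Note that /2254/ is a relative path; to actually request it, join it with the site's base URL, for example with urllib.parse.urljoin:

from urllib.parse import urljoin

print(urljoin(url, url2))  # https://www.xkcd.com/2254/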
I'm trying to crawl the URL address from an <a href=> tag.
But this site's href is "#none".
How can I crawl the real URL address?
I've already figured out a lot, but I couldn't find any tips.
It looks like this:
<a href="#none" onclick="goDetail(519975);">
title
</a>
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl
import re
ssl._create_default_https_context = ssl._create_unverified_context
html = urlopen('https://www.daegu.ac.kr/article/DG159/list')
bs = BeautifulSoup(html, 'html.parser')
nameList = bs.findAll('td', {'class': 'list_left'})
for name in nameList:
    print(name.get_text())
    print(name.get_url)
    print('\n----------------------------------------------')
You can concatenate the id from the onclick onto a base url (which is what happens in the onclick event). The first three links (those without an onclick) have a different base.
from bs4 import BeautifulSoup as bs
import requests
base1 = 'https://www.daegu.ac.kr/article/DG159/detail/'
base2 = 'https://www.daegu.ac.kr/article/DG159'
r = requests.get('https://www.daegu.ac.kr/article/DG159/list')
soup = bs(r.content, 'lxml')
links = [base1 + a['onclick'].split('(')[1].split(')')[0]
         if a.has_attr('onclick')
         else base2 + a['href']
         for a in soup.select('.board_tbl_list a')]
print(links)
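If the chained split() calls feel fragile, a regular expression pulls the id out just as well; a small sketch, using an example onclick value taken from the question:

import re

base1 = 'https://www.daegu.ac.kr/article/DG159/detail/'
onclick = 'goDetail(519975);'  # example value from the page
match = re.search(r'goDetail\((\d+)\)', onclick)
if match:
    print(base1 + match.group(1))  # https://www.daegu.ac.kr/article/DG159/detail/519975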