I am learning to build web crawlers and am currently working on getting all the URLs from a site. I have been experimenting and no longer have exactly the code I had before, but I have been able to get all the links. My problem is the recursion: I need to do the same thing over and over, and I think the recursion is where it goes wrong, even though it is doing exactly what the code tells it to. My code is below.
#!/usr/bin/python
import urllib2
import urlparse
from BeautifulSoup import BeautifulSoup

def getAllUrl(url):
    page = urllib2.urlopen( url ).read()
    urlList = []
    try:
        soup = BeautifulSoup(page)
        soup.prettify()
        for anchor in soup.findAll('a', href=True):
            if not 'http://' in anchor['href']:
                if urlparse.urljoin('http://bobthemac.com', anchor['href']) not in urlList:
                    urlList.append(urlparse.urljoin('http://bobthemac.com', anchor['href']))
            else:
                if anchor['href'] not in urlList:
                    urlList.append(anchor['href'])
        length = len(urlList)
        for url in urlList:
            getAllUrl(url)
        return urlList
    except urllib2.HTTPError, e:
        print e

if __name__ == "__main__":
    urls = getAllUrl('http://bobthemac.com')
    for x in urls:
        print x
What I am trying to achieve is to get all the URLs for a site. With the current set-up the program runs until it runs out of memory; all I want is the URLs from the site. Does anyone have an idea how to do this? I think I have the right idea and just need some small changes to the code.
EDIT
For those of you who are interested, below is my working code that gets all the URLs for the site; someone might find it useful. It's not the best code and does need some work, but with a little effort it could be quite good.
#!/usr/bin/python
import urllib2
import urlparse
from BeautifulSoup import BeautifulSoup

def getAllUrl(url):
    urlList = []
    try:
        page = urllib2.urlopen( url ).read()
        soup = BeautifulSoup(page)
        soup.prettify()
        for anchor in soup.findAll('a', href=True):
            if not 'http://' in anchor['href']:
                if urlparse.urljoin('http://bobthemac.com', anchor['href']) not in urlList:
                    urlList.append(urlparse.urljoin('http://bobthemac.com', anchor['href']))
            else:
                if anchor['href'] not in urlList:
                    urlList.append(anchor['href'])
        return urlList
    except urllib2.HTTPError, e:
        urlList.append( e )

if __name__ == "__main__":
    urls = getAllUrl('http://bobthemac.com')
    fullList = []
    for x in urls:
        listUrls = getAllUrl(x)
        try:
            for i in listUrls:
                if not i in fullList:
                    fullList.append(i)
        except TypeError, e:
            print 'Woops wrong content passed'
    for i in fullList:
        print i
I think this works:
#!/usr/bin/python
import urllib2
import urlparse
from BeautifulSoup import BeautifulSoup

def getAllUrl(url):
    try:
        page = urllib2.urlopen( url ).read()
    except:
        return []
    urlList = []
    try:
        soup = BeautifulSoup(page)
        soup.prettify()
        for anchor in soup.findAll('a', href=True):
            if not 'http://' in anchor['href']:
                if urlparse.urljoin(url, anchor['href']) not in urlList:
                    urlList.append(urlparse.urljoin(url, anchor['href']))
            else:
                if anchor['href'] not in urlList:
                    urlList.append(anchor['href'])
        length = len(urlList)
        return urlList
    except urllib2.HTTPError, e:
        print e

def listAllUrl(urls):
    for x in urls:
        print x
        urls.remove(x)
        urls_tmp = getAllUrl(x)
        for y in urls_tmp:
            urls.append(y)
if __name__ == "__main__":
    urls = ['http://bobthemac.com']
    while len(urls) > 0:
        listAllUrl(urls)
In your function getAllUrl, you call getAllUrl again inside a for loop, which makes the recursion.
Elements are never removed from urlList once they are added, so urlList is never empty, and the recursion never breaks off.
That is why your program never ends until it runs out of memory.
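For illustration, here is a minimal iterative sketch (keeping the question's urllib2/urlparse/BeautifulSoup setup) that tracks the URLs it has already visited in a set, so each page is fetched at most once and the loop can actually finish; the startswith same-site check is a simplifying assumption:

#!/usr/bin/python
import urllib2
import urlparse
from BeautifulSoup import BeautifulSoup

def crawlSite(start_url):
    visited = set()        # every URL we have already fetched
    pending = [start_url]  # URLs still waiting to be fetched
    while pending:
        url = pending.pop(0)
        if url in visited:
            continue
        visited.add(url)
        try:
            page = urllib2.urlopen(url).read()
        except urllib2.HTTPError, e:
            print e
            continue
        soup = BeautifulSoup(page)
        for anchor in soup.findAll('a', href=True):
            full = urlparse.urljoin(url, anchor['href'])
            # only queue same-site URLs we have not seen yet
            if full.startswith(start_url) and full not in visited:
                pending.append(full)
    return visited

if __name__ == "__main__":
    for x in crawlSite('http://bobthemac.com'):
        print x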
Related
Currently the code finds the URLs for all gyms and puts them in a CSV like so:
https://www.lifetime.life/life-time-locations/al-vestavia-hills.html
https://www.lifetime.life/life-time-locations/az-biltmore.html
What I want it to do: I am having trouble extracting the address from each URL. My attempt at the address part is in the 4th and 5th lines from the bottom of "The code" below. The exact error is:
gymrow.append(address_line1[0].text)
IndexError: list index out of range
The code:
import urllib2
import urlparse
import csv
import time
import requests
import BeautifulSoup

initial_url = "https://www.lifetime.life"
request = urllib2.Request("https://www.lifetime.life/view-all-locations.html")
response = urllib2.urlopen(request)
soup = BeautifulSoup.BeautifulSoup(response)

with open('gyms2.csv', 'w') as gf:
    gymwriter = csv.writer(gf)
    for a in soup.findAll('a'):
        if '/life-time-locations/' in a['href']:
            gymurl1 = (urlparse.urljoin(initial_url, a.get('href')))
            sitemap_content = requests.get(gymurl1).content
            gymrow = [gymurl1]
            address_line1 = soup.select('p[class~=small m-b-sm p-t-1] > span[class~=btn-icon-text]')
            gymrow.append(address_line1[0].text)
            print(gymrow)
            gymwriter.writerow(gymrow)
            time.sleep(3)
Image of inspect element: the p class, span class and the address I want to scrape
Thank you very much!
You get the HTML from the subpage, but you never convert it to soup, so you keep searching the main page.
response = requests.get(gymurl)
sub_soup = BeautifulSoup(response.text)
I also had a problem with the CSS selector:
address_line = sub_soup.select('p.small.m-b-sm.p-t-1 span.btn-icon-text')
Some pages don't have elements in this place, which raises an error, so I use try/except to catch it.
Tested on Python 3, because on Python 2 .select() didn't work for me.
import requests
from bs4 import BeautifulSoup
import urllib.parse
import csv
import time

initial_url = "https://www.lifetime.life"

response = requests.get("https://www.lifetime.life/view-all-locations.html")
soup = BeautifulSoup(response.text)

with open('gyms2.csv', 'w') as gf:
    gymwriter = csv.writer(gf)
    for a in soup.findAll('a'):
        if '/life-time-locations/' in a['href']:
            gymurl = urllib.parse.urljoin(initial_url, a.get('href'))
            print(gymurl)
            response = requests.get(gymurl)
            sub_soup = BeautifulSoup(response.text)
            try:
                address_line = sub_soup.select('p.small.m-b-sm.p-t-1 span.btn-icon-text')
                gymrow = [gymurl, address_line[0].text.strip()]
                print(gymrow)
                gymwriter.writerow(gymrow)
                time.sleep(3)
            except Exception as ex:
                print(ex)
EDIT: Python 2 using find() instead of select()
import requests
import BeautifulSoup
import csv
import urllib2
import time

initial_url = "https://www.lifetime.life"

response = requests.get("https://www.lifetime.life/view-all-locations.html")
soup = BeautifulSoup.BeautifulSoup(response.text)

with open('gyms2.csv', 'w') as gf:
    gymwriter = csv.writer(gf)
    for a in soup.findAll('a'):
        if '/life-time-locations/' in a['href']:
            gymurl = urllib2.urlparse.urljoin(initial_url, a.get('href'))
            print(gymurl)
            response = requests.get(gymurl)
            sub_soup = BeautifulSoup.BeautifulSoup(response.text)
            try:
                address_line = sub_soup.find('p', {'class': 'small m-b-sm p-t-1'}).find('span', {'class': 'btn-icon-text'})
                gymrow = [gymurl, address_line.text]
                print(gymrow)
                gymwriter.writerow(gymrow)
                time.sleep(3)
            except Exception as ex:
                print(ex)
EDIT: It seems there are many versions of the pages, and every version may need a separate try/except. Instead of putting the second try/except inside the first except, I use continue to skip the remaining try/except blocks when the first try works correctly.
import requests
from bs4 import BeautifulSoup
import urllib.parse
import csv
import time

initial_url = "https://www.lifetime.life"

response = requests.get("https://www.lifetime.life/view-all-locations.html")
soup = BeautifulSoup(response.text)

with open('gyms2.csv', 'w') as gf:
    gymwriter = csv.writer(gf)
    for a in soup.findAll('a'):
        if '/life-time-locations/' in a['href']:
            gymurl = urllib.parse.urljoin(initial_url, a.get('href'))
            print(gymurl)
            response = requests.get(gymurl)
            sub_soup = BeautifulSoup(response.text)

            try:
                address_line = sub_soup.select('p.small.m-b-sm.p-t-1 span.btn-icon-text')
                gymrow = [gymurl, address_line[0].text.strip()]
                print('type 1:', gymrow)
                gymwriter.writerow(gymrow)
                time.sleep(3)
                continue  # go back to `for`
            except Exception as ex:
                print('ex:', ex)

            try:
                address_line = sub_soup.find('div', {'class': 'btn-resp-md'}).find('p')
                gymrow = [gymurl, address_line.text.strip()]
                print('type 2:', gymrow)
                gymwriter.writerow(gymrow)
                time.sleep(3)
                continue  # go back to `for`
            except Exception as ex:
                print('ex:', ex)

            try:
                address_line = sub_soup.find('p', {'class': 'm-b-grid'})
                gymrow = [gymurl, address_line.text.strip()]
                print('type 3:', gymrow)
                gymwriter.writerow(gymrow)
                time.sleep(3)
                continue  # go back to `for`
            except Exception as ex:
                print('ex:', ex)
This is my code:
https://pastebin.com/R11qiTF4
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as req
from urllib.parse import urljoin
import re

urls = ["https://www.helios-gesundheit.de"]
domain_list = ["https://www.helios-gesundheit.de/kliniken/schwerin/"]
prohibited = ["info", "news"]
text_keywords = ["Helios", "Helios"]
url_list = []
desired = "https://www.helios-gesundheit.de/kliniken/schwerin/unser-angebot/unsere-fachbereiche-klinikum/allgemein-und-viszeralchirurgie/team-allgemein-und-viszeralchirurgie/"

for x in range(len(domain_list)):
    url_list.append(urls[x] + domain_list[x].replace(urls[x], ""))
print(url_list)

def prohibitedChecker(prohibited_list, string):
    for x in prohibited_list:
        if x in string:
            return True
        else:
            return False
            break

def parseHTML(url):
    requestHTML = req(url)
    htmlPage = requestHTML.read()
    requestHTML.close()
    parsedHTML = soup(htmlPage, "html.parser")
    return parsedHTML

searched_word = "Helios"

for url in url_list:
    parsedHTML = parseHTML(url)
    href_crawler = parsedHTML.find_all("a", href=True)
    for href in href_crawler:
        crawled_url = urljoin(url, href.get("href"))
        print(crawled_url)
        if "www" not in crawled_url:
            continue
        parsedHTML = parseHTML(crawled_url)
        results = parsedHTML.body.find_all(string=re.compile('.*{0}.*'.format(searched_word)), recursive=True)
        for single_result in results:
            keyword_text_check = prohibitedChecker(text_keywords, single_result.string)
            if keyword_text_check != True:
                continue
            print(single_result.string)
I'm trying to print the contents of the desired variable. The problem is that my code never even requests the URL in desired, because it is not within the scope of the pages being scraped: the desired href is inside another href that sits on the page I'm currently scraping. I thought I'd fix this by adding another for loop inside the for loop on line 39 that requests every href found in the first pass, but that is too messy and not efficient.
Is there a way to get a list of every directory of a website URL?
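One way to reach links that only show up on subpages, without nesting more and more for loops, is to keep a queue of pages still to visit plus a set of pages already seen. Below is a rough sketch of that idea, reusing the parseHTML helper and urljoin from the code above; the depth limit and the startswith same-site check are assumptions you may want to adjust:

from collections import deque

def collectUrls(start_url, max_depth=2):
    seen = set([start_url])
    queue = deque([(start_url, 0)])   # (url, depth) pairs still to visit
    found = []
    while queue:
        url, depth = queue.popleft()
        found.append(url)
        if depth >= max_depth:
            continue
        page = parseHTML(url)                      # helper defined in the script above
        for href in page.find_all("a", href=True):
            link = urljoin(url, href.get("href"))
            # stay on the same site and never queue a page twice
            if link.startswith(start_url) and link not in seen:
                seen.add(link)
                queue.append((link, depth + 1))
    return found

If the page structure is as described, a call like collectUrls("https://www.helios-gesundheit.de/kliniken/schwerin/", max_depth=2) should reach the desired URL, since it sits one extra link away from the start page.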
I have to modify this code so the scraping keeps only the links that contain a specific keyword. In my case I'm scraping a newspaper page to find news related to the term 'Brexit'.
I've tried modifying the method parse_links so it only keeps the links (or 'a' tags) that contain 'Brexit' in them, but it doesn't seem to work.
Where should I place the condition?
import requests
from bs4 import BeautifulSoup
from queue import Queue, Empty
from concurrent.futures import ThreadPoolExecutor
from urllib.parse import urljoin, urlparse

class MultiThreadScraper:

    def __init__(self, base_url):
        self.base_url = base_url
        self.root_url = '{}://{}'.format(urlparse(self.base_url).scheme, urlparse(self.base_url).netloc)
        self.pool = ThreadPoolExecutor(max_workers=20)
        self.scraped_pages = set([])
        self.to_crawl = Queue(10)
        self.to_crawl.put(self.base_url)

    def parse_links(self, html):
        soup = BeautifulSoup(html, 'html.parser')
        links = soup.find_all('a', href=True)
        for link in links:
            url = link['href']
            if url.startswith('/') or url.startswith(self.root_url):
                url = urljoin(self.root_url, url)
                if url not in self.scraped_pages:
                    self.to_crawl.put(url)

    def scrape_info(self, html):
        return

    def post_scrape_callback(self, res):
        result = res.result()
        if result and result.status_code == 200:
            self.parse_links(result.text)
            self.scrape_info(result.text)

    def scrape_page(self, url):
        try:
            res = requests.get(url, timeout=(3, 30))
            return res
        except requests.RequestException:
            return

    def run_scraper(self):
        while True:
            try:
                target_url = self.to_crawl.get(timeout=60)
                if target_url not in self.scraped_pages:
                    print("Scraping URL: {}".format(target_url))
                    self.scraped_pages.add(target_url)
                    job = self.pool.submit(self.scrape_page, target_url)
                    job.add_done_callback(self.post_scrape_callback)
            except Empty:
                return
            except Exception as e:
                print(e)
                continue

if __name__ == '__main__':
    s = MultiThreadScraper("https://elpais.com/")
    s.run_scraper()
You need to import the re module to match the specific text value. Try the code below:
import re
links = soup.find_all('a', text=re.compile("Brexit"))
This should return only the links that contain Brexit.
You can get the text of the element using the getText() method and check whether the string actually contains "Brexit":
if "Brexit" in link.getText().split():
url = link["href"]
I added a check in this function. See if that does the trick for you:
def parse_links(self, html):
    soup = BeautifulSoup(html, 'html.parser')
    links = soup.find_all('a', href=True)
    for link in links:
        if 'BREXIT' in link.text.upper():   # <------ new if statement
            url = link['href']
            if url.startswith('/') or url.startswith(self.root_url):
                url = urljoin(self.root_url, url)
                if url not in self.scraped_pages:
                    self.to_crawl.put(url)
I have a script that scrapes a specific website (rl.odessa.ua), where the page number is defined with the ?start={} parameter.
This is my script:
from bs4 import BeautifulSoup
from urllib.request import urlopen

def parse():
    for i in range(0, 480, 5):
        html = urlopen('http://rl.odessa.ua/index.php/ru/poslednie-novosti?start={}'.format(i))
        soup = BeautifulSoup(html, 'lxml')
        for article in soup.findAll('article', class_ = 'item'):
            try:
                print('\t' + article.find('h1').find('a').get_text())
                print(article.find('p').get_text() + '\n' + '*'*80)
            except AttributeError as e:
                print(e)

parse()
At the bottom of the page there is a div.pagination containing an a.next link; here's a screenshot.
Is it bad practice to use range() instead of the pagination? Either way, please help me rewrite the code above to use the pagination.
Whichever method works for you is fine, but locating the next button would make things easier. It could be done as follows:
from bs4 import BeautifulSoup
from urllib.request import urlopen

def parse():
    base_url = 'http://rl.odessa.ua/index.php'
    url = 'http://rl.odessa.ua/index.php/ru/poslednie-novosti?start=0'
    while True:
        html = urlopen(url)
        soup = BeautifulSoup(html, 'lxml')
        for article in soup.findAll('article', class_ = 'item'):
            try:
                print('\t' + article.find('h1').find('a').get_text())
                print(article.find('p').get_text() + '\n' + '*'*80)
            except AttributeError as e:
                print(e)
        next_button = soup.find('a', class_='next', href=True)
        if next_button:
            url = base_url + next_button['href']
        else:
            break

parse()
I am trying to get some data from the following website. https://www.drugbank.ca/drugs
For every drug in the table, I need to go into its page and get the name and some other specific features, like categories and structured indication (click on a drug name to see the features I will use).
I wrote the following code, but the issue is that I can't make it handle pagination (as you can see, there are more than 2000 pages!).
import requests
from bs4 import BeautifulSoup

def drug_data():
    url = 'https://www.drugbank.ca/drugs/'
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "lxml")
    for link in soup.select('name-head a'):
        href = 'https://www.drugbank.ca/drugs/' + link.get('href')
        pages_data(href)

def pages_data(item_url):
    r = requests.get(item_url)
    soup = BeautifulSoup(r.text, "lxml")
    g_data = soup.select('div.content-container')
    for item in g_data:
        print item.contents[1].text
        print item.contents[3].findAll('td')[1].text
        try:
            print item.contents[5].findAll('td', {'class': 'col-md-2 col-sm-4'})[0].text
        except:
            pass
        print item_url

drug_data()
How can I scrape all of the data and handle pagination properly?
This page uses almost the same URL for all pages, so you can use a for loop to generate them:
def drug_data(page_number):
    url = 'https://www.drugbank.ca/drugs/?page=' + str(page_number)
    #... rest ...

# --- later ---

for x in range(1, 2001):
    drug_data(x)
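Filled out a little, that first approach could look like the sketch below; it is only a sketch: the td.name-head a selector is an assumption based on the question's selector and may need adjusting to the real markup.

import requests
from bs4 import BeautifulSoup

def drug_data(page_number):
    url = 'https://www.drugbank.ca/drugs/?page=' + str(page_number)
    r = requests.get(url)
    soup = BeautifulSoup(r.text, "lxml")
    # collect links to the individual drug pages listed on this page
    for link in soup.select('td.name-head a'):   # selector is an assumption
        href = 'https://www.drugbank.ca' + link.get('href')
        print(href)                              # or pass it to the question's pages_data(href)

for x in range(1, 2001):
    drug_data(x)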
Or use while and try/except to handle more than 2000 pages:
def drug_data(page_number):
    url = 'https://www.drugbank.ca/drugs/?page=' + str(page_number)
    #... rest ...

# --- later ---

page = 0
while True:
    try:
        page += 1
        drug_data(page)
    except Exception as ex:
        print(ex)
        print("probably last page:", page)
        break  # exit `while` loop
You can also find the URL of the next page in the HTML:
<a rel="next" class="page-link" href="/drugs?approved=1&c=name&d=up&page=2">›</a>
so you can use BeautifulSoup to get this link and follow it.
The code below displays the current URL, finds the link to the next page (using class="page-link" and rel="next") and loads it:
import requests
from bs4 import BeautifulSoup

def drug_data():
    url = 'https://www.drugbank.ca/drugs/'
    while url:
        print(url)
        r = requests.get(url)
        soup = BeautifulSoup(r.text, "lxml")

        #data = soup.select('name-head a')
        #for link in data:
        #    href = 'https://www.drugbank.ca/drugs/' + link.get('href')
        #    pages_data(href)

        # next page url
        url = soup.findAll('a', {'class': 'page-link', 'rel': 'next'})
        print(url)
        if url:
            url = 'https://www.drugbank.ca' + url[0].get('href')
        else:
            break

drug_data()
BTW: never use a bare except: pass, because you can get an error you didn't expect and you will never know why the code doesn't work. Better to display the error:
except Exception as ex:
    print('Error:', ex)