Multithreading requests without a URL list - python

I'm currently trying to make my requests faster by multithreading them, but I'm not sure how to do it the way I want. I know about grequests, but it seems to require a URL list. I have code with a starting number contained in the URL and would like all threads to stop after one of them gets a status_code of 200.
I have tried to accomplish this with grequests but couldn't make it work. I also tried threading, but I don't know how to stop all threads once the working URL has been found.
import requests
import webbrowser

def url_request(number):
    url = "http://website.com/download/" + str(number) + ".zip"
    r = requests.head(url)
    if r.status_code == 404:
        print(url + " - 404 Not Found!")
        number += 1
        url_request(number)
    elif r.status_code == 200:
        webbrowser.open(url)
        print(url + " - 200 Found!")

if __name__ == "__main__":
    url_request(int(input("Starting number: ")))
What I want the code to do is execute multiple requests.head calls at once, counting up from the number entered at "Starting number", and stop after one of the threads finds a URL with status_code 200.

Ok, figured it out. Thanks for your advice.
Here's the code:
from gevent import monkey
monkey.patch_all()
import grequests
import webbrowser

def url_request_threaded(startnumber, stopnumber):
    urls = []
    for i in range(startnumber, stopnumber):
        urls.append("http://website.com/download/" + str(i) + ".zip")
    gr = (grequests.head(url, stream=False) for url in urls)
    gresponses = grequests.imap(gr, size=10)
    try:
        for response in gresponses:
            if response.status_code == 404:
                print(response.url + " - 404 Not Found!")
            elif response.status_code == 200:
                webbrowser.open(response.url)
                print(response.url + " - 200 Found!")
                raise SystemExit
    except SystemExit:
        pass

if __name__ == "__main__":
    while True:
        try:
            startn = input("Starting number: ")
            startn = int(startn)
            stopn = input("End number: ")
            stopn = int(stopn)
            url_request_threaded(startn, stopn)
        except ValueError:
            print("Must be a number!")
            continue
        break
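For comparison, the same stop-after-first-200 idea can be sketched with just the standard library's concurrent.futures; this is a minimal sketch that reuses the question's placeholder URL pattern, and the worker count of 10 is an arbitrary choice:

from concurrent.futures import ThreadPoolExecutor, as_completed
import webbrowser
import requests

def head_status(number):
    # Return the URL for this number together with its HTTP status code.
    url = "http://website.com/download/" + str(number) + ".zip"
    return url, requests.head(url, timeout=5).status_code

def find_first_200(startnumber, stopnumber, workers=10):
    with ThreadPoolExecutor(max_workers=workers) as executor:
        futures = [executor.submit(head_status, n) for n in range(startnumber, stopnumber)]
        for future in as_completed(futures):
            url, status = future.result()
            if status == 200:
                # Cancel requests that haven't started yet; ones already running still finish.
                for f in futures:
                    f.cancel()
                webbrowser.open(url)
                return url
            print(url + " - " + str(status))
    return None

if __name__ == "__main__":
    find_first_200(int(input("Starting number: ")), int(input("End number: ")))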

Related

How to get simple threading to work Python

I want to know how I can add simple threading to my code. At the moment it checks the sites one by one, and if a site isn't reachable it waits for the timeout before continuing with the next one, which slows everything down.
import requests
import sys
import time
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

with open("websites.txt", 'r') as websites:
    websites = websites.read().splitlines()
with open("para1.txt", 'r') as para1:
    para1 = para1.read().splitlines()
with open("para2.txt", 'r') as para2:
    para2 = para2.read().splitlines()

def main():
    for i in para1:
        for j in para2:
            for m in websites:
                try:
                    res = requests.get(m + i + j, verify=False, timeout=10)
                    print(m + i + j)
                    if res.status_code == 200:
                        print('Yes')
                    else:
                        print('No')
                except Exception as e:
                    print(e)
                except KeyboardInterrupt:
                    sys.exit()
                finally:
                    res.close()
                    time.sleep(1)

if __name__ == '__main__':
    main()
You can apply ThreadPoolExecutor by moving the part of the code that performs the requests into a separate function and passing it as an argument:
import urllib3
import requests
from concurrent.futures import ThreadPoolExecutor, CancelledError, as_completed

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def check_func(url):
    response = requests.get(url, verify=False, timeout=10)
    return response.status_code == 200

def main():
    with open("websites.txt") as website_f, open("para1.txt") as para1_f, \
            open("para2.txt", 'r') as para2_f, ThreadPoolExecutor(max_workers=4) as executor:
        # read the files into lists first, since a file object is exhausted after one pass
        websites = [line.rstrip() for line in website_f]
        para1s = [line.rstrip() for line in para1_f]
        para2s = [line.rstrip() for line in para2_f]
        tasks = {}
        for website in websites:
            for para1 in para1s:
                for para2 in para2s:
                    url = website + para1 + para2
                    tasks[executor.submit(check_func, url)] = url
        for task in as_completed(tasks):
            url = tasks[task]
            try:
                result = task.result()
            except KeyboardInterrupt:  # handling Ctrl + C
                for task in tasks:
                    task.cancel()  # won't cancel already finished or running futures
            except CancelledError:  # will never happen (normally)
                pass
            except Exception as e:
                print(url, "-", "ERROR", e)
            else:
                print(url, "-", "GOOD" if result else "BAD")

if __name__ == "__main__":
    main()
P.S. I haven't tested the entire code, so if there are any problems with it, write in the comments.
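If per-URL exception details aren't needed, a simpler variant of the same idea (a sketch under the same file-layout assumptions, with request errors folded into a False result) is executor.map, which yields results in input order:

from concurrent.futures import ThreadPoolExecutor
import requests

def check_func(url):
    try:
        return requests.get(url, verify=False, timeout=10).status_code == 200
    except requests.RequestException:
        return False

def check_all(urls, workers=4):
    # executor.map returns results in the same order as the input URLs.
    with ThreadPoolExecutor(max_workers=workers) as executor:
        for url, ok in zip(urls, executor.map(check_func, urls)):
            print(url, "-", "GOOD" if ok else "BAD")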

requests_html stop website from redirecting

I am trying to scrape the following link https://9anime.to/watch/one-piece-dub.34r/r2wjlq using python/requests_html.
My problem is that it gets auto-redirected to the default server tab instead of the mp4upload tab; I'm trying to find a fix for this but can't figure it out.
Below is the code
import re
import requests
import cloudscraper
from urllib import parse
from bs4 import BeautifulSoup
from requests_html import HTMLSession

base_url = 'https://9anime.to'

class nine_scraper:
    def get_ep_links(url):
        html = nine_scraper.get_html(url, True)
        servers = html.find('div', id='servers-container')
        if servers:
            results = []
            mp4upload_results = []
            mp4upload = servers.find('div', attrs={'data-id': '35'})
            mp4upload_eps = mp4upload.find_all('a', href=True)
            for ep in mp4upload_eps:
                x = (ep.get('href'), ep.text)
                mp4upload_results.append(x)
            for result in mp4upload_results:
                results.append(base_url + result[0])
            return results
        else:
            print('No servers found!!')

    def get_series_info(url):
        return

    def get_servers(html):
        return

    def find_download(url):
        html = nine_scraper.get_html(url, True)

    def search(query):
        if '&page=' in query:
            query = query.split('&page=')
            search_url = base_url + '/search?keyword=' + parse.quote(query[0]) + '&page=' + query[1]
        else:
            search_url = base_url + '/search?keyword=' + parse.quote(query)
        html = nine_scraper.get_html(search_url, False)
        film_list = html.find('div', class_='film-list')
        if film_list:
            results = []
            prev_page = html.find('a', class_='pull-left')
            next_page = html.find('a', class_='pull-right')
            films = film_list.find_all('div', class_='inner')
            for film in films:
                results.append((film.find('a', class_='name').text.strip(), film.find('a', class_='name').get('href').strip()))
            if prev_page.get('href'):
                param = parse.urlsplit(base_url + '/' + prev_page.get('href')).query
                url = parse.unquote_plus(param.replace('keyword=', ''), encoding='utf-8')
                results.append(('Previous page', url))
            if next_page.get('href'):
                param = parse.urlsplit(base_url + '/' + next_page.get('href')).query
                url = parse.unquote_plus(param.replace('keyword=', ''), encoding='utf-8')
                results.append(('Next page', url))
            return results
        else:
            print('No results found!')

    def get_html(url, render_js=False):  # Load webpage and return its html
        try:
            if render_js:  # Check if the page needs to render javascript; if so, use 'requests_html'
                session = HTMLSession()  # Make a GET request to your webpage, using 'Requests'
                resp = session.get(url, timeout=10)
                resp.raise_for_status()  # Raise an exception if the response doesn't come back 200-400
                resp.html.render(timeout=10)  # Render the javascript
                html = BeautifulSoup(resp.html.html, 'html.parser')  # Parse the html data we just got with 'BeautifulSoup4'
                return html  # Return the parsed html
            else:  # Use 'cloudscraper' since we don't need to load any javascript
                c_scraper = cloudscraper.create_scraper()  # Make a GET request to your webpage, using 'Requests'
                resp = c_scraper.get(url)
                resp.raise_for_status()  # Raise an exception if the response doesn't come back 200-400
                html = BeautifulSoup(resp.content, 'html.parser')  # Parse the html data we just got with 'BeautifulSoup4'
                return html  # Return the parsed html
        except requests.HTTPError as e:
            print(f'HTTP error occurred: {e}')
        except requests.ConnectionError as e:
            print(f'Connection Error occurred: {e}')
        except requests.Timeout as e:
            print(f'Timeout Error occurred: {e}')
        except requests.RequestException as e:
            print(f'General Error occurred: {e}')
        except Exception as e:
            print(f'Other error occurred: {e}')
        except KeyboardInterrupt:
            print("Someone closed the program")

import sys
from os import system, name
from scrapers import nine_scraper

def screen_clear():
    # for mac and linux (os.name is 'posix')
    if name == 'nt':
        _ = system('cls')
    else:
        _ = system('clear')

def main_menu():
    while True:
        screen_clear()
        print('------9anime downloader------\n[1] Search \n[2] Download \n[3] Exit\n-----------------------------\n')
        main_choice = input('Enter your choice [1-3] >')
        if main_choice == '1':
            search_menu()
            break
        elif main_choice == '2':
            continue
        elif main_choice == '3':
            screen_clear()
            sys.exit()
        else:
            continue

def search_menu(query=False):
    screen_clear()
    print('--------------9anime downloader/search--------------\n')
    if query:
        search_results = nine_scraper.search(query)
        results_menu(search_results)
    else:
        query = input('Please enter the name of the anime >')
        if query:
            search_results = nine_scraper.search(query)
            results_menu(search_results)

def results_menu(results):
    for num, result in enumerate(results, 1):
        title = result[0]
        link = result[1]
        if 'Previous page' not in title:
            if 'Next page' in title:
                n = True
                print('[N] ' + title)
            else:
                print(f'[{num}] {title}')
        else:
            p = True
            print('[P] ' + title)
    print('[M] Main menu')
    titles, links = map(list, zip(*results))
    while True:
        search_choice = input('Enter choice >')
        try:
            search_choice = int(search_choice)
            if 1 <= search_choice <= len(results) + 1:
                print(links[search_choice - 1])
                print(titles[search_choice - 1])
                ep_links = nine_scraper.get_ep_links(links[search_choice - 1])
                for link in ep_links:
                    print(link)
                    nine_scraper.find_download(link)
                # series_menu(links[search_choice - 1])
                break
        except ValueError:
            if search_choice.lower() == 'm':
                main_menu()
                break
            elif search_choice.lower() == 'p':
                if p:
                    url = links[-2]
                    search_menu(url)
                    break
                continue
            elif search_choice.lower() == 'n':
                if n:
                    url = links.pop()
                    search_menu(url)
                    break
                continue

def series_menu(url):
    info = nine_scraper.get_series_info()

main_menu()
I know it has to be some javascript that is redirecting the page, but I can't figure out what I need to do in order to stop that. Any help would be very appreciated!
Using requests_html you can set allow_redirects=False like this:
r = session.get(url, allow_redirects=False)
Now your request should go only to the requested URL.
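For illustration, here is a minimal sketch of inspecting the unredirected response for the URL from the question; with allow_redirects=False, an HTTP 3xx reply keeps its Location header instead of being followed:

from requests_html import HTMLSession

session = HTMLSession()
r = session.get('https://9anime.to/watch/one-piece-dub.34r/r2wjlq', allow_redirects=False)

print(r.status_code)              # a 3xx here means the server itself tried to redirect
print(r.headers.get('Location'))  # redirect target if any, otherwise None
print(r.history)                  # stays empty because no redirects were followed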

Trouble multiprocessing - "The parameter is incorrect" on p.start()

I've been having a lot of trouble with multiprocessing - I've literally been trying for hours and can't get it right. Here's my code, commented as best I could.
I've included all my code, as I don't know exactly what's causing the error.
The error occurs on line 74, on p.start().
The most relevant part of the code is at the bottom of the question.
Here are my imports:
import urllib
import socket
import multiprocessing as mp
import queue
import requests
Header used for a higher chance of success when connecting to a website:
headers={'User-agent' : 'Mozilla/5.0'}
The main function takes four parameters: the queue, the URL list, the output file, and the list of vulnerable URLs.
def mainFunction(q, URLList, Output, vulnURLS):
This list is used to check whether the page source contains any of the errors in the list after a query string (') is appended to the end of the URL:
    queries = ['SQL syntax', 'mysql_fetch', 'mysql_num_rows', 'mySQL Error', 'mySQL_connect()', 'UNION SELECT', 'MySQL server version']
This puts the URL in the correct format before testing for injection points.
    URLReplace = [("['", ""), ("']", ""), ("\n", ""), ("https://", "http://"), ("\s", "%20"), ("\s", "%20")]
    URL = ''.join(str(URLList))
    for URL in URLList:
        if (z < len(URLReplace)):
            URL = URL.replace(URLReplace[z])
            z = z + 1
        URL = (URL + "'")
This is the try block, where it attempts to connect and scrape the HTML from the webpage.
    try:
        req = requests.get(URL, timeout=2)
        htmlObject = urllib.request.urlopen(URL)
This iterates through the list to check for any possible vulnerabilities. Also returns 404/400 messages.
        if (y < len(queries)):
            if queries[x] in htmlObject:
                print("\t [+] " + URL)
                vulnURLS.append(URL)
                Output.open()
                for VURLS in vulnURLS:
                    Output.write(VURLS + '\n')
                Output.close()
            y = y + 1
        else:
            print("\t [-] " + URL)
    except urllib.error.HTTPError as e:
        if e.code == 404:
            print("\t [-] Page not found.")
        if e.code == 400:
            print("\t [+] " + URL)
    except urllib.error.URLError as e:
        print("\t [-] URL Timed Out")
    except socket.timeout as e:
        print("\t [-] URL Timed Out")
    except socket.error as e:
        print("\t [-] Error in URL")
Here's the important part, where I use the Queue and multiprocessing.
if __name__ == '__main__':
    q = mp.Queue()
    URLList = [i.strip().split() for i in open('sites.txt').readlines()]
    Output = open('output.txt', 'r')
    vulnURLS = []
    p = mp.Process(target=mainFunction, args=(q, URLList, Output, vulnURLS))
    p.start()
    q.put(mainFunction(URLList))
    q.close()
    q.join_thread()
    p.join()
Please help me out with this problem. I've been stuck on it for hours and am getting very frustrated that I cannot find the solution. Every module I look at I follow to a T, and I get this same error.
I have tried multi-threading, but it is extremely slow and unstable when compared to multiprocessing.
Change to the following:
p = mp.Process(target=mainFunction, args=(q, Output))
p.start()
for url in URLList:
    q.put(url)
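The answer leaves the consuming side implicit; here is a minimal sketch (a hypothetical worker function, not the asker's mainFunction) of how a child process can pull URLs from the queue, using None as an end-of-input sentinel and opening the output file inside the child rather than passing an open file handle to Process:

import multiprocessing as mp

import requests

def worker(q, output_path):
    # Open the output file inside the child process instead of passing an open file handle.
    with open(output_path, 'a') as output:
        while True:
            url = q.get()
            if url is None:          # sentinel: no more URLs
                break
            try:
                res = requests.get(url, timeout=2)
                output.write(f"{url} {res.status_code}\n")
            except requests.RequestException as e:
                print(f"\t [-] {url}: {e}")

if __name__ == '__main__':
    q = mp.Queue()
    p = mp.Process(target=worker, args=(q, 'output.txt'))
    p.start()
    with open('sites.txt') as f:
        for line in f:
            q.put(line.strip())
    q.put(None)                      # tell the worker to stop
    p.join()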

Best way to make thousands of get requests in python

Right now I am working on a python script which takes in a list of URLs as an argument, then performs a GET request on each URL and searches through the output with XPath to fingerprint the website. It seems to work like a charm when the list is around 50 sites long, but anything after that causes the program to slow down to the point where it stops (usually around 150 sites). Scroll down to where you see the main app logic; the relevant code is below it. Right now I am just using 50 elements in the array and it works fine, but anything more makes the entire program stop. Any suggestions would be greatly appreciated!
#!/usr/bin/python
# Web Scraper
# 1.0

# Imports for file
from multiprocessing.dummy import Pool as ThreadPool
from threading import Thread
from Queue import Queue
from lxml import html
import requests
import time
import sys

# Get Raw HTML
def scrape(url):
    try:
        page = requests.get(url, timeout=2.0)
        if page.status_code == requests.codes.ok:
            html_page = html.fromstring(page.content)
            s = requests.session()
            s.close()
            return html_page
        else:
            s = requests.session()
            s.close()
            return False
    except:
        s = requests.session()
        s.close()
        return False

# Format URL
def format_url(url):
    if url.find("http://") == -1:
        url = "http://" + url
    if url[-1] == "/":
        url = url[:-1]
    return url

# Check if WordPress Site
def check_wordpress(tree):
    scripts = tree.xpath("//script[contains(@src,'wp-content')]")
    if len(scripts) > 0:
        return True
    return False

# Check WordPress Version
def wordpress_version(tree):
    type = tree.xpath("//meta[@name='generator']/@content")
    version = 0
    if len(type) > 0:
        details = type[0].split()
        if len(details) > 1 and details[0] == "WordPress":
            if len(details) > 1:
                version = details[1]
            else:
                version = type[0]
    return version

# Find Contact Page
def find_contact_page(tree):
    contact = tree.xpath("//a[contains(text(),'Contact')]/@href")
    try_xpath = 1
    while len(contact) == 0:
        if try_xpath == 1:
            contact = tree.xpath("//span[contains(text(),'Contact')]/../@href")
        elif try_xpath == 2:
            contact = tree.xpath("//p[contains(text(),'Contact')]/../@href")
        elif try_xpath == 3:
            break
        try_xpath += 1
    if len(contact) > 0:
        contact = contact[0]
        if contact.find('#') == -1:
            if contact[0] == '/':
                contact = url + "" + contact
            print contact

# Juicer method
def juice(url):
    url = format_url(url)
    string = url
    tree = scrape(url)
    if tree == False:
        return string + " \t\t\t No XML tree"
    elif check_wordpress(tree) == True:
        version = wordpress_version(tree)
        return string + " \t\t\t WordPress: " + str(version)
    else:
        return string + " \t\t\t Not WordPress"

# Main App Logic Below ------------------------------------->
# Open list of websites from given argument
list = open(sys.argv[1], 'r').read().split('\n')

# Juice url
def juice_url():
    while True:
        url = q.get()
        result = juice(url)
        print result
        q.task_done()

# Create concurrent queues
concurrent = 50
q = Queue(concurrent)
for i in range(concurrent):
    t = Thread(target=juice_url)
    t.daemon = True
    t.start()

# Add URL to Queue
time1 = time.time()
for url in list[0:50]:
    q.put(url)
q.join()

# Calculate total time
total = time.time() - time1
print "Total Time: %f" % total
print "Average Time: %f" % (total/50)

Count tweets for a hashtag

I want to know if there is any way to count the hashtags from Twitter using the streaming API, like hashtags.org does. I have made a script using python and tweetstream and I can make a count, but for TTs it is always 180k; I believe this is because of a limit of 50 tweets/second. This is the code:
#!/usr/bin/python
import tweetstream
import sys

print sys.argv
twitterUsername = "user"
twitterPassword = "pass"
twitterWordFilter = sys.argv[1]

try:
    with tweetstream.FilterStream(twitterUsername, twitterPassword, track=twitterWordFilter) as stream:
        for tweet in stream:
            print stream.count
except tweetstream.ConnectionError, e:
    print "Disconnected from twitter. Reason:", e.reason
def get_tweet_count(term):
    total_tweet_count = 0
    page = 1
    while True:
        url = ('http://search.twitter.com/search.json?q='
               + urllib.quote(term) + '&rpp=100&page=' + str(page))
        response = urllib2.urlopen(url)
        json_content = response.read()
        tweets = json.loads(json_content)['results']
        total_tweet_count += len(tweets)
        # Are we at the last page or have we run out of pages?
        if len(tweets) < 100 or page >= 15:
            break
        max_id = tweets[0]['id_str']
        page += 1
        # Wait so twitter doesn't get annoyed with us
        time.sleep(1)
    return total_tweet_count
I adapted this script from code on GitHub.
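For illustration, a minimal sketch (Python 3, assuming the stream yields tweet dicts with the usual entities.hashtags structure of the v1.1 streaming payload) of tallying hashtag frequencies as tweets arrive:

from collections import Counter

def count_hashtags(stream, limit=1000):
    # Tally hashtag text over the first `limit` tweets of an iterable of tweet dicts.
    counts = Counter()
    for i, tweet in enumerate(stream, start=1):
        for tag in tweet.get('entities', {}).get('hashtags', []):
            counts[tag['text'].lower()] += 1
        if i >= limit:
            break
    return counts

# e.g. print(count_hashtags(stream).most_common(10)) with the FilterStream above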
