Windows: refresh Chrome tab 0 (or the current tab) via command line - Python

I'm trying to do this with Python's webbrowser module, but it does not have any Chromium-specific functions. Is there another way, perhaps with a batch script?

I use this myself (I wrote it quickly as it was only for personal use). With a little clean-up you should be able to get what you want. See https://developers.google.com/chrome-developer-tools/docs/remote-debugging
import urllib2
import urllib
import os
import subprocess
import json
from websocket import create_connection

def refresh_page(url):
    # Ask the DevTools HTTP endpoint for the list of open tabs.
    data = json.load(urllib2.urlopen('http://localhost:9222/json'))
    found_page = False
    for page in data:
        if page['url'].lower() == url.lower():
            found_page = True
            websocketURL = page['webSocketDebuggerUrl']
            ws = create_connection(websocketURL)
            obj = {"id": 0,
                   "method": "Page.reload",
                   "params": {
                       "ignoreCache": True,
                       "scriptToEvaluateOnLoad": ""
                   }}
            dev_request = json.dumps(obj)
            ws.send(dev_request)
            result = ws.recv()
            ws.close()
    if not found_page:
        raise Exception("No page found")

def open_or_refresh(file_name):
    # Turn the local path into the file:// URL that Chrome will report.
    file_name = "".join([f if f in r'\/:*?"<>|' else urllib.quote(f) for f in file_name])
    file_name = 'file:///' + file_name.replace('\\', '/')
    file_name = file_name.encode('ascii', 'ignore')
    try:
        refresh_page(file_name)
    except Exception:
        # Not open yet (or Chrome was not started with debugging): launch it.
        cmd = (r'"%(LOCALAPPDATA)s\Google\Chrome\Application\chrome.exe"' % os.environ
               + r' --remote-debugging-port=9222 "%s"' % file_name)
        subprocess.Popen(cmd)

open_or_refresh(r"C:\test.html")  # first call opens the file in Chrome
open_or_refresh(r"C:\test.html")  # second call refreshes the already-open tab
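For reference, the same idea as a rough Python 3 sketch (an illustration, not part of the original answer; it assumes Chrome was started with --remote-debugging-port=9222 and that the requests and websocket-client packages are installed):

import json
import requests
from websocket import create_connection

def reload_first_matching_tab(url_prefix):
    # List the open tabs via the DevTools HTTP endpoint.
    tabs = requests.get("http://localhost:9222/json").json()
    for tab in tabs:
        if tab.get("url", "").startswith(url_prefix):
            ws = create_connection(tab["webSocketDebuggerUrl"])
            ws.send(json.dumps({"id": 1, "method": "Page.reload",
                                "params": {"ignoreCache": True}}))
            ws.recv()  # wait for the acknowledgement
            ws.close()
            return True
    return False

reload_first_matching_tab("file:///C:/test.html")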

Related

Search haveibeenpwned for all emails on a domain

I am able to use haveibeenpwned to search for a single account's compromise. However, I could not find an option to use the API key to search for the compromise of all the email accounts on a domain. (For example, if the domain is xyz.com, I want to search for the compromise of abc@xyz.com, peter.charlie@xyz.com and so on.) I am aware of the notification email that I can sign up for, but that is a lengthy process and I prefer using the API.
So I wrote a script to search haveibeenpwned for all the email addresses of my domain, but it takes very long. I searched through a couple of GitHub projects, but I did not find any such implementation. Has anyone tried this before?
I have added the code below. I am using a multi-threading approach, but it still takes very long. Is there any other optimization strategy I can use? Please help. Thank you.
import requests, json
import threading
from time import sleep
import datetime
import splunklib.client as client
import splunklib.results as results
date = datetime.datetime.now()
from itertools import islice
import linecache
import sys

api_key = ''  # HIBP API key (value omitted here)

def PrintException():
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)

class myThread(threading.Thread):
    def __init__(self, threadID, name, list_emails):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.list_emails = list_emails

    def run(self):
        i = 0
        print "Starting " + self.name
        for email in self.list_emails:
            print i
            i = i + 1
            result = check_pasteaccount(email)
            print email
            print result
        print "Exiting " + self.name

def check_pasteaccount(account):
    account = str(account)
    result = ""
    URL = "https://haveibeenpwned.com/api/v3/pasteaccount/%s?truncateResponse=false" % (account)
    # print(URL)
    headers = {'hibp-api-key': api_key}
    try:
        r = requests.get(url=URL, headers=headers)
        # sleep(2)
        status_code = r.status_code
        if status_code == 200:
            data = r.text
            result = []
            for entry in json.loads(data):
                # Keep only pastes from the last 120 days.
                if int((date - datetime.datetime.strptime(entry['Date'], '%Y-%m-%dT%H:%M:%SZ')).days) > 120:
                    pass
                else:
                    result.append(['Title: {0}'.format(entry['Title']),
                                   'Source: {0}'.format(entry['Source']),
                                   'Paste ID: {0}'.format(entry['Id'])])
            if len(result) == 0:
                result = "No paste reported for given account and time frame."
            else:
                paste_result = ""
                for entry in result:
                    for item in entry:
                        paste_result += str(item) + "\r\n"
                    paste_result += "\r\n"
                result = paste_result
        elif status_code == 404:
            result = "No paste for the account"
        elif status_code == 429:
            # print "Limit exceeded, sleeping"
            sleep(5)
            result = check_pasteaccount(account)
        else:
            result = "Exception"
            print status_code
    except Exception as e:
        result = "Exception"
        PrintException()
    return result

def split_every(n, iterable):
    iterable = iter(iterable)
    for chunk in iter(lambda: list(islice(iterable, n)), []):
        yield chunk

def main():
    print datetime.datetime.now()
    # Fetching the list of email addresses from Splunk
    list_emails = connect_splunk()
    print datetime.datetime.now()
    i = 0
    list_split = split_every(1000, list_emails)
    threads = []
    for chunk in list_split:
        i = i + 1
        thread_name = "Thread" + str(i)
        thread = myThread(1, thread_name, chunk)
        thread.start()
        threads.append(thread)
    # Wait for all the threads to complete
    for t in threads:
        t.join()
    print "Completed Search"
Here's a shorter and maybe more efficient version of your script using the standard multiprocessing library instead of a hand-rolled thread system.
You'll need Python 3.6+ since we're using f-strings.
You'll need to install the tqdm module for fancy progress bars.
You can adjust the number of concurrent requests with the pool size parameter.
Output is written in machine-readable JSON Lines format into a timestamped file.
A single requests session is shared (per-worker), which means less time spent connecting to HIBP.
import datetime
import json
import multiprocessing
import random
import time

import requests
import tqdm

HIBP_PARAMS = {
    "truncateResponse": "false",
}

HIBP_HEADERS = {
    "hibp-api-key": "xxx",
}

sess = requests.Session()


def check_pasteaccount(account):
    while True:
        resp = sess.get(
            url=f"https://haveibeenpwned.com/api/v3/pasteaccount/{account}",
            params=HIBP_PARAMS,
            headers=HIBP_HEADERS,
        )
        if resp.status_code == 429:
            print("Quota exceeded, waiting for a while")
            time.sleep(random.uniform(3, 7))
            continue
        if resp.status_code >= 400:
            return {
                "account": account,
                "status": resp.status_code,
                "result": resp.text,
            }
        return {
            "account": account,
            "status": resp.status_code,
            "result": resp.json(),
        }


def connect_splunk():
    # TODO: return emails
    return []


def main():
    list_emails = [str(account) for account in connect_splunk()]
    datestamp = datetime.datetime.now().isoformat().replace(":", "-")
    output_filename = f"accounts-log-{datestamp}.jsonl"
    print(f"Accounts to look up: {len(list_emails)}")
    print(f"Output filename: {output_filename}")
    with multiprocessing.Pool(processes=16) as p:
        with open(output_filename, "a") as f:
            results_iterable = p.imap_unordered(
                check_pasteaccount, list_emails, chunksize=20
            )
            for result in tqdm.tqdm(
                results_iterable,
                total=len(list_emails),
                unit="acc",
                unit_scale=True,
            ):
                print(json.dumps(result, sort_keys=True), file=f)


if __name__ == "__main__":
    main()
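As a small follow-up sketch (not part of the original answer; the filename below is a placeholder for whatever name the run produced), the JSON Lines output can be read back and filtered, for example to list the accounts that actually had recent pastes:

import json

with open("accounts-log-2021-01-01T00-00-00.jsonl") as f:  # placeholder filename
    for line in f:
        record = json.loads(line)
        if record["status"] == 200 and record["result"]:
            print(record["account"], "->", len(record["result"]), "paste(s)")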

Using WGET or Python to download and rename attachments from CSV requiring basic authentication

I scraped a ticketing website that we were using, and I now have a CSV file which looks like this: ID, Attachment_URL, Ticket_URL. What I now need to do is download every attachment and rename the file with the Ticket_URL. The main issue is that when navigating to the Attachment_URL you must use basic authentication, and then you are redirected to an AWS S3 link. I have been able to download individual files using wget, but I have not been able to iterate through the entire list (35k rows or so), and I am not sure how I would name each file with the ticket ID. Any advice would be appreciated.
Got it.
To open the authenticated session:
# -*- coding: utf-8 -*-
import requests
import re
from bs4 import BeautifulSoup
import csv
import pandas as pd
import time

s = requests.session()

payload = {
    'user': '',
    'pw': ''
}
s.post('login.url.here', data=payload)

for i in range(1, 6000):
    testURL = s.get(
        'https://urlhere.com/efw/stuff&page={}'.format(i))
    soup = BeautifulSoup(testURL.content)
    table = soup.find("table", {"class": "table-striped"})
    table_body = table.find('tbody')
    rows = table_body.find_all('tr')[1:]
    print "The current page is: " + str(i)
    for row in rows:
        cols = row.find_all('a', attrs={'href': re.compile("^/helpdesk/")})
        # time.sleep(1)
        with open('fd.csv', 'a') as f:
            writer = csv.writer(f)
            writer.writerow(cols)
            print cols
Then I cleaned the links a bit in R, and used the following to download the files.
#! /usr/bin/env python

import threading
import os
from time import gmtime, strftime
from Queue import Queue

import requests

s = requests.session()

payload = {
    'user': '',
    'pw': ''
}
s.post('login', data=payload)


class log:
    def info(self, message):
        self.__message("info", message)

    def error(self, message):
        self.__message("error", message)

    def debug(self, message):
        self.__message("debug", message)

    def __message(self, log_level, message):
        date = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        print "%s [%s] %s" % (date, log_level, message)


class fetch:
    def __init__(self):
        self.temp_dir = "/tmp"

    def run_fetcher(self, queue):
        while not queue.empty():
            url, ticketid = queue.get()
            if ticketid.endswith("NA"):
                fileName = url.split("/")[-1] + 'NoTicket'
            else:
                fileName = ticketid.split("/")[-1]
            response = s.get(url)
            with open(os.path.join('/Users/Desktop/FolderHere', fileName + '.mp3'), 'wb') as f:
                f.write(response.content)
            print fileName
            queue.task_done()


if __name__ == '__main__':
    # load in classes
    q = Queue()
    log = log()
    fe = fetch()

    # get bucket name
    # Read in input file
    with open('/Users/name/csvfilehere.csv', 'r') as csvfile:
        for line in csvfile:
            id, url, ticket = line.split(",")
            q.put([url.strip(), ticket.strip()])

    # spin up fetcher workers
    threads = []
    for i in range(8):
        t = threading.Thread(target=fe.run_fetcher, args=(q,))
        t.daemon = True
        threads.append(t)
        t.start()

    # close threads
    [x.join() for x in threads]

    # close queue
    q.join()

    log.info("End")
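If the attachment endpoint only needs HTTP basic authentication (rather than the form login above), a shorter loop along these lines might work. This is a sketch based on assumptions about the CSV layout; the credentials, paths and output naming are placeholders:

import csv
import os
import requests

AUTH = ("username", "password")   # basic-auth credentials (placeholder)
OUT_DIR = "attachments"           # download destination (placeholder)
if not os.path.isdir(OUT_DIR):
    os.makedirs(OUT_DIR)

with open("tickets.csv") as csvfile:
    reader = csv.DictReader(csvfile, fieldnames=["ID", "Attachment_URL", "Ticket_URL"])
    for row in reader:
        # requests follows the redirect to S3 and drops the Authorization header
        # when the redirect crosses to a different host.
        resp = requests.get(row["Attachment_URL"], auth=AUTH, allow_redirects=True)
        if resp.ok:
            ticket_name = row["Ticket_URL"].rstrip("/").split("/")[-1]
            with open(os.path.join(OUT_DIR, ticket_name), "wb") as out:
                out.write(resp.content)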

<class 'urllib2.URLError'>: <urlopen error [Errno 2] No such file or directory>

What I am trying to do is show tweets on a web page using a Python CGI script.
This is my code. It works fine in the terminal, but it shows the errors listed below when run as a CGI script.
#!/usr/bin/python
import cgitb
cgitb.enable(False, '/var/www/twitter/errors')
import oauth2
import time
import urllib2
import json

url1 = "https://api.twitter.com/1.1/search/tweets.json"
params = {
    "oauth_version": "1.0",
    "oauth_nonce": oauth2.generate_nonce(),
    "oauth_timestamp": int(time.time())
}

consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''

consumer = oauth2.Consumer(key=consumer_key, secret=consumer_secret)
token = oauth2.Token(key=access_token, secret=access_secret)
params["oauth_consumer_key"] = consumer.key
params["oauth_token"] = token.key

prev_id = int("435458631669415936")

for i in range(1):
    url = url1
    params["q"] = "fifa"
    params["count"] = 15
    # params["geocode"] = ""
    # params["lang"] = "English"
    params["locale"] = "en"
    params["result_type"] = "popular"  # Example values: mixed, recent, popular
    # params["until"] = ""
    # params["since_id"] = ""
    # params["max_id"] = str(prev_id)
    req = oauth2.Request(method="GET", url=url, parameters=params)
    signature_method = oauth2.SignatureMethod_HMAC_SHA1()
    req.sign_request(signature_method, consumer, token)
    headers = req.to_header()
    url = req.to_url()
    # print headers
    # print url
    response = urllib2.Request(url)
    data = json.load(urllib2.urlopen(response))
    if data["statuses"] == []:
        print "end of data"
        break
    else:
        prev_id = int(data["statuses"][-1]["id"]) - 1
        print prev_id, i
        print data["statuses"]
        f = open("outfile_" + str(i) + ".txt", "w")
        json.dump(data["statuses"], f)
        f.close()
    time.sleep(5)
This is my code, and I am facing the following problem (I have removed the secrets and tokens for security):
global URLError = <class 'urllib2.URLError'>, err = error(2, 'No such file or directory')
<class 'urllib2.URLError'>: <urlopen error [Errno 2] No such file or directory>
args = (error(2, 'No such file or directory'),)
errno = None
filename = None
message = ''
reason = error(2, 'No such file or directory')
strerror = None
Change this line to see the errors inside the web page. It's possible the given logging directory doesn't exist, or exists but isn't writable by the web-server user:
cgitb.enable(True) # '/var/www/twitter/errors')
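If you do want to keep logging to that directory, a small sketch like the following (an assumption about the setup, not part of the original answer) creates it when missing and falls back to in-page tracebacks when it cannot be used:

import os
import cgitb

LOGDIR = '/var/www/twitter/errors'
try:
    if not os.path.isdir(LOGDIR):
        os.makedirs(LOGDIR)
    cgitb.enable(display=0, logdir=LOGDIR)  # log tracebacks to files only
except OSError:
    cgitb.enable()  # fall back to showing tracebacks in the browser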

Python Script with Gevent Pool, consumes a lot of memory, locks up

I have a very simple Python script using gevent.pool to download URLs (see below). The script runs fine for a couple of days and then locks up. I noticed that the memory usage is very high at that time. Am I using gevent incorrectly?
import sys
from gevent import monkey
monkey.patch_all()
import urllib2
from gevent.pool import Pool

inputFile = open(sys.argv[1], 'r')
urls = []
counter = 0
for line in inputFile:
    counter += 1
    urls.append(line.strip())
inputFile.close()

outputDirectory = sys.argv[2]

def fetch(url):
    try:
        body = urllib2.urlopen("http://" + url, None, 5).read()
        if len(body) > 0:
            outputFile = open(outputDirectory + "/" + url, 'w')
            outputFile.write(body)
            outputFile.close()
            print "Success", url
    except:
        pass

pool = Pool(int(sys.argv[3]))
pool.map(fetch, urls)
body = urllib2.urlopen("http://" + url, None, 5).read()
The line above reads the entire response into memory as a string. To prevent that, change fetch() as follows:
def fetch(url):
    try:
        u = urllib2.urlopen("http://" + url, None, 5)
        try:
            with open(outputDirectory + "/" + url, 'w') as outputFile:
                while True:
                    chunk = u.read(65536)
                    if not chunk:
                        break
                    outputFile.write(chunk)
        finally:
            u.close()
        print "Success", url
    except:
        print "Fail", url

Send file using POST from a Python script

Is there a way to send a file using POST from a Python script?
From: https://requests.readthedocs.io/en/latest/user/quickstart/#post-a-multipart-encoded-file
Requests makes it very simple to upload Multipart-encoded files:
with open('report.xls', 'rb') as f:
    r = requests.post('http://httpbin.org/post', files={'report.xls': f})
That's it. It really is just one call, and the file was sent. Let's check:
>>> r.text
{
  "origin": "179.13.100.4",
  "files": {
    "report.xls": "<censored...binary...data>"
  },
  "form": {},
  "url": "http://httpbin.org/post",
  "args": {},
  "headers": {
    "Content-Length": "3196",
    "Accept-Encoding": "identity, deflate, compress, gzip",
    "Accept": "*/*",
    "User-Agent": "python-requests/0.8.0",
    "Host": "httpbin.org:80",
    "Content-Type": "multipart/form-data; boundary=127.0.0.1.502.21746.1321131593.786.1"
  },
  "data": ""
}
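If you also need to send regular form fields alongside the file, or to set the filename and content type explicitly, requests accepts a tuple per file. A small sketch (the field names and form data here are illustrative, not from the original answer):

import requests

with open('report.xls', 'rb') as f:
    files = {'report': ('report.xls', f, 'application/vnd.ms-excel')}
    data = {'description': 'quarterly numbers'}  # hypothetical extra form field
    r = requests.post('http://httpbin.org/post', data=data, files=files)
print(r.status_code)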
Yes. You'd use the urllib2 module, and encode using the multipart/form-data content type. Here is some sample code to get you started -- it's a bit more than just file uploading, but you should be able to read through it and see how it works:
user_agent = "image uploader"
default_message = "Image $current of $total"

import logging
import os
from os.path import abspath, isabs, isdir, isfile, join
import random
import string
import sys
import mimetypes
import urllib2
import httplib
import time
import re


def random_string(length):
    return ''.join(random.choice(string.letters) for ii in range(length + 1))


def encode_multipart_data(data, files):
    boundary = random_string(30)

    def get_content_type(filename):
        return mimetypes.guess_type(filename)[0] or 'application/octet-stream'

    def encode_field(field_name):
        return ('--' + boundary,
                'Content-Disposition: form-data; name="%s"' % field_name,
                '', str(data[field_name]))

    def encode_file(field_name):
        filename = files[field_name]
        return ('--' + boundary,
                'Content-Disposition: form-data; name="%s"; filename="%s"' % (field_name, filename),
                'Content-Type: %s' % get_content_type(filename),
                '', open(filename, 'rb').read())

    lines = []
    for name in data:
        lines.extend(encode_field(name))
    for name in files:
        lines.extend(encode_file(name))
    lines.extend(('--%s--' % boundary, ''))
    body = '\r\n'.join(lines)

    headers = {'content-type': 'multipart/form-data; boundary=' + boundary,
               'content-length': str(len(body))}

    return body, headers


def send_post(url, data, files):
    req = urllib2.Request(url)
    connection = httplib.HTTPConnection(req.get_host())
    connection.request('POST', req.get_selector(),
                       *encode_multipart_data(data, files))
    response = connection.getresponse()
    logging.debug('response = %s', response.read())
    logging.debug('Code: %s %s', response.status, response.reason)


def make_upload_file(server, thread, delay=15, message=None,
                     username=None, email=None, password=None):
    delay = max(int(delay or '0'), 15)

    def upload_file(path, current, total):
        assert isabs(path)
        assert isfile(path)

        logging.debug('Uploading %r to %r', path, server)
        message_template = string.Template(message or default_message)

        data = {'MAX_FILE_SIZE': '3145728',
                'sub': '',
                'mode': 'regist',
                'com': message_template.safe_substitute(current=current, total=total),
                'resto': thread,
                'name': username or '',
                'email': email or '',
                'pwd': password or random_string(20)}
        files = {'upfile': path}

        send_post(server, data, files)

        logging.info('Uploaded %r', path)
        rand_delay = random.randint(delay, delay + 5)
        logging.debug('Sleeping for %.2f seconds------------------------------\n\n', rand_delay)
        time.sleep(rand_delay)

    return upload_file


def upload_directory(path, upload_file):
    assert isabs(path)
    assert isdir(path)

    matching_filenames = []
    file_matcher = re.compile(r'\.(?:jpe?g|gif|png)$', re.IGNORECASE)
    for dirpath, dirnames, filenames in os.walk(path):
        for name in filenames:
            file_path = join(dirpath, name)
            logging.debug('Testing file_path %r', file_path)
            if file_matcher.search(file_path):
                matching_filenames.append(file_path)
            else:
                logging.info('Ignoring non-image file %r', path)

    total_count = len(matching_filenames)
    for index, file_path in enumerate(matching_filenames):
        upload_file(file_path, index + 1, total_count)


def run_upload(options, paths):
    upload_file = make_upload_file(**options)

    for arg in paths:
        path = abspath(arg)
        if isdir(path):
            upload_directory(path, upload_file)
        elif isfile(path):
            upload_file(path, 1, 1)  # single file: current=1, total=1
        else:
            logging.error('No such path: %r' % path)

    logging.info('Done!')
It looks like Python requests does not handle extremely large multipart files.
The documentation recommends you look into requests-toolbelt.
Here's the pertinent page from their documentation.
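A short sketch of what that looks like with requests-toolbelt's MultipartEncoder, which streams the upload instead of reading the whole file into memory (the URL, field name and filename below are placeholders):

import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder

encoder = MultipartEncoder(
    fields={'file': ('large_file.bin', open('large_file.bin', 'rb'),
                     'application/octet-stream')}
)
response = requests.post(
    'http://example.com/upload',  # placeholder URL
    data=encoder,
    headers={'Content-Type': encoder.content_type},
)
print(response.status_code)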
The only thing that stops you from using urlopen directly on a file object is the fact that the built-in file object lacks a __len__ definition. A simple way around this is to create a subclass which provides urlopen with the correct file length.
I have also modified the Content-Type header in the code below.
import os
import urllib2

class EnhancedFile(file):
    def __init__(self, *args, **keyws):
        file.__init__(self, *args, **keyws)

    def __len__(self):
        return int(os.fstat(self.fileno())[6])

theFile = EnhancedFile('a.xml', 'r')
theUrl = "http://example.com/abcde"
theHeaders = {'Content-Type': 'text/xml'}

theRequest = urllib2.Request(theUrl, theFile, theHeaders)
response = urllib2.urlopen(theRequest)
theFile.close()

for line in response:
    print line
Chris Atlee's poster library works really well for this (particularly the convenience function poster.encode.multipart_encode()). As a bonus, it supports streaming of large files without loading an entire file into memory. See also Python issue 3244.
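For reference, a minimal sketch of the poster usage described above (Python 2; the upload URL and the form field name are placeholders):

import urllib2
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers

register_openers()  # install poster's streaming HTTP handlers into urllib2

datagen, headers = multipart_encode({'file': open('report.xls', 'rb')})
request = urllib2.Request('http://example.com/upload', datagen, headers)
print urllib2.urlopen(request).read()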
I am testing a Django REST API upload, and this works for me:
def test_upload_file(self):
    filename = "/Users/Ranvijay/tests/test_price_matrix.csv"
    data = {'file': open(filename, 'rb')}
    client = APIClient()
    # client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
    response = client.post(reverse('price-matrix-csv'), data, format='multipart')
    print response
    self.assertEqual(response.status_code, status.HTTP_200_OK)
pip install http_file
# import helper libraries
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import requests

# import http_file
from http_file import download_file

# create a new session
s = requests.Session()

# connect to the server through the created session
s.get('URL_MAIN', verify=False)

# download the file to 'local_filename' from 'fileUrl' through the created session
download_file('local_filename', 'fileUrl', s)
You may also want to have a look at httplib2, with examples. I find using httplib2 is more concise than using the built-in HTTP modules.
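For example, a short non-multipart POST with httplib2 might look like the sketch below (the URL is a placeholder; note that httplib2 does not build multipart bodies for you, so this sends the raw file contents with an explicit Content-Type):

import httplib2

http = httplib2.Http()
with open('a.xml', 'rb') as f:
    response, content = http.request(
        'http://example.com/abcde',  # placeholder URL
        method='POST',
        body=f.read(),
        headers={'Content-Type': 'text/xml'},
    )
print(response.status)
print(content)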
# Example using the poster library's MultipartParam for the multipart encoding.
from poster.encode import multipart_encode, MultipartParam
import urllib2
import json

def visit_v2(device_code, camera_code):
    image1 = MultipartParam.from_file("files", "/home/yuzx/1.txt")
    image2 = MultipartParam.from_file("files", "/home/yuzx/2.txt")
    datagen, headers = multipart_encode([('device_code', device_code), ('position', 3),
                                         ('person_data', person_data), image1, image2])
    # print "".join(datagen)  # careful: joining would consume the generator
    if server_port == 80:
        port_str = ""
    else:
        port_str = ":%s" % (server_port,)
    url_str = "http://" + server_ip + port_str + "/adopen/device/visit_v2"
    headers['nothing'] = 'nothing'
    request = urllib2.Request(url_str, datagen, headers)
    try:
        response = urllib2.urlopen(request)
        resp = response.read()
        print "http_status =", response.code
        result = json.loads(resp)
        print resp
        return result
    except urllib2.HTTPError, e:
        print "http_status =", e.code
        print e.read()
I tried some of the options here, but I had some issues with the headers (the 'files' field was arriving empty).
A simple mock to explain how I did the POST using requests and fixed the issue:
import requests

url = 'http://127.0.0.1:54321/upload'
file_to_send = '25893538.pdf'
files = {'file': (file_to_send,
                  open(file_to_send, 'rb'),
                  'application/pdf',
                  {'Expires': '0'})}

reply = requests.post(url=url, files=files)
print(reply.text)
More at https://requests.readthedocs.io/en/latest/user/quickstart/
To test this code, you could use a simple dummy server like this one (intended to run on GNU/Linux or similar):
import os
from flask import Flask, request

rx_file_listener = Flask(__name__)
files_store = "/tmp"

@rx_file_listener.route("/upload", methods=['POST'])
def upload_file():
    storage = os.path.join(files_store, "uploaded/")
    print(storage)
    if not os.path.isdir(storage):
        os.mkdir(storage)
    try:
        for file_rx in request.files.getlist("file"):
            name = file_rx.filename
            destination = "/".join([storage, name])
            file_rx.save(destination)
        return "200"
    except Exception:
        return "500"

if __name__ == "__main__":
    rx_file_listener.run(port=54321, debug=True)
