Selenium give file name when downloading - python

I am working with a Selenium script where I am trying to download an Excel file and give it a specific name.
Is there any way that I can give the file being downloaded a specific name?
Code:
#!/usr/bin/python
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
profile = FirefoxProfile()
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "text/plain, application/vnd.ms-excel, text/csv, text/comma-separated-values, application/octet-stream")
profile.set_preference("browser.download.dir", "C:\\Downloads" )
browser = webdriver.Firefox(firefox_profile=profile)
browser.get('https://test.com/')
browser.find_element_by_partial_link_text("Excel").click() # Download file
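Note: in my experience Firefox only honours browser.download.dir when browser.download.folderList is also set to 2. A hedged sketch of the same profile with that preference added (this still does not rename the file, it only controls where it lands):
profile = FirefoxProfile()
profile.set_preference("browser.download.folderList", 2)  # 2 = use the custom directory below
profile.set_preference("browser.download.dir", "C:\\Downloads")
profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                       "application/vnd.ms-excel, application/octet-stream")
browser = webdriver.Firefox(firefox_profile=profile)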

Here is another simple solution: wait until the download has completed and then read the downloaded file name from the browser's downloads page.
Chrome:
import time

# method to get the downloaded file name
def getDownLoadedFileName(waitTime):
    driver.execute_script("window.open()")
    # switch to new tab
    driver.switch_to.window(driver.window_handles[-1])
    # navigate to chrome downloads
    driver.get('chrome://downloads')
    # define the endTime
    endTime = time.time() + waitTime
    while True:
        try:
            # get downloaded percentage
            downloadPercentage = driver.execute_script(
                "return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('#progress').value")
            # check if downloadPercentage is 100 (otherwise the script will keep waiting)
            if downloadPercentage == 100:
                # return the file name once the download is completed
                return driver.execute_script("return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('div#content #file-link').text")
        except:
            pass
        time.sleep(1)
        if time.time() > endTime:
            break
Firefox:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def getDownLoadedFileName(waitTime):
    # remember the handles that exist before opening the downloads tab
    currentHandles = driver.window_handles
    driver.execute_script("window.open()")
    WebDriverWait(driver, 10).until(EC.new_window_is_opened(currentHandles))
    driver.switch_to.window(driver.window_handles[-1])
    driver.get("about:downloads")
    endTime = time.time() + waitTime
    while True:
        try:
            fileName = driver.execute_script("return document.querySelector('#contentAreaDownloadsView .downloadMainArea .downloadContainer description:nth-of-type(1)').value")
            if fileName:
                return fileName
        except:
            pass
        time.sleep(1)
        if time.time() > endTime:
            break
Once you click on the download link/button, just call the above method.
# click on download link
browser.find_element_by_partial_link_text("Excel").click()
# get the downloaded file name
latestDownloadedFileName = getDownLoadedFileName(180) #waiting 3 minutes to complete the download
print(latestDownloadedFileName)
JAVA + Chrome:
Here is the method in Java.
public String waitUntilDownloadCompleted(WebDriver driver) throws InterruptedException {
    // Store the current window handle
    String mainWindow = driver.getWindowHandle();
    // open a new tab
    JavascriptExecutor js = (JavascriptExecutor) driver;
    js.executeScript("window.open()");
    // switch to the newly opened window
    for (String winHandle : driver.getWindowHandles()) {
        driver.switchTo().window(winHandle);
    }
    // navigate to chrome downloads
    driver.get("chrome://downloads");
    JavascriptExecutor js1 = (JavascriptExecutor) driver;
    // wait until the file is downloaded
    Long percentage = (long) 0;
    while (percentage != 100) {
        try {
            percentage = (Long) js1.executeScript("return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('#progress').value");
            //System.out.println(percentage);
        } catch (Exception e) {
            // Nothing to do, just wait
        }
        Thread.sleep(1000);
    }
    // get the latest downloaded file name
    String fileName = (String) js1.executeScript("return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('div#content #file-link').text");
    // get the latest downloaded file url
    String sourceURL = (String) js1.executeScript("return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('div#content #file-link').href");
    // file downloaded location
    String downloadedAt = (String) js1.executeScript("return document.querySelector('downloads-manager').shadowRoot.querySelector('#downloadsList downloads-item').shadowRoot.querySelector('div.is-active.focus-row-active #file-icon-wrapper img').src");
    System.out.println("Download details");
    System.out.println("File Name :- " + fileName);
    System.out.println("Downloaded path :- " + downloadedAt);
    System.out.println("Downloaded from url :- " + sourceURL);
    // print the details
    System.out.println(fileName);
    System.out.println(sourceURL);
    // close the downloads tab
    driver.close();
    // switch back to main window
    driver.switchTo().window(mainWindow);
    return fileName;
}
This is how to call it in your Java code.
// download triggering step
downloadExe.click();
// now wait until the download finishes and then get the file name
System.out.println(waitUntilDownloadCompleted(driver));
Output:
Download details
File Name :- RubyMine-2019.1.2 (7).exe
Downloaded path :- chrome://fileicon/C%3A%5CUsers%5Csupputuri%5CDownloads%5CRubyMine-2019.1.2%20(7).exe?scale=1.25x
Downloaded from url :- https://download-cf.jetbrains.com/ruby/RubyMine-2019.1.2.exe
RubyMine-2019.1.2 (7).exe

You cannot specify the name of the download file through Selenium. However, you can download the file, find the latest file in the download folder, and rename it as you want.
Note: methods borrowed from Google searches may have errors, but you get the idea.
import os
import shutil
# Initial_path is the directory Selenium downloads into
filename = max([os.path.join(Initial_path, f) for f in os.listdir(Initial_path)], key=os.path.getctime)
shutil.move(filename, os.path.join(Initial_path, r"newfilename.ext"))

Hope this snippet is not too confusing. It took me a while to create, and it is really useful because there has not been a clear answer to this problem using just this library.
import os
import time

def tiny_file_rename(newname, folder_of_download):
    filename = max([f for f in os.listdir(folder_of_download)], key=lambda xa: os.path.getctime(os.path.join(folder_of_download, xa)))
    if '.part' in filename:
        time.sleep(1)
        os.rename(os.path.join(folder_of_download, filename), os.path.join(folder_of_download, newname))
    else:
        os.rename(os.path.join(folder_of_download, filename), os.path.join(folder_of_download, newname))
Hope this saves someone's day, cheers.
EDIT: Thanks to @Om Prakash for editing my code; it reminded me that I didn't explain the code thoroughly.
Using the max([]) function could lead to a race condition, leaving you with an empty or corrupted file (I know it from experience). You want to check whether the file is completely downloaded in the first place. This is because Selenium doesn't wait for the file download to complete, so when you check for the last created file, an incomplete file will show up in your generated list and the script will try to move that file. Even then, you are better off waiting a little bit for the file to be released by Firefox.
EDIT 2: More Code
I was asked if 1 second was enough time and mostly it is, but in case you need to wait more than that you could change the above code to this:
import os
import time
def tiny_file_rename(newname, folder_of_download, time_to_wait=60):
    time_counter = 0
    filename = max([f for f in os.listdir(folder_of_download)], key=lambda xa: os.path.getctime(os.path.join(folder_of_download, xa)))
    while '.part' in filename:
        time.sleep(1)
        time_counter += 1
        if time_counter > time_to_wait:
            raise Exception('Waited too long for file to download')
        filename = max([f for f in os.listdir(folder_of_download)], key=lambda xa: os.path.getctime(os.path.join(folder_of_download, xa)))
    os.rename(os.path.join(folder_of_download, filename), os.path.join(folder_of_download, newname))
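For reference, a minimal usage sketch; the download directory and the target name below are assumptions, not part of the original answer:
browser.find_element_by_partial_link_text("Excel").click()  # trigger the download
tiny_file_rename("report.xlsx", r"C:\Downloads", time_to_wait=120)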

There is something I would correct in @parishodak's answer:
The filename there will only return the relative path (here, the name of the file), not the absolute path.
That is why @FreshRamen got the following error:
File "/usr/local/Cellar/python/2.7.10_2/Frameworks/Python.framework/Versions/2.7/lib/‌​python2.7/genericpath.py",
line 72, in getctime return os.stat(filename).st_ctime OSError:
[Errno 2] No such file or directory: '.localized'
Here is the correct code:
import os
import shutil
filepath = r'c:\downloads'
filename = max([os.path.join(filepath, f) for f in os.listdir(filepath)], key=os.path.getctime)
shutil.move(filename, os.path.join(filepath, 'newfilename.ext'))

I've come up with a different solution. Since you only care about the last downloaded file, why not download it into a dummy_dir? That way it is going to be the only file in that directory. Once it's downloaded, you can move it to your destination_dir as well as change its name.
Here is an example that works with Firefox:
import os
import shutil
import time

def rename_last_downloaded_file(dummy_dir, destination_dir, new_file_name):
    def get_last_downloaded_file_path(dummy_dir):
        """ Return the last modified -in this case last downloaded- file path.
            This function is going to loop as long as the directory is empty.
        """
        while not os.listdir(dummy_dir):
            time.sleep(1)
        return max([os.path.join(dummy_dir, f) for f in os.listdir(dummy_dir)], key=os.path.getctime)

    while '.part' in get_last_downloaded_file_path(dummy_dir):
        time.sleep(1)
    shutil.move(get_last_downloaded_file_path(dummy_dir), os.path.join(destination_dir, new_file_name))
You can fiddle with the sleep time and add a TimeoutException as well, as you see fit.
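A hedged sketch of that suggestion using Selenium's TimeoutException; it repeats the newest-file lookup as a local helper and assumes the os, time and shutil imports from the block above (the 60-second limit is an arbitrary choice):
from selenium.common.exceptions import TimeoutException

def rename_last_downloaded_file_with_timeout(dummy_dir, destination_dir, new_file_name, timeout=60):
    def newest_path():
        # same "only file in the dummy directory" lookup as above
        while not os.listdir(dummy_dir):
            time.sleep(1)
        return max([os.path.join(dummy_dir, f) for f in os.listdir(dummy_dir)], key=os.path.getctime)

    waited = 0
    # keep polling while Firefox is still writing the temporary '.part' file
    while '.part' in newest_path():
        time.sleep(1)
        waited += 1
        if waited > timeout:
            raise TimeoutException('Download did not finish within %d seconds' % timeout)
    shutil.move(newest_path(), os.path.join(destination_dir, new_file_name))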

Here is the code sample I used to download a PDF with a specific file name. First you need to configure the Chrome webdriver with the required options. Then, after clicking the button (to open the PDF popup window), call a function to wait for the download to finish and rename the downloaded file.
import os
import time
import shutil
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait

# function to wait for download to finish and then rename the latest downloaded file
def wait_for_download_and_rename(newFilename):
    # function to wait for all chrome downloads to finish
    def chrome_downloads(drv):
        if not "chrome://downloads" in drv.current_url:  # if 'chrome downloads' is not current tab
            drv.execute_script("window.open('');")  # open a new tab
            drv.switch_to.window(driver.window_handles[1])  # switch to the new tab
            drv.get("chrome://downloads/")  # navigate to chrome downloads
        return drv.execute_script("""
            return document.querySelector('downloads-manager')
                .shadowRoot.querySelector('#downloadsList')
                .items.filter(e => e.state === 'COMPLETE')
                .map(e => e.filePath || e.file_path || e.fileUrl || e.file_url);
            """)

    # wait for all the downloads to be completed
    dld_file_paths = WebDriverWait(driver, 120, 1).until(chrome_downloads)  # returns list of downloaded file paths
    # Close the current tab (chrome downloads)
    if "chrome://downloads" in driver.current_url:
        driver.close()
    # Switch back to original tab
    driver.switch_to.window(driver.window_handles[0])
    # get latest downloaded file name and path
    dlFilename = dld_file_paths[0]  # latest downloaded file from the list
    # wait till downloaded file appears in download directory
    time_to_wait = 20  # adjust timeout as per your needs
    time_counter = 0
    while not os.path.isfile(dlFilename):
        time.sleep(1)
        time_counter += 1
        if time_counter > time_to_wait:
            break
    # rename the downloaded file
    shutil.move(dlFilename, os.path.join(download_dir, newFilename))
    return

# specify custom download directory
download_dir = r'c:\Downloads\pdf_reports'

# for configuring chrome pdf viewer for downloading pdf popup reports
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option('prefs', {
    "download.default_directory": download_dir,  # Set own Download path
    "download.prompt_for_download": False,  # Do not ask for download at runtime
    "download.directory_upgrade": True,  # Also needed to suppress download prompt
    "plugins.plugins_disabled": ["Chrome PDF Viewer"],  # Disable this plugin
    "plugins.always_open_pdf_externally": True,  # Enable this plugin
})

# get webdriver with options for configuring chrome pdf viewer
driver = webdriver.Chrome(options=chrome_options)

# open desired webpage
driver.get('https://mywebsite.com/mywebpage')

# click the button to open pdf popup
driver.find_element_by_id('someid').click()

# call the function to wait for download to finish and rename the downloaded file
wait_for_download_and_rename('My file.pdf')

# close the browser windows
driver.quit()
Adjust the timeout (120) to the wait time your downloads need.

I am using the following function.
It checks for a file in the download location that you specify for Chrome/Selenium, and only if there is a file created at most 10 seconds ago (max_old_time) does it rename it. Otherwise, it waits a maximum of 60 seconds (max_waiting_time).
Not sure if it is the best way, but it worked for me.
import os, shutil, time
from datetime import datetime

def rename_last_file(download_folder, destination_folder, newfilename):
    # Will wait a maximum of max_waiting_time seconds for a new file in the folder.
    max_waiting_time = 60
    # Will rename only if the file was created less than max_old_time seconds ago.
    max_old_time = 10
    start_time = datetime.now().timestamp()
    while True:
        filelist = []
        last_file_time = 0
        for current_file in os.listdir(download_folder):
            filelist.append(current_file)
            current_file_fullpath = os.path.join(download_folder, current_file)
            current_file_time = os.path.getctime(current_file_fullpath)
            if os.path.isfile(current_file_fullpath):
                if last_file_time == 0:
                    last_file = current_file
                    last_file_time = os.path.getctime(os.path.join(download_folder, last_file))
                if current_file_time > last_file_time and os.path.isfile(current_file_fullpath):
                    last_file = current_file
        last_file_fullpath = os.path.join(download_folder, last_file)
        if start_time - last_file_time < max_old_time:
            shutil.move(last_file_fullpath, os.path.join(destination_folder, newfilename))
            print(last_file_fullpath)
            return(0)
        elif (datetime.now().timestamp() - start_time) > max_waiting_time:
            print("exit")
            return(1)
        else:
            print("waiting file...")
            time.sleep(5)

Using @dmb's trick, I've just made one correction: after the '.part' check, below time.sleep(1), we must request the filename again. Otherwise, the line below will try to rename a '.part' file which no longer exists.
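For clarity, a minimal sketch of what that correction looks like when applied to the tiny_file_rename function above (assuming the same os and time imports):
def tiny_file_rename(newname, folder_of_download):
    def latest_name():
        return max(os.listdir(folder_of_download),
                   key=lambda f: os.path.getctime(os.path.join(folder_of_download, f)))

    filename = latest_name()
    while '.part' in filename:
        time.sleep(1)
        filename = latest_name()  # re-read the name once the '.part' file is gone
    os.rename(os.path.join(folder_of_download, filename), os.path.join(folder_of_download, newname))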

Here is a browser-agnostic solution that waits for the download to finish then returns the file name.
import os
import time
from datetime import datetime, timedelta

# DOWNLOAD_DIR is assumed to be the directory the browser downloads into
def wait_for_download_and_get_file_name():
    print(f'Waiting for download to finish', end='')
    while True:
        # Get the name of the file with the latest creation time
        newest_file_name = max([os.path.join(DOWNLOAD_DIR, f) for f in os.listdir(DOWNLOAD_DIR)], key=os.path.getctime)
        # Get the creation time of the file
        file_creation_time = datetime.fromtimestamp(os.path.getctime(newest_file_name))
        five_seconds_ago = datetime.now() - timedelta(seconds=5)
        if file_creation_time < five_seconds_ago:
            # The file with the latest creation time is too old to be the file that we're waiting for
            print('.', end='')
            time.sleep(0.5)
        else:
            print(f'\nFinished downloading "{newest_file_name}"')
            break
    return newest_file_name
Caveat: this will not work if you have more than one thread or process downloading files to the same directory at the same time.
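If you do need several workers downloading in parallel, one hedged workaround (not from the original answer) is to give each driver its own temporary download directory, so the newest-file check cannot race with other workers:
import tempfile
from selenium import webdriver

def make_driver_with_private_download_dir():
    # every driver instance downloads into its own fresh directory
    download_dir = tempfile.mkdtemp(prefix='sel_dl_')
    options = webdriver.ChromeOptions()
    options.add_experimental_option('prefs', {'download.default_directory': download_dir})
    return webdriver.Chrome(options=options), download_dir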

In my case I am downloading and renaming .csv files, and I use files that have '_' in the title as a reference, but you can change '_' for your specific usage.
Add this block after the download step in your Selenium script.
import os

string = 'SOMETHING_OR_VARIABLE'
path = r'PATH_WHERE_FILES_ARE_BEING_DOWNLOADED'
files = [i for i in os.listdir(path) if os.path.isfile(os.path.join(path, i)) and '_' in i]
if files:
    print(files[0])
    os.rename(os.path.join(path, files[0]), os.path.join(path, f'{string}.csv'))
else:
    print('error')

You can download the file and name it at the same time using urlretrieve:
import urllib
url = browser.find_element_by_partial_link_text("Excel").get_attribute('href')
urllib.urlretrieve(url, "/choose/your/file_name.xlsx")
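Note that urllib.urlretrieve is the Python 2 spelling; on Python 3 the equivalent lives in urllib.request. A rough sketch (bear in mind urlretrieve makes a plain HTTP request, so it will not carry over cookies or login state from the Selenium session):
from urllib.request import urlretrieve

url = browser.find_element_by_partial_link_text("Excel").get_attribute('href')
urlretrieve(url, "/choose/your/file_name.xlsx")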

Related

Python/Selenium - How to save CSV created by JavaScript directly to a variable

I currently use Selenium to click a download link to download a CSV file into a directory, wait for the download to complete, then read the file from that directory into a Python variable.
I want to deploy the script in a Docker container, and as far as I'm aware, I can't load the CSV the way I currently am.
I also don't know how the CSV file is created by the download button so I can't call webdriver.Firefox.execute_script().
Is there a way to intercept Firefox when downloading to see the file and save it straight to a variable at that point?
Is there an easy way to see how the CSV is created? (as I can't read the website code)
If no to the above, is there a way I can perform my current actions inside a Docker container that will be hosted on a cloud server?
My current code for reference
# Driver set-up
firefox_driver_path = "Path to geckodriver.exe"
downloaded_csv_path = "Path to specific .csv file when downloaded"
firefox_options = webdriver.FirefoxOptions()
firefox_options.headless = True
firefox_options.set_preference("browser.download.dir", "Path to download .csv file")
firefox_options.set_preference("browser.download.folderList", 2)
firefox_options.set_preference("browser.download.useDownloadDir", True)
firefox_options.set_preference("browser.download.viewableInternally.enabledTypes", "")
firefox_options.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/pdf;text/plain;application/text;text/xml;text/csv;application/xml")
driver = webdriver.Firefox(executable_path=firefox_driver_path, options=firefox_options)
wait = WebDriverWait(driver, 10)
# Navigate to webpage, perform actions
driver.get("website URL")
# Perform other actions to navigate to the download button
driver.find_element_by_id('DownloadButton').click()
# Check if download is complete
time_to_wait = 10
time_counter = 0
while not os.path.exists(downloaded_csv_path):
    time.sleep(1)
    time_counter += 1
    if time_counter > time_to_wait:
        sys.exit("First CSV download didn't complete")
# Read the CSV data and delete the downloaded file
with open(downloaded_csv_path) as csvFile:
    my_CSV = csv.reader(csvFile, delimiter=',')
os.unlink(downloaded_csv_path)
UPDATE
One download button has what seems to be an Angular function attached to it: ng-click="$ctrl.onExportTransactions()".
I know nothing of Angular, and after searching every source file for the website (including 3 that are named "Angular xxx"), I can't find a function called "onExportTransactions". Is there a way this function can be invoked from Python/Selenium?

Using Adobe Readers Export as text function in python

I want to convert lots of PDFs into text files.
The formatting is very important and only Adobe Reader seems to get it right (PDFMiner or PyPDF2 do not.)
Is there a way to automate the "export as text" function from Adobe Reader?
The following code will do what you want for one file. I recommend organizing the script into a few little functions and then calling the functions in a loop to process many files; a rough sketch of such a loop follows the script below. You'll need to install the keyboard library using pip, or some other tool.
import pathlib as pl
import os
import keyboard
import time
import io
KILL_KEY = 'esc'
read_path = pl.Path("C:/Users/Sam/Downloads/WS-1401-IP.pdf")
####################################################################
write_path = pl.Path(str(read_path.parent/read_path.stem) + ".txt")
overwrite_file = os.path.exists(write_path)
# alt -- activate keyboard shortcuts
# `F` -- open file menu
# `v` -- select "save as text" option
# keyboard.write(write_path)
# `alt+s` -- save button
# `ctrl+w` -- close file
os.startfile(read_path)
time.sleep(1)
keyboard.press_and_release('alt')
time.sleep(1)
keyboard.press_and_release('f') # -- open file menu
time.sleep(1)
keyboard.press_and_release('v') # -- select "save as text" option
time.sleep(1)
keyboard.write(str(write_path))
time.sleep(1)
keyboard.press_and_release('alt+s')
time.sleep(2)
if overwrite_file:
    keyboard.press_and_release('y')

# wait for program to finish saving
waited_too_long = True
for _ in range(5):
    time.sleep(1)
    if os.path.exists(write_path):
        waited_too_long = False
        break
if waited_too_long:
    with io.StringIO() as ss:
        print(
            "program probably saved to somewhere other than",
            write_path,
            file=ss
        )
        msg = ss.getvalue()
    raise ValueError(msg)

keyboard.press_and_release('ctrl+w') # close the file
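As a rough sketch of the "little functions in a loop" suggestion above; the folder name and glob pattern are assumptions, and export_pdf_to_text is a hypothetical wrapper around the keystroke sequence from the script:
import pathlib as pl

def export_pdf_to_text(read_path):
    # hypothetical wrapper: run the keyboard-automation steps above for a
    # single PDF, writing <name>.txt next to it
    ...

pdf_dir = pl.Path("C:/Users/Sam/Downloads")  # assumed folder of PDFs
for pdf_file in sorted(pdf_dir.glob("*.pdf")):
    export_pdf_to_text(pdf_file)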

Selenium - screenshot with different names

I'm using Selenium to grab a screenshot from a list of urls. test.txt includes reddit.com, stackoverflow.com and spotify.com. When iterating through this list I want it to save in the folder Screenshots, with the file name being the url + '.png'. It does not work though. I'm either getting errors or it just keeps running without doing anything.
This one works but it just overwrites the old one
screenshot = driver.save_screenshot('Screenshots/foo.png')
I want it to look like this but it does not work:
screenshot = driver.save_screenshot('Screenshots/', line, '.png')
I am new to Python, but it doesn't work using + instead of the commas either.
The problem is that it takes too many arguments.
class Screenshot():
    filehandle = open("test.txt", "r")
    for line in filehandle:
        DRIVER = 'chromedriver'
        driver = webdriver.Chrome(DRIVER)
        driver.get(line)
        screenshot = driver.save_screenshot('Screenshots/foo.png')
        driver.quit()
Creating a screenshot class is unnecessary for a simple task like this.
#!/usr/bin/env python
from __future__ import print_function
import os
from selenium import webdriver

def main():
    driver = webdriver.Chrome()
    # With automatically closes files when they go out of scope
    with open('test.txt', 'r') as f:
        for url in f.readlines():
            driver.get(url)
            # os.path.join should make it platform agnostic
            # Also remove any '/' from the url and replace to avoid any file system save issues
            sn_name = os.path.join('Screenshots', url.strip().replace('/', '-') + '.png')
            print('Attempting to save:', sn_name)
            # '.save_screenshot' returns false if it fails so throw exception
            if not driver.save_screenshot(sn_name):
                raise Exception('Could not save screen shot: ' + sn_name)
    # Close browser
    driver.quit()

if __name__ == '__main__':
    main()

Python Import Statement bug?

**I solved this below**: I think it may be helpful to others in the future, so I'm keeping my question up rather than taking it down. It's a Python vs. other-language nested file import issue. However, if anyone understands the intricacies of why this is so in Python, an explanatory answer would be greatly appreciated.
I had my code running fine with a file directory setup like this:
sniffer //folder
-__init__.py
-Sniffer.py
-database.py
I switched it to:
Main
-snifferLaunch.py
-flashy
--sniffer
---Sniffer.py
---database.py
In theory if I change the imports to find the folders it should still run the same way...
I was under the impression that importing a Python file could be done even if it was nested. For example,
import Sniffer // in snifferLaunch should go through each folder and try to find a Sniffer.py file.
However, I found this to be false; did I misunderstand this? So I tried looking at an example which imports files like this:
import flashy.sniffer.Sniffer as Sniffer
This does import the file, I believe. However, when I run it, it traces out an error on launch:
Traceback (most recent call last):
File "snifferLaunch.py", line 19, in <module>
import flashy.sniffer.Sniffer
File "/Users/tai/Desktop/FlashY/flashy/sniffer/__init__.py", line 110, in <module>
File "/Users/tai/Desktop/FlashY/flashy/sniffer/__init__.py", line 107, in forInFile
File "/Users/tai/Desktop/FlashY/flashy/sniffer/__init__.py", line 98, in runFlashY
File "/Users/tai/Desktop/FlashY/flashy/sniffer/__init__.py", line 89, in db
AttributeError: 'module' object has no attribute 'getDecompiledFiles'
This would normally cause me to go look for a getDecompiledFiles function. The problem is that nowhere in the code is there a getDecompiledFiles; there is a get_decompiled_files function.
My code looks something like this (non-essential parts removed). Do you see my bug? I searched the entire project and could not find a getDecompiledFiles function anywhere. I don't know why it is expecting an attribute with this name...
snifferLaunch:
import flashy.sniffer.Sniffer as Sniffer
import flashy.sniffer.database as database
import flashy.sniffer.cleaner as cleaner
def open_websites(line):
    #opens a list of websites from local file "urlIn.txt" and runs the Sniffer on them.
    #It retrieves the swfs from each url and storing them in the local out/"the modified url"/"hashed swf.swf" and the file contains the decompiled swf
    print( "opening websites")
    newSwfFiles = [];
    # reads in all of the lines in urlIn.txt
    #for line in urlsToRead:
    if line[0] !="#":
        newLine = cleaner.remove_front(line);
        # note the line[:9] is done to avoid the http// which creates an additional file to go into. The remaining part of the url is still unique.
        outFileDirectory = decSwfsFolder + "/" + newLine
        cleaner.check_or_create_dir(outFileDirectory)
        try:
            newSwfFiles = Sniffer.open_url(line, []);
        except:
            print " Sniffer.openURL failed"
            pass
        # for all of the files there it runs jpex on them. (in the future this will delete the file after jpex runs so we don't run jpex more than necessary)
        for location in newSwfFiles:
            cleaner.check_or_create_dir(outFileDirectory + "/" + location)
            #creates the command for jpex flash decompiler, the command + file to save into + location of the swf to decompile
            newCommand = javaCommand + "/" + newLine + "/" + location +"/ " + swfLoc +"/"+ location
            os.system(newCommand)
            print ("+++this is the command: " + newCommand+"\n")
            # move the swf into a new swf file for db storage
            oldLocation = swfFolder + location;
            newLocation = decSwfsFolder + "/" + newLine + "/" + location + "/" + "theSwf"+ "/"
            cleaner.check_or_create_dir(newLocation )
            if(os.path.exists(oldLocation)):
                # if the file already exists at that location do not move it simply delete it (the) duplicate
                if(os.path.exists(newLocation +"/"+ location)):
                    os.remove(oldLocation)
                else:
                    shutil.move(swfFolder + location, newLocation)
        if cleanup:
            cleaner.cleanSwf();
    # newSwfFiles has the directory file location of each new added file: "directory/fileHash.swf"
def db():
    database.get_decompiled_files()
def run_flashY(line):
    #Run FlashY a program that decompiles all of the swfs found at urls defined in urlIn.txt.
    #Each decompiled file will be stored in the PaperG Amazon S3 bucket: decompiled_swfs.
    #run the program for each line
    #open all of the websites in the url file urlIn.txt
    open_websites(line)
    #store the decompiled swfs in the database
    db()
    #remove all files from local storage
    cleaner.clean_out()
    #kill all instances of firefox
def for_in_file():
    #run sniffer for each line in the file
    #for each url, run then kill firefox to prevent firefox buildup
    for line in urlsToRead:
        run_flashY(line)
        cleaner.kill_firefox()
#Main Functionality
if __name__ == '__main__':
    #initialize and run the program on launch
    for_in_file()
The Sniffer File:
import urllib2
from urllib2 import Request, urlopen, URLError, HTTPError
import shutil
import sys
import re
import os
import hashlib
import time
import datetime
from selenium import webdriver
import glob
import thread
import httplib
from collections import defaultdict
import cleaner
a=[];
b=[];
newSwfFiles=[];
theURL='';
curPath = os.path.dirname(os.path.realpath(__file__))
#firebug gets all network data
fireBugPath = curPath +'/firebug-1.12.8b1.xpi';
#netExport exports firebug's http archive (network req/res) in the form of a har file
netExportPath = curPath +'/netExport.xpi';
harLoc = curPath +"/har/";
swfLoc = curPath +"/swfs";
cleanThis=True
#remove har file(s) after reading them out to gather swf files
profile = webdriver.firefox.firefox_profile.FirefoxProfile();
profile.add_extension( fireBugPath);
profile.add_extension(netExportPath);
hashLib = hashlib.md5()
#firefox preferences
profile.set_preference("app.update.enabled", False)
profile.native_events_enabled = True
profile.set_preference("webdriver.log.file", curPath +"webFile.txt")
profile.set_preference("extensions.firebug.DBG_STARTER", True);
profile.set_preference("extensions.firebug.currentVersion", "1.12.8");
profile.set_preference("extensions.firebug.addonBarOpened", True);
profile.set_preference('extensions.firebug.consoles.enableSite', True)
profile.set_preference("extensions.firebug.console.enableSites", True);
profile.set_preference("extensions.firebug.script.enableSites", True);
profile.set_preference("extensions.firebug.net.enableSites", True);
profile.set_preference("extensions.firebug.previousPlacement", 1);
profile.set_preference("extensions.firebug.allPagesActivation", "on");
profile.set_preference("extensions.firebug.onByDefault", True);
profile.set_preference("extensions.firebug.defaultPanelName", "net");
#set net export preferences
profile.set_preference("extensions.firebug.netexport.alwaysEnableAutoExport", True);
profile.set_preference("extensions.firebug.netexport.autoExportToFile", True);
profile.set_preference("extensions.firebug.netexport.saveFiles", True);
profile.set_preference("extensions.firebug.netexport.autoExportToServer", False);
profile.set_preference("extensions.firebug.netexport.Automation", True);
profile.set_preference("extensions.firebug.netexport.showPreview", False);
profile.set_preference("extensions.firebug.netexport.pageLoadedTimeout", 15000);
profile.set_preference("extensions.firebug.netexport.timeout", 10000);
profile.set_preference("extensions.firebug.netexport.defaultLogDir",harLoc);
profile.update_preferences();
browser = webdriver.Firefox(firefox_profile=profile);
def open_url(url,s):
    #open each url, find all of the har files with them and get those files.
    theURL = url;
    time.sleep(6);
    #browser = webdriver.Chrome();
    browser.get(url); #load the url in firefox
    browser.set_page_load_timeout(30)
    time.sleep(3); #wait for the page to load
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight/5);")
    time.sleep(1); #wait for the page to load
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight/4);")
    time.sleep(1); #wait for the page to load
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight/3);")
    time.sleep(1); #wait for the page to load
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
    time.sleep(1); #wait for the page to load
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    searchText='';
    time.sleep(20); #wait for the page to load
    # print(browser.page_source);
    #close the browser and get all the swfs from the created har file.
    #uses the a & b arrays to find the swf files from generated har files
    get_swfs_from_har()
    #clean out the slashes
    clean_f_slashes()
    #get all files
    get_all_files()
    #ensure that some files were gained
    assert a != []
    assert b != []
    assert newSwfFiles != []
    #if the files (har, swf, out) should be cleaned out do so. This can be toggled for dubugging
    if(cleanThis):
        cleaner.clean_har()
    return newSwfFiles;
def remove_non_url(t):
    #remove matched urls that are not actually urls
    a=[];
    for b in t:
        if(b.lower()[:4] !="http" and b.lower()[:4] != "www." ):
            if(b[:2] == "//" and b.__len__() >10):
                a.append(theURL+"/"+b[2:]);
            else:
                while((b.lower()[:4] !="http" or b.lower()[:4] !="www." or b.lower()[:1] !="//") and b.__len__() >10):
                    b=b[1:b.__len__()];
                if( b.__len__() >10):
                    if(b[:1] == "//" ):
                        if not b in a:
                            a.append(theURL+b[2:b.__len__()]);
                    else:
                        if not b in a:
                            a.append(b);
        else:
            if not b in a:
                a.append(b);
    return a;
def get_swfs_from_har():
    #validate that the files in the har are actual swf files
    files = [f for f in os.listdir(harLoc) if re.match((theURL[7:]+ '.*.har'), f)]
    for n in files:
        with open (harLoc + n , "r") as theF:
            textt = theF.read();
        swfObjects= re.findall('\{[^\{]*(?:http:\/\/|https:\/\/|www\.|\/\/)[^}]*\.swf[^}]+', textt.lower())
        #swfObjects = "".join(str(i) for i in swfObjects)
        for obj in swfObjects:
            l=[]
            otherL=[]
            links = re.findall('(?:http:\/\/|https:\/\/|www\.|\/\/)[^"]+', obj)
            for url in links:
                url=url[:url.__len__()-1]
                ending = url[url.__len__()-6:];
                if ".swf" in ending:
                    l.append(url);
                elif "." not in ending:
                    otherL.append(url);
            for c in l:
                if not c in a and c.__len__() >20:
                    a.append(c);
                    if(otherL.__len__()>0):
                        theMostLikelyLink=otherL[0];
                        b.append(theMostLikelyLink);
                        ##adds the 1st link after the swf
                        otherL.remove(theMostLikelyLink);
                    else:
                        b.append(None);
def clean_f_slashes():
    #remove unrelated characters from swfs
    for x in a:
        newS='';
        if(',' in x or ';' in x or '\\' in x):
            for d in x:
                if(d != '\\' and d != ',' and d != ';'):
                    newS+=d;
        else:
            newS=x;
        if "http" not in newS.lower():
            if "www" in newS:
                newS= "http://" + newS;
            else:
                newS = "http://www."+newS
        while(newS[:3]!="htt"):
            newS=newS[1:];
        a.remove(x);
        if(newS.__len__() >15):
            a.append(newS);
def get_all_files():
    #get all of the files from the array of valid swfs
    os.chdir(swfLoc);
    for openUrl in a:
        place = a.index(openUrl);
        try:
            req = Request(openUrl)
            response = urlopen(req)
            fData = urllib2.urlopen(openUrl)
            iText = fData.read()
            #get the hex hash of the file
            hashLib.update(iText);
            hashV =hashLib.hexdigest()+".swf";
            outUrl= get_redirected_url(b[place]);
            #check if file already exists, if it does do not add a duplicate
            theFile = [f for f in os.listdir(swfLoc) if re.match((hashV), f)]
            if hashV not in theFile:
                lFile = open(outUrl+"," +hashV, "w")
                lFile.write(iText)
                lFile.close();
        #except and then ignore are invalid urls.
        except:
            pass
    #Remove all files less than 8kb, anything less than this size is unlikely to be an advertisement. Most flash ads seen so far are 25kb or larger
    sFiles = [f for f in os.listdir(swfLoc)]
    for filenames in sFiles:
        sizeF = os.path.getsize(filenames);
        #if the file is smaller remove it
        if(sizeF<8000):
            cleaner.remove_file(filenames)
        else:
            newSwfFiles.append(filenames);
def x_str(s):
    #check if a unicode expression exists and convert it to a string
    if s is None:
        return ''
    return str(s)
def get_redirected_url(s):
    #get the url that another url will redirect to
    if s is None:
        return "";
    if ".macromedia" in s:
        return ""
    browser.get(s);
    time.sleep(20);
    theredirectedurl=cleaner.removeFront(browser.current_url);
    aUrl= re.findall("[^/]+",theredirectedurl)[0].encode('ascii','ignore')
    return aUrl;
Interesting... so I actually realized I was going about it wrong.
I still don't know why it was expecting a function that didn't exist but I do have a guess.
I had pulled the __init__.py file to use as the snifferLaunch file. This was due to my original misunderstanding of __init__.py and assuming it was similar to a main in other languages.
I believe the __init__.pyc file was holding an old, outdated function. Essentially, there was a file that should never have been run; it was outdated and somehow getting called. It was the only file that existed with that function in it, and I overlooked it because I thought it shouldn't be called.
The solution is the following, and the bug was caused by my misuse of __init__.
I changed my import statements:
from flashy.sniffer import Sniffer
import flashy.sniffer.database as database
import flashy.sniffer.cleaner as cleaner
I created new blank __init__.py, and __init__.pyc files in flashy/sniffer/.
This prevented the false expectation of getDecompiledFiles and also allowed the code to run. I was getting a "cannot find this file" error because the folder wasn't correctly being identified as a module. Additional information on this would be appreciated if anyone can explain what was going on there. I thought you could run a Python file without an __init__.py; however, when it is nested in other folders, it appears that it must be loaded as a Python module.
My file structure looks like this now:
Main
-snifferLaunch.py //with changed import statements
-flashy
--sniffer
---Sniffer.py
---database.py
---__init__.py //blank
---__init__.pyc // blank
It appears to be a Python vs. other-languages issue. Has anyone else experienced this?

Webdriver Screenshot

When taking a screenshot using Selenium Webdriver on windows with python, the screenshot is saved directly to the path of the program, is there a way to save the .png file to a specific directory?
Use driver.save_screenshot('/path/to/file') or driver.get_screenshot_as_file('/path/to/file'):
import selenium.webdriver as webdriver
import contextlib

@contextlib.contextmanager
def quitting(thing):
    yield thing
    thing.quit()

with quitting(webdriver.Firefox()) as driver:
    driver.implicitly_wait(10)
    driver.get('http://www.google.com')
    driver.get_screenshot_as_file('/tmp/google.png')
    # driver.save_screenshot('/tmp/google.png')
Inspired by this thread (same question for Java): Take a screenshot with Selenium WebDriver
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://www.google.com/')
browser.save_screenshot('screenie.png')
browser.quit()
Yes, there is a way to save a screenshot as a .png file using the Python webdriver.
Use the code below if you are working with the Python webdriver; it is very simple.
driver.save_screenshot('D:\\folder\\filename.png')
driver.save_screenshot("path to save \\screen.jpeg")
This will take a screenshot and place it in a directory with the chosen name.
import os
driver.save_screenshot(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NameOfScreenShotDirectory', 'PutFileNameHere'))
Sure, this isn't current any more, but I faced this issue as well, and here is my way:
It looks like 'save_screenshot' has some trouble creating files with a space in the name, at the same time as I added randomization to the filenames to avoid overwriting.
Here is the method I use to clean whitespace out of my filenames (from: How do I replace whitespaces with underscore and vice versa?):
def urlify(self, s):
    # Remove all non-word characters (everything except numbers and letters)
    s = re.sub(r"[^\w\s]", '', s)
    # Replace all runs of whitespace with a single dash
    s = re.sub(r"\s+", '-', s)
    return s
then
driver.save_screenshot('c:\\pytest_screenshots\\%s' % screen_name)
where
def datetime_now(prefix):
    symbols = str(datetime.datetime.now())
    return prefix + "-" + "".join(symbols)

screen_name = self.urlify(datetime_now('screen')) + '.png'
A similar question was asked here, and the answer seems more complete, so I leave the source:
How to take partial screenshot with Selenium WebDriver in python?
from selenium import webdriver
from PIL import Image
from io import BytesIO
fox = webdriver.Firefox()
fox.get('http://stackoverflow.com/')
# now that we have the preliminary stuff out of the way time to get that image :D
element = fox.find_element_by_id('hlogo') # find part of the page you want image of
location = element.location
size = element.size
png = fox.get_screenshot_as_png() # saves screenshot of entire page
fox.quit()
im = Image.open(BytesIO(png)) # uses PIL library to open image in memory
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
im = im.crop((left, top, right, bottom)) # defines crop points
im.save('screenshot.png') # saves new cropped image
You can use the function below to build a relative path, since hard-coding an absolute path in a script is not a good idea.
Import:
import sys, os
Use the code as below:
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
screenshotpath = os.path.join(os.path.sep, ROOT_DIR,'Screenshots'+ os.sep)
driver.get_screenshot_as_file(screenshotpath+"testPngFunction.png")
Make sure you create the folder where the .py file is present.
os.path.join also lets you run your script cross-platform, e.g. on UNIX and Windows. It generates the path separator for the OS at runtime. os.sep is similar to File.separator in Java.
TakeScreenShot screenshot=new TakeScreenShot();
screenshot.screenShot("screenshots//TestScreenshot//password.png");
It will work, please try.
Have a look at the Python script below, which takes a snapshot of the FB homepage using the Selenium package and the Chrome web driver.
Script:
import selenium
from selenium import webdriver
import time
from time import sleep
chrome_browser = webdriver.Chrome()
chrome_browser.get('https://www.facebook.com/') # Enter to FB login page
sleep(5)
chrome_browser.save_screenshot('C:/Users/user/Desktop/demo.png') # To take FB homepage snap
chrome_browser.close() # To Close the driver connection
chrome_browser.quit() # To Close the browser
It's quite simple, please try this:
from datetime import datetime
from selenium import webdriver

driver = webdriver.Chrome()

def take_screenshot(name):
    driver.get_screenshot_as_file(f"./screenshot/{name}.png")

# Or with a datetime stamp:
def take_screenshot_with_dt(name):
    screenDatetime = datetime.now().strftime('%d-%m-%Y-%H-%M')
    driver.get_screenshot_as_file(f"./screenshot/{name}-{screenDatetime}.png")

take_screenshot("screenshotName")
take_screenshot_with_dt("screenshotNameDt")
write this hook in conftest.py
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item):
    pytest_html = item.config.pluginmanager.getplugin("html")
    outcome = yield
    report = outcome.get_result()
    extra = getattr(report, "extra", [])
    if report.when == "call":
        # always add url to report
        extra.append(pytest_html.extras.url(driver.current_url))
        xfail = hasattr(report, "wasxfail")
        if (report.skipped and xfail) or (report.failed and not xfail):
            report_directory = os.path.dirname(item.config.option.htmlpath)
            file_name = report.nodeid.replace("::", "_") + ".png"
            destination_file = os.path.join(report_directory, file_name)
            driver.save_screenshot(destination_file)
            if file_name:
                html = '<div><img src="%s" alt="screenshot" style="width:300px;height=200px"' \
                       'onclick="window.open(this.src)" align="right"/></div>' % file_name
                # only add additional html on failure
                extra.append(pytest_html.extras.html(html))
        report.extra = extra
WebDriver driver = new FirefoxDriver();
driver.get("http://www.google.com/");
File scrFile = ((TakesScreenshot)driver).getScreenshotAs(OutputType.FILE);
FileUtils.copyFile(scrFile, new File("c:\\NewFolder\\screenshot1.jpg"));
I understand you are looking for an answer in Python, but here is how one would do it in Ruby.
http://watirwebdriver.com/screenshots/
If that only works by saving in the current directory, I would first assign the image to a variable and then save that variable to disk as a PNG file.
e.g.:
image = b.screenshot.png
File.open("testfile.png", "w") do |file|
  file.puts "#{image}"
end
where b is the browser variable used by webdriver. I have the flexibility to provide an absolute or relative path in "File.open", so I can save the image anywhere.
