Convert Ghostscript from os.popen to subprocess.Popen (Python)

I need to create a monkey patch for Ghostscript: I have to migrate from os.popen to subprocess.Popen because I can't use the shell on my system.
I tried it in this way:
import subprocess
from subprocess import PIPE, STDOUT

from PIL import Image

def mioGhostscript(tile, size, fp):
    """Render an image using Ghostscript (Unix only)"""
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    import tempfile, os

    file = tempfile.mktemp()

    # Build ghostscript command
    command = ["gs",
               "-q",                       # quiet mode
               "-g%dx%d" % size,           # set output geometry (pixels)
               "-dNOPAUSE -dSAFER",        # don't pause between pages, safe mode
               "-sDEVICE=ppmraw",          # ppm driver
               "-sOutputFile=%s" % file,   # output file
               "- >/dev/null 2>/dev/null"
               ]

    #command = shlex.split(string.join(command))

    # push data through ghostscript
    try:
        #gs = os.popen(command, "w")
        args = command  # ['gs','-dSAFER','-dNOPAUSE','-dBATCH','-sDEVICE=jpeg','-sOutputFile=/home/user/output2.jpg /home/user/downloads/test.pdf']
        gs = subprocess.Popen(args, stdout=PIPE, stderr=STDOUT, stdin=PIPE)
        # adjust for image origin
        if bbox[0] != 0 or bbox[1] != 0:
            #gs.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
            gs.stdin.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
        fp.seek(offset)
        while length > 0:
            s = fp.read(8192)
            if not s:
                break
            length = length - len(s)
            raise Exception(s)
            gs.stdin.write(s)
        gs.communicate()[0]
        status = gs.stdin.close()
        #status = gs.close()
        #if status:
        #    raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(file)
    finally:
        try: os.unlink(file)
        except: pass

    return im

import PIL
PIL.EpsImagePlugin.Ghostscript = mioGhostscript
but I get this traceback:
Traceback (most recent call last):
  File "/home/web/lib/driver_mod_python.py", line 252, in handler
    buf = m.__dict__[pard['program']](pard)
  File "/home/dtwebsite/bin/cms_gest_ordini.py", line 44, in wrapped
    return func(pard)
  File "/home/dtwebsite/bin/cms_gest_ordini.py", line 95, in wrapped
    return func(pard)
  File "/home/dtwebsite/bin/cms_gest_picking_list.py", line 341, in picking_list
    tr_modelllo = render_row_picking_list(pard, item, picked=0, plist_allowed=plist_allowed)
  File "/home/dtwebsite/bin/cms_gest_picking_list.py", line 432, in render_row_picking_list
    aa = a.tostring()
  File "/rnd/apps/interpreters/python-2.5.6/lib/python2.5/site-packages/PIL/Image.py", line 532, in tostring
    self.load()
  File "/rnd/apps/interpreters/python-2.5.6/lib/python2.5/site-packages/PIL/EpsImagePlugin.py", line 283, in load
    self.im = Ghostscript(self.tile, self.size, self.fp)
  File "/home/dtwebsite/bin/cms_gest_picking_list.py", line 64, in mioGhostscript
    gs.stdin.write(s)
IOError: [Errno 32] Broken pipe
Can someone help me, please?

I found the solution to the problem.
It was with the PIL package: something didn't compile right during the installation.
After that I had a dependency problem.
I fixed it in the following way:
import PIL.EpsImagePlugin
PIL.EpsImagePlugin.Ghostscript = mioGhostscript
Then I saw this in the command:
"- >/dev/null 2>/dev/null"
This is shell syntax, and it didn't work on my system because, without a shell, the whole string is passed to Ghostscript as a single argument, so it tried to read a file literally named - >/dev/null 2>/dev/null, which doesn't exist.
I replaced
"- >/dev/null 2>/dev/null"
with
"-"
and the program now reads from stdin.
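In other words, once the shell is out of the picture, redirections like >/dev/null have to be expressed through subprocess itself. Here is a minimal sketch of the idea (not the final patch; the output path is just a placeholder, and since this is Python 2.5 it uses os.devnull instead of the later subprocess.DEVNULL):
import os
import subprocess

# The shell version effectively ran: gs ... - >/dev/null 2>/dev/null
# Without a shell, "-" stays a plain argument and the redirections
# become file objects passed to Popen.
devnull = open(os.devnull, 'w')
gs = subprocess.Popen(
    ["gs", "-q", "-dNOPAUSE", "-dSAFER", "-sDEVICE=ppmraw",
     "-sOutputFile=/tmp/out.ppm",      # placeholder output path
     "-"],                             # "-" = read PostScript from stdin
    stdin=subprocess.PIPE,
    stdout=devnull,                    # replaces ">/dev/null"
    stderr=devnull,                    # replaces "2>/dev/null"
)
gs.communicate("showpage\n")           # send the PostScript and wait for gs (bytes on Python 3)
devnull.close()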
The final code is:
import subprocess
from subprocess import PIPE, STDOUT

from PIL import Image

def mioGhostscript(tile, size, fp):
    """Render an image using Ghostscript (Unix only)"""
    # Unpack decoder tile
    decoder, tile, offset, data = tile[0]
    length, bbox = data

    import tempfile, os

    file = tempfile.mktemp()

    # Build ghostscript command
    command = ["gs",
               "-q",                       # quiet mode
               "-g%dx%d" % size,           # set output geometry (pixels)
               "-dNOPAUSE -dSAFER",        # don't pause between pages, safe mode
               "-sDEVICE=ppmraw",          # ppm driver
               "-sOutputFile=%s" % file,   # output file
               "-"                         # read PostScript from stdin
               ]

    #command = shlex.split(string.join(command))

    # push data through ghostscript
    try:
        #gs = os.popen(command, "w")
        args = command  # ['gs','-dSAFER','-dNOPAUSE','-dBATCH','-sDEVICE=jpeg','-sOutputFile=/home/user/output2.jpg /home/user/downloads/test.pdf']
        gs = subprocess.Popen(args, stdout=PIPE, stderr=STDOUT, stdin=PIPE)
        # adjust for image origin
        if bbox[0] != 0 or bbox[1] != 0:
            #gs.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
            gs.stdin.write("%d %d translate\n" % (-bbox[0], -bbox[1]))
        fp.seek(offset)
        while length > 0:
            s = fp.read(8192)
            if not s:
                break
            length = length - len(s)
            gs.stdin.write(s)
        gs.communicate()[0]
        status = gs.stdin.close()
        #status = gs.close()
        #if status:
        #    raise IOError("gs failed (status %d)" % status)
        im = Image.core.open_ppm(file)
    finally:
        try: os.unlink(file)
        except: pass

    return im

import PIL.EpsImagePlugin
PIL.EpsImagePlugin.Ghostscript = mioGhostscript
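As a quick sanity check, the patch can be exercised like this (a minimal sketch; test.eps and test.png are hypothetical file names, and it assumes a PIL build with EPS support):
from PIL import Image
import PIL.EpsImagePlugin

PIL.EpsImagePlugin.Ghostscript = mioGhostscript  # install the patched renderer

im = Image.open("test.eps")   # hypothetical input file
im.load()                     # triggers the patched Ghostscript call
im.save("test.png")           # hypothetical output file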
I hope this post can help someone.

Related

Extracting frame fails with: Invalid data found when processing input

I have the following method to create a dummy video file:
def create_dummy_mp4_video() -> None:
    cmd = (
        f"ffmpeg -y "  # rewrite if exists
        f"-f lavfi -i color=size=100x100:rate=10:color=black "  # blank video
        f"-f lavfi -i anullsrc=channel_layout=stereo:sample_rate=44100 "  # silent audio
        f"-t 1 "  # video duration, seconds
        "output.mp4"  # file name
    )
    proc = subprocess.run(
        shlex.split(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=False,
    )
    if proc.returncode != 0:
        raise Exception()


@dataclass(frozen=True)
class FakeVideo:
    body: bytes
    width: int
    height: int
    fps: int
    size: int
    frames: int
    length_s: int


def video() -> FakeVideo:
    w, h, fps, sec, filename = 100, 100, 10, 1, "output.mp4"
    create_dummy_mp4_video()
    video_path = os.path.join(os.getcwd(), filename)
    with open(video_path, "rb") as file:
        body = file.read()
    size = len(body)
    frames = fps // sec
    return FakeVideo(
        body=body, width=w, height=h, fps=fps,
        size=size, frames=frames, length_s=sec,
    )
Then I want to extract a frame at a specific time. I did it like this:
async def run_shell_command(frame_millisecond, data: bytes) -> bytes:
    async with aiofiles.tempfile.NamedTemporaryFile("wb") as file:
        await file.write(data)
        proc = await asyncio.create_subprocess_exec(
            "ffmpeg",
            "-i",
            file.name,
            "-ss",
            f"{frame_millisecond}ms",  # seek the position to the specific millisecond
            "-vframes", "1",  # only handle one video frame
            "-c:v", "png",  # select the output encoder
            "-f", "image2pipe", "-",  # force output file to stdout
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, stderr = await proc.communicate()
        level = logging.DEBUG if proc.returncode == 0 else logging.WARN
        LOGGER.log(level, f"[cmd exited with {proc.returncode}]")
        if stderr:
            print(level, f"[stderr]{stderr.decode()}")
            LOGGER.log(level, f"[stderr]{stderr.decode()}")
        return stdout


async def runner():
    v = video()
    time = int(v.length_s / 2 * 1000)
    res = await run_shell_command(time, v.body)
    assert isinstance(res, bytes)
    assert imghdr.what(h=res, file=None) == "png"


loop = asyncio.get_event_loop()
loop.run_until_complete(runner())
This code fails with the following error:
/tmp/tmpzo786lfg: Invalid data found when processing input
Please help me find the problem with my code.
During investigation I found that it works if I change the size of the video like this:
f"-f lavfi -i color=size=1280x720:rate=25:color=black " # blank video
but I want to be able to process any video.
I use ffmpeg 4.3.3-0+deb11u1.
It looks like you have to make sure the data is written to the temporary file before executing FFmpeg.
I don't have any experience with asyncio and aiofiles and I am running Windows 10, so I am not sure about the Linux behavior...
I tried to add await file.flush() after file.write(data), but the FFmpeg execution result was "Permission denied".
I solved it using the solution from the following post:
Add delete=False argument to tempfile.NamedTemporaryFile:
async with aiofiles.tempfile.NamedTemporaryFile("wb", delete=False) as file:
Add await file.close() after await file.write(data).
Closing the file makes sure that all the data is written to it before executing FFmpeg.
Add os.unlink(file.name) before return stdout.
Complete code:
import subprocess
import asyncio
from dataclasses import dataclass
import shlex
import aiofiles
import os
import logging
import imghdr


def create_dummy_mp4_video() -> None:
    cmd = (
        f"ffmpeg -y "  # rewrite if exists
        f"-f lavfi -i color=size=100x100:rate=10:color=black "  # blank video
        f"-f lavfi -i anullsrc=channel_layout=stereo:sample_rate=44100 "  # silent audio
        f"-t 1 "  # video duration, seconds
        "output.mp4"  # file name
    )
    proc = subprocess.run(
        shlex.split(cmd),
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,  # stderr=subprocess.PIPE,
        shell=False,
    )
    if proc.returncode != 0:
        raise Exception()


@dataclass(frozen=True)
class FakeVideo:
    body: bytes
    width: int
    height: int
    fps: int
    size: int
    frames: int
    length_s: int


def video() -> FakeVideo:
    w, h, fps, sec, filename = 100, 100, 10, 1, "output.mp4"
    create_dummy_mp4_video()
    video_path = os.path.join(os.getcwd(), filename)
    with open(video_path, "rb") as file:
        body = file.read()
    size = len(body)
    frames = fps // sec
    return FakeVideo(
        body=body, width=w, height=h, fps=fps,
        size=size, frames=frames, length_s=sec,
    )


async def run_shell_command(frame_millisecond, data: bytes) -> bytes:
    # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
    async with aiofiles.tempfile.NamedTemporaryFile("wb", delete=False) as file:
        await file.write(data)
        # await file.flush()  # Flush data to file before executing FFmpeg?
        await file.close()  # Close the file before executing FFmpeg.
        proc = await asyncio.create_subprocess_exec(
            "ffmpeg",
            "-i",
            file.name,
            "-ss",
            f"{frame_millisecond}ms",  # seek the position to the specific millisecond
            "-vframes", "1",  # only handle one video frame
            "-c:v", "png",  # select the output encoder
            "-f", "image2pipe", "-",  # force output file to stdout
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await proc.communicate()
        level = logging.DEBUG if proc.returncode == 0 else logging.WARN
        # LOGGER.log(level, f"[cmd exited with {proc.returncode}]")
        if stderr:
            print(level, f"[stderr]{stderr.decode()}")
            # LOGGER.log(level, f"[stderr]{stderr.decode()}")
        os.unlink(file.name)  # Unlink is required because delete=False was used
        return stdout


async def runner():
    v = video()
    time = int(v.length_s / 2 * 1000)
    res = await run_shell_command(time, v.body)
    assert isinstance(res, bytes)
    assert imghdr.what(h=res, file=None) == "png"


loop = asyncio.get_event_loop()
loop.run_until_complete(runner())
Notes:
I removed the LOGGER because I couldn't find the LOGGER module.
Next time, please add all the imports to your posted code (it's not so trivial finding them).

Convert regions extracted from a WAV audio file into a FLAC audio file in Python using FFmpeg in Django

Regions extracted from the WAV audio file give "Invalid duration specification for ss".
For example, in my case the duration is in this format:
[(0.006000000000000005, 1.03), (2.0540000000000003, 4.870000000000003)]
This is for converting the regions into FLAC format.
class FLACConverter(object):
    # pylint: disable=too-few-public-methods
    """
    Class for converting a region of an input audio or video file into a FLAC audio file
    """
    def __init__(self, source_path, include_before=0.25, include_after=0.25):
        self.source_path = source_path
        self.include_before = include_before
        self.include_after = include_after

    def __call__(self, region):
        try:
            print("regions to convert in flac:{}".format(region))
            start = region
            end = region
            # start = max(0, start - self.include_before)
            start = list(map(lambda x: tuple(max(0, y - self.include_before) for y in x), start))
            end = list(map(lambda x: tuple(y + self.include_after for y in x), end))
            temp = tempfile.NamedTemporaryFile(suffix='.flac', delete=False)
            command = ["ffmpeg", "-ss", str(start),
                       "-t", str([tuple(x - y for x, y in zip(x1, x2)) for (x1, x2) in zip(end, start)]),
                       "-y", "-i", self.source_path,
                       "-loglevel", "error", temp.name]
            use_shell = True if os.name == "nt" else False
            subprocess.check_output(command)
            print(temp.name)
            # subprocess.check_output(command, stdin=open(os.devnull), shell=use_shell)
            read_data = temp.read()
            temp.close()
            os.unlink(temp.name)
            print("read_data :{}".format(read_data))
            print("temp :{}".format(temp.name))
            return read_data
        except KeyboardInterrupt:
            return None
I expect output like /var/folders/p1/6ttydjfx2sq9zl4cnmjxgjh40000gp/T/tmpwz5n4fnv.flac, but it returns an error:
CalledProcessError at /
Command '['ffmpeg', '-ss', '[(0.006000000000000005, 1.03), (2.0540000000000003, 4.870000000000003)]', '-t', '[(0.5, 0.5), (0.5, 0.5)]', '-y', '-i', '/var/folders/p1/6ttydjfx2sq9zl4cnmjxgjh40000gp/T/tmpuyi5spat.wav', '-loglevel', 'error', '/var/folders/p1/6ttydjfx2sq9zl4cnmjxgjh40000gp/T/tmp0vdewoyd.flac']' returned non-zero exit status 1.
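The error shows the underlying issue: str(start) and the list comprehension pass the textual representation of a whole Python list of tuples to -ss and -t, whereas FFmpeg expects a single numeric time per option. One way around it is to run FFmpeg once per region; here is a rough sketch of that idea (not the original class; the padding handling and the example file names are assumptions):
import os
import subprocess
import tempfile

def convert_region_to_flac(source_path, start, end,
                           include_before=0.25, include_after=0.25):
    """Convert a single (start, end) region of source_path to FLAC bytes."""
    start = max(0, start - include_before)
    end = end + include_after
    temp = tempfile.NamedTemporaryFile(suffix=".flac", delete=False)
    temp.close()
    command = ["ffmpeg", "-ss", str(start), "-t", str(end - start),
               "-y", "-i", source_path,
               "-loglevel", "error", temp.name]
    subprocess.check_output(command)
    with open(temp.name, "rb") as flac_file:
        data = flac_file.read()
    os.unlink(temp.name)
    return data

# regions = [(0.006000000000000005, 1.03), (2.0540000000000003, 4.870000000000003)]
# flac_blobs = [convert_region_to_flac("input.wav", s, e) for s, e in regions]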

Python script won't run using path to file

If I run "python /home/pi/temp/getTemp.py" from the terminal command line I get
"Error, serial port '' does not exist!" If I cd to the temp directory and run "python getTemp.py" it runs fine. Can anyone tell me why?
#!/usr/bin/env python
import os
import sys
import socket
import datetime
import subprocess
import signal

port = "/dev/ttyUSB0"
tlog = '-o%R,%.4C'
hlog = '-HID:%R,H:%h'
clog = '-OSensor %s C: %.2C'


def logStuff(data):
    with open("/home/pi/temp/templog.txt", "a") as log_file:
        log_file.write(data + '\n')


def main():
    try:
        output = subprocess.check_output(['/usr/bin/digitemp_DS9097U', '-q', '-a'])
        for line in output.split('\n'):
            if len(line) == 0:
                logStuff("len line is 0")
                continue
            if 'Error' in line:
                logStuff("error in output")
                sys.exit()
            line = line.replace('"', '')
            if line.count(',') == 1:
                (romid, temp) = line.split(',')
                poll = datetime.datetime.now().strftime("%I:%M:%S %p on %d-%B-%y")
                content = (romid + "," + poll + "," + temp)
                print content
                return content
    except subprocess.CalledProcessError, e:
        print "digitemp error:\n", e.output
    except Exception as e:
        logStuff('main() error: %s' % e)
        os.kill(os.getpid(), signal.SIGKILL)


if __name__ == "__main__":
    main()
It probably cannot find the configuration file, which is normally stored in ~/.digitemprc when you run it with -i to initialize the network. If it was created in a different directory, you need to tell digitemp where to find it by passing -c, for example as sketched below.
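A minimal adjustment along those lines (the config path is an assumption based on the home directory used elsewhere in the script; adjust it to wherever .digitemprc actually lives):
# Point digitemp at its config file explicitly so the script no longer
# depends on the current working directory (the path is an assumption).
output = subprocess.check_output([
    '/usr/bin/digitemp_DS9097U',
    '-c', '/home/pi/.digitemprc',   # config created earlier with -i
    '-q', '-a',
])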

Python-Subprocess-Popen Deadlock in a multi-threaded environment

I have the following piece of code running inside a thread, where the 'expand' C executable produces a unique string output for each input 'url':
p = Popen(["expand", url], bufsize=65536, stdout=PIPE, stderr=PIPE, close_fds=True)
output,error = p.communicate()
print output
I have implemented a Queue-based multithreading solution which processes 5000 urls in batches of 100 each.
When I run the script, it hangs, and ps -aef shows that 2 processes are still running:
1. 10177 5721 6662 6 09:25 pts/15 00:04:36 python expandPlaylist.py -s -t
2. 10177 11004 5721 0 09:26 pts/15 00:00:00 expand http://www.sample.com
Stack trace for main python script:
# ThreadID: 140332211570432
File: "expandPlaylist.py", line 902, in <module>
Main()
File: "expandPlaylist.py", line 894, in Main
startmain(db, c, conf)
File: "expandPlaylist.py", line 834, in startmain
stream_queue.join()
File: "/usr/lib64/python2.7/Queue.py", line 82, in join
self.all_tasks_done.wait()
File: "/usr/lib64/python2.7/threading.py", line 238, in wait
waiter.acquire()
Stack trace for Thread which got deadlocked
# ThreadID: 140332016596736
File: "/usr/lib64/python2.7/threading.py", line 503, in __bootstrap
self.__bootstrap_inner()
File: "/usr/lib64/python2.7/threading.py", line 530, in __bootstrap_inner
self.run()
File: "expandPlaylist.py", line 120, in run
self.process.wait()
File: "/usr/lib64/python2.7/subprocess.py", line 1242, in wait
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
File: "/usr/lib64/python2.7/subprocess.py", line 471, in _eintr_retry_call
return func(*args)
GDB details for process_id: 11004
(gdb) bt
#0 __lll_lock_wait () at ../nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.S:136
#1 0x00007fc36bd33294 in _L_lock_999 () from /lib64/libpthread.so.0
#2 0x00007fc36bd330aa in __pthread_mutex_lock (mutex=0x6a8c20) at pthread_mutex_lock.c:61
#3 0x00007fc36c204dcd in g_mutex_lock (mutex=0x6a8c50) at gthread-posix.c:213
#4 0x00007fc36c1b11df in g_source_unref_internal (source=0x844f90, context=0x6a8c50, have_lock=0) at gmain.c:1975
#5 0x00007fc36c1b13e3 in g_source_unref (source=0x844f90) at gmain.c:2044
#6 0x00007fc36cb475a9 in soup_session_dispose (object=0x61e100) at soup-session.c:305
#7 0x00007fc36c4d270e in g_object_unref (_object=0x61e100) at gobject.c:3160
#8 0x000000000040584b in dispose_session (parser=0x618020) at al_playlist_parser.c:859
#9 0x0000000000403b0b in al_playlist_parser_dispose (obj=0x618020) at al_playlist_parser.c:129
#10 0x00007fc36c4d270e in g_object_unref (_object=0x618020) at gobject.c:3160
#11 0x0000000000403315 in main (argc=1, argv=0x7fff462cdca8) at al_expand.c:143
How can I avoid the deadlock?
Otherwise, is there any way to attach a timeout to self.process.wait() and terminate that thread if the subprocess takes too long to process?
If you only have to call a subprocess on a list of arguments, I tend to do something like this:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith#xs4all.nl>
# $Date: 2013-11-24 11:06:39 +0100 $
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to vid2mp4.py. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/

"""Convert all video files given on the command line to H.264/AAC streams in
an MP4 container."""

from __future__ import print_function, division  # for compatibility with Python 2.

__version__ = '$Revision: cac4808 $'[11:-2]

import os
import sys
import subprocess
from multiprocessing import cpu_count
from time import sleep


def warn(s):
    """Print a warning message.

    :param s: Message string
    """
    s = ' '.join(['Warning:', s])
    print(s, file=sys.stderr)


def checkfor(args, rv=0):
    """Make sure that a program necessary for using this script is
    available.

    :param args: String or list of strings of commands. A single string may
    not contain spaces.
    :param rv: Expected return value from evoking the command.
    """
    if isinstance(args, str):
        if ' ' in args:
            raise ValueError('no spaces in single command allowed')
        args = [args]
    try:
        with open(os.devnull, 'w') as bb:
            rc = subprocess.call(args, stdout=bb, stderr=bb)
        if rc != rv:
            raise OSError
    except OSError as oops:
        outs = "Required program '{}' not found: {}."
        print(outs.format(args[0], oops.strerror))
        sys.exit(1)


def startencoder(fname):
    """Use ffmpeg to convert a video file to H.264/AAC
    streams in an MP4 container.

    :param fname: Name of the file to convert.
    :returns: a 3-tuple of a Process, input path and output path
    """
    basename, ext = os.path.splitext(fname)
    known = ['.mp4', '.avi', '.wmv', '.flv', '.mpg', '.mpeg', '.mov', '.ogv']
    if ext.lower() not in known:
        warn("File {} has unknown extension, ignoring it.".format(fname))
        return (None, fname, None)
    ofn = basename + '.mp4'
    args = ['ffmpeg', '-i', fname, '-c:v', 'libx264', '-crf', '29', '-flags',
            '+aic+mv4', '-c:a', 'libfaac', '-sn', ofn]
    with open(os.devnull, 'w') as bitbucket:
        try:
            p = subprocess.Popen(args, stdout=bitbucket, stderr=bitbucket)
            print("Conversion of {} to {} started.".format(fname, ofn))
        except:
            warn("Starting conversion of {} failed.".format(fname))
    return (p, fname, ofn)


def manageprocs(proclist):
    """Check a list of subprocesses tuples for processes that have ended and
    remove them from the list.

    :param proclist: a list of (process, input filename, output filename)
    tuples.
    """
    print('# of conversions running: {}\r'.format(len(proclist)), end='')
    sys.stdout.flush()
    for p in proclist:
        pr, ifn, ofn = p
        if pr is None:
            proclist.remove(p)
        elif pr.poll() is not None:
            print('Conversion of {} to {} finished.'.format(ifn, ofn))
            proclist.remove(p)
    sleep(0.5)


def main(argv):
    """Main program.

    :param argv: command line arguments
    """
    if len(argv) == 1:
        binary = os.path.basename(argv[0])
        print("{} version {}".format(binary, __version__), file=sys.stderr)
        print("Usage: {} [file ...]".format(binary), file=sys.stderr)
        sys.exit(0)
    checkfor(['ffmpeg', '-version'])
    avis = argv[1:]
    procs = []
    maxprocs = cpu_count()
    for ifile in avis:
        while len(procs) == maxprocs:
            manageprocs(procs)
        procs.append(startencoder(ifile))
    while len(procs) > 0:
        manageprocs(procs)


if __name__ == '__main__':
    main(sys.argv)
If hanging processes are an issue, you could adapt manageprocs to kill a subprocess after a certain amount of time, as sketched below.
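A rough sketch of that adaptation (hedged: it assumes startencoder is changed to also append a time() start timestamp to the tuple it returns, and it reuses warn(), sys and sleep from the script above):
from time import sleep, time

MAX_SECONDS = 600  # assumed per-conversion time budget


def manageprocs(proclist):
    """Like the version above, but also kills conversions that run too long.

    :param proclist: a list of (process, input filename, output filename,
    start time) tuples.
    """
    print('# of conversions running: {}\r'.format(len(proclist)), end='')
    sys.stdout.flush()
    for p in proclist:
        pr, ifn, ofn, started = p
        if pr is None:
            proclist.remove(p)
        elif pr.poll() is not None:
            print('Conversion of {} to {} finished.'.format(ifn, ofn))
            proclist.remove(p)
        elif time() - started > MAX_SECONDS:
            pr.kill()   # forcefully terminate the stuck ffmpeg
            pr.wait()   # reap it so it does not linger as a zombie
            warn('Conversion of {} killed after {} seconds.'.format(ifn, MAX_SECONDS))
            proclist.remove(p)
    sleep(0.5)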

Python, fping, cron

I recently wrote a program in Python (my first stab at the language) to grab IP addresses from a database, ping them using fping, and throw the response times back into that database. The app works great from the command line, but breaks when run from crontab.
Any help would be greatly appreciated. Thanks.
PYTHON CODE -- I got most of this from the interwebs
#!/usr/bin/env python
import MySQLdb as mdb
import threading
import shlex
from subprocess import Popen, PIPE, STDOUT
import subprocess

con = mdb.connect('localhost', '*****', '*****', '*****')


class myThread(threading.Thread):
    def __init__(self, ip_list):
        threading.Thread.__init__(self)
        self.ip_list = ip_list

    def run(self):
        get_ping(self.ip_list)


def get_simple_cmd_output(cmd):
    args = shlex.split(cmd)
    return Popen(args, stdout=PIPE, stderr=subprocess.STDOUT, shell=False).communicate()[0]


def get_ping(ip_list):
    ip_response_dict = {}
    cmd = "fping -C 4 -q {host}".format(host=" ".join(ip_list))
    for x in get_simple_cmd_output(cmd).strip().split(' : ', 0):
        lines = x.split("\n")
        for line in lines:
            if line.upper().find(":", 0) > 0:
                ip_data = line.split(":")
                ip_address = ip_data[0].strip()
                ip_response = ip_data[1].strip().split(" ")
                total = 0
                length = 0
                for ping_time in ip_response:
                    if ping_time != '' and ping_time != '-':
                        total += float(ping_time)
                        length += 1
                if total > 0 and length > 0:
                    ip_response_dict[ip_address] = (total / length)
                else:
                    ip_response_dict[ip_address] = 0
    save_ping_times(ip_response_dict)


def save_ping_times(ip_list):
    cursor = con.cursor()
    for key, value in ip_list.items():
        cursor.execute('INSERT INTO `Tech_AP_Ping_Time`(`IP_Address`,`Value`) VALUES ("' + key + '","' + str(round(value, 2)) + '")')
    con.commit()


threads = []
chunk_length = 100

with con:
    cur = con.cursor(mdb.cursors.DictCursor)
    cur.execute("SELECT `IP_Address` FROM `Tech_APs` WHERE (`IP_Address` IS NOT NULL AND `IP_Address` != '') ORDER BY `IP_Address`")
    rows = cur.fetchall()
    i = 0
    ip_list = []
    for row in rows:
        ip_list.append(row['IP_Address'])
    ip_list = [ip_list[i:i + chunk_length] for i in range(0, len(ip_list), chunk_length)]
    for ip_chunk in ip_list:
        thread = myThread(ip_chunk)
        thread.start()
        threads.append(thread)
CRON COMMAND - Yes, I have a full path to the script set in my actual cron
*/5 * * * * /usr/bin/python distro_pinger.py
ERROR -- I am getting this when the script runs from cron
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib64/python2.6/threading.py", line 532, in __bootstrap_inner
self.run()
File "/var/www/html/poller/distro_pinger.py", line 15, in run
get_ping(self.ip_list)
File "/var/www/html/poller/distro_pinger.py", line 25, in get_ping
for x in get_simple_cmd_output(cmd).strip().split(' : ', 0) :
File "/var/www/html/poller/distro_pinger.py", line 19, in get_simple_cmd_output
return Popen(['fping','-C','4','-q','','127.0.0.1'], stdout=PIPE, stderr=subprocess.STDOUT, shell=False).communicate()[0]
File "/usr/lib64/python2.6/subprocess.py", line 639, in __init__
errread, errwrite)
File "/usr/lib64/python2.6/subprocess.py", line 1228, in _execute_child
raise child_exception
OSError: [Errno 2] No such file or directory
Any help at all would be greatly appreciated. (Even if it's you telling me that I did everything wrong :P)
Try adding the full path to fping in your script. That should do the trick.
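For example, a minimal change along those lines (the /usr/sbin/fping location is an assumption; running "which fping" in an interactive shell will show the real path):
# Use an absolute path so the command no longer depends on cron's minimal PATH.
# /usr/sbin/fping is an assumption -- check the real location with `which fping`.
cmd = "/usr/sbin/fping -C 4 -q {host}".format(host=" ".join(ip_list))
Alternatively, setting a PATH=... line at the top of the crontab achieves the same thing.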
Which user is fping installed for? Is the crontab entry for that same user? If not, does the user the crontab entry runs as have permission to access the fping directory?
