import os
dictionaryfile = "/root/john.txt"
pgpencryptedfile = "helloworld.txt.gpg"
array = open(dictionaryfile).readlines()
for x in array:
x = x.rstrip('\n')
newstring = "echo " + x + " | gpg --passphrase-fd 0 " + pgpencryptedfile
os.popen(newstring)
I need to create something inside the for loop that will read gpg's output. When gpg outputs this string gpg: WARNING: message was not integrity protected, I need the loop to close and print Success!
How can I do this, and what is the reasoning behind it?
Thanks Everyone!
import subprocess
def check_file(dictfile, pgpfile):
# Command to run, constructed as a list to prevent shell-escaping accidents
cmd = ["gpg", "--passphrase-fd", "0", pgpfile]
# Launch process, with stdin/stdout wired up to `p.stdout` and `p.stdin`
p = subprocess.Popen(cmd, stdin = subprocess.PIPE, stdout = subprocess.PIPE)
# Read dictfile, and send contents to stdin
passphrase = open(dictfile).read()
p.stdin.write(passphrase)
# Read stdout and check for message
stdout, stderr = p.communicate()
for line in stdout.splitlines():
if line.strip() == "gpg: WARNING: message was not integrity protected":
# Relevant line was found
return True
# Line not found
return False
Then to use:
not_integrity_protected = check_file("/root/john.txt", "helloworld.txt.gpg")
if not_integrity_protected:
print "Success!"
If the "gpg: WARNING:" message is actually on stderr (which I would suspect it is), change the subprocess.Popen line to this:
p = subprocess.Popen(cmd, stdin = subprocess.PIPE, stderr = subprocess.PIPE)
..and the for loop from stdout to stderr, like this:
for line in stderr.splitlines():
Use subprocess.check_output to call gpg and break the loop based on its output.
Something like this (untested since I don't know anything about gpg):
import subprocess
dictionaryfile = "/root/john.txt"
pgpencryptedfile = "helloworld.txt.gpg"
with open(dictionaryfile, 'r') as f:
for line in f:
x = line.rstrip('\n')
cmd = ["echo " + x + " | gpg --passphrase-fd 0 " + pgpencryptedfile]
output = subprocess.check_output(cmd, shell=True)
if 'gpg: WARNING: message was not integrity protected' in output:
break
You could use the subprocess module which allows you to use:
subprocess.call(args, *, stdin, stdout, stderr, shell)
(See the Python Documentation for how to use the parameters.)
This is good because you can easily read in the exit code of whatever program you call.
For example if you change 'newstring' to:
"echo " + x + " | gpg --passphrase-fd 0 " + pgpencryptedfile | grep 'gpg: WARNING: message was not integrity protected'
grep will then return 0 if there is a match and a 1 if not matches are found. (Source)
This exit code from grep will be returned from the subprocess.call() function and you can easily store it in a variable and use an if statement.
Edit: As Matthew Adams mentions below, you could also read the exit code of gpg itself.
Related
I am trying to make a program in python to brute force a ctf C program where you have to find a fruit salad recipe in order to get the flag.
what I want to do : I want to be able to write on the stdin of the C program in python.
Problem : the stdin of the process returned by Popen has a none value while stdout and stderr are correct.
output from my program :
start bruteforce...
<_io.BufferedReader name=3>
<_io.BufferedReader name=5>
None
code :
as you can see I am using print then exit before the loop to debug the process std, I don't understand why I get None when I print print(process.stdin)
!/usr/bin/python3
import random
import os
import sys
from subprocess import *
from contextlib import contextmanager
from io import StringIO
fruit = ["banana", "raspberry", "orange", "lemon"]
comb = ""
found = False
print("start bruteforce...")
process = Popen(['./fruit'], stdout=PIPE, stderr=PIPE)
print(process.stdout)
print(process.stderr)
print(process.stdin)
sys.exit(1)
while True:
for i in range(4):
pick = random.choice(fruit)
inp, output = process.stdin, process.stdout
comb += pick
comb += " "
inp.write(pick)
inp.write("\n")
out = output.read().decode('utf-8')
if "flag" in out:
found = True
break
if found == True:
print("found : " + com)
break
print(comb + " : is not valid")
comb = ""
os.kill(p.pid, signal.CTRL_C_EVENT)
thanks you !
fixed thanks to Ackdari, I replaced :
process = Popen(['./fruit'], stdout=PIPE, stderr=PIPE)
with
process = Popen(['./fruit'], stdout=PIPE, stdin=PIPE)
since I am not using stderr anyways.
I want to be able to write on the stdin
This is forbidden, at least by POSIX standards, and makes no sense on Linux. As suggested by its name stdin is a standard input and you program should read not write it.
Of course, notice that pipe(7)-s have an input and an output. You are writing on your stdout and that is the stdin of the popen-ed process.
In Python3 using subprocess.Popen, I would like to capture the output and command return code for this "nc -z 192.168.25.14 22" command. Here is my sample code:
#!/usr/bin/env python
import urllib.request, urllib.error, urllib.parse
import subprocess
import time
# set up null file for pipe messages
nul_f = open('/dev/null', 'w')
# try loop for clean breakout with cntl-C
try:
with open('/mnt/usbdrive/output/Urls.txt') as f:
for line in f:
data = line.split()
commands = ['nc', '-vZ', data[1], data[0]]
print(commands)
try:
ncdmp = subprocess.Popen(commands , stderr=subprocess.PIPE, stdout=subprocess.PIPE,)
except OSError:
print ("error: popen")
exit(-1) # if the subprocess call failed, there's not much point in continuing
ncdmp.wait()
if ncdmp.returncode != 0:
print(" os.wait:exit status != 0\n")
else:
print ("os.wait:", ncdmp.pid, ncdmp.returncode)
print("STDERR is ", ncdmp.stderr)
print("STDOUT is ", ncdmp.stdout)
print("STDIN is ", ncdmp.stdin)
except KeyboardInterrupt:
print('Done', i)
# clean up pipe stuff
ncdmp.terminate()
ncdmp.kill()
nul_f.close()
and an example of the output is
*Commands is ['nc', '-vZ', '192.168.25.14', '22']
os.wait:exit status != 0
STDERR is <_io.BufferedReader name=12>
STDOUT is <_io.BufferedReader name=9>
STDIN is <_io.BufferedWriter name=8>*
I'm assuming that I have an error in my code or logic, but I can not figure it out. I have used similar code for other commands like ssh and ls without issues. For this "nc" command I get the same set of output/messages regardless of whether or not there is an open port 22 at the host address.
Thanks...RDK
OK, as I did not get any useful replies to this question, I changed the code from subprocess.Popen to subprocess.run as shown below. This modification worked for my requirements as ncdup.stderr contained the information I was looking for.
commands = shlex.split("nc -vz " + "-w 5 " + data[1] + " " + data[0])
try:
ncdmp = subprocess.run(commands , capture_output=True)
except OSError:
print ("error: popen")
exit(-1)
err_line = str(ncdmp.stderr)
I'm trying to write a Python function that transforms a given coordinate system to another using gdal. Problem is that I'm trying to execute the command as one string, but in shell, I have to press enter before entering the coordinates.
x = 1815421
y = 557301
ret = []
tmp = commands.getoutput( 'gdaltransform -s_srs \'+proj=lcc +lat_1=34.03333333333333
+lat_2=35.46666666666667 +lat_0=33.5 +lon_0=-118 +x_0=2000000 +y_0=500000 +ellps=GRS80
+units=m +no_defs\' -t_srs epsg:4326 \n' + str(x) + ' ' + str(y) )
I tried it using '\n', but that doesn't work.
My guess is that you run gdaltransform by pressing Enter and the coordinates are read by the program itself from its stdin, not the shell:
from subprocess import Popen, PIPE
p = Popen(['gdaltransform', '-s_srs', ('+proj=lcc '
'+lat_1=34.03333333333333 '
'+lat_2=35.46666666666667 '
'+lat_0=33.5 '
'+lon_0=-118 +x_0=2000000 +y_0=500000 +ellps=GRS80 '
'+units=m +no_defs'), '-t_srs', 'epsg:4326'],
stdin=PIPE, stdout=PIPE, universal_newlines=True) # run the program
output = p.communicate("%s %s\n" % (x, y))[0] # pass coordinates
from subprocess import *
c = 'command 1 && command 2 && command 3'
# for instance: c = 'dir && cd C:\\ && dir'
handle = Popen(c, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
print handle.stdout.read()
handle.flush()
If i'm not mistaken, the commands will be executed over a "session" and thus keeping whatever niformation you need in between the commands.
More correctly, using shell=True (from what i've been tought) is that it's supposed to be used if given a string of commands rather than a list. If you'd like to use a list suggestions are to do as follows:
import shlex
c = shlex.split("program -w ith -a 'quoted argument'")
handle = Popen(c, stdout=PIPE, stderr=PIPE, stdin=PIPE)
print handle.stdout.read()
And then catch the output, Or you could work with a open stream and use handle.stdin.write() but it's a bit tricky.
Unless you only want to execute, read and die, .communicate() is perfect, or just .check_output(<cmd>)
Good information n how Popen works can be found here (altho different topic): python subprocess stdin.write a string error 22 invalid argument
Solution
Anyway, this should work (you have to redirect STDIN and STDOUT):
from subprocess import *
c = 'gdaltransform -s_srs \'+proj=lcc +lat_1=34.03333333333333 +lat_2=35.46666666666667 +lat_0=33.5 +lon_0=-118 +x_0=2000000 +y_0=500000 +ellps=GRS80 +units=m +no_defs\' -t_srs epsg:4326 \n' + str(x) + ' ' + str(y) + '\n'
handle = Popen(c, stdin=PIPE, stderr=PIPE, stdout=PIPE, shell=True)
print handle.stdout.read()
handle.flush()
I have the following code which appears to work, for chaining pipes together in python with subprocess while reading / writing to them line by line (without using communicate() upfront). The code just calls a Unix command (mycmd), reads its output, then writes that to the stdin of another Unix command (next_cmd), and redirects the output of that last command to a file.
# some unix command that uses a pipe: command "a"
# writes to stdout and "b" reads it and writes to stdout
mycmd = "a | b"
mycmd_proc = subprocess.Popen(mycmd, shell=True,
stdin=sys.stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# nextCmd reads from stdin, and I'm passing it mycmd's output
next_cmd = "nextCmd -stdin"
output_file = open(output_filename, "w")
next_proc = subprocess.Popen(next_cmd, shell=True,
stdin=subprocess.PIPE,
stdout=output_file)
for line in iter(mycmd.stdout.readline, ''):
# do something with line
# ...
# write it to next command
next_proc.stdin.write(line)
### If I wanted to call another command here that passes next_proc output
### line by line to another command, would I need
### to call next_proc.communicate() first?
next_proc.communicate()
output_file.close()
This appears to work, and it only calls communicate() at the end of the command.
I'm trying to extend this code to add another command so you can do:
mycmd1 | mycmd2 | mycmd3 > some_file
meaning: line by line, read output of mycmd1 from Python, process the line, feed it to mycmd2, read mycmd2's output and line by line process it and feed it to mycmd3 which in turns puts its output in some_file. Is this possible or is this bound to end in deadlock/blocking/unflushed buffers? Note that I'm not just calling three unix commands as a pipe since I want to intervene with Python in between and post-process each command's output line by line before feeding it to the next command.
I want to avoid calling communicate and loading all the output into memory - instead I want to parse it line by line. thanks.
This should handle an arbitrary number of commands:
import sys
import subprocess
def processFirst(out):
return out
def processSecond(out):
return out
def processThird(out):
return out
commands = [("a|b", processFirst), ("nextCmd -stdin", processSecond), ("thirdCmd", processThird)]
previous_output = None
for cmd,process_func in commands:
if previous_output is None:
stdin = sys.stdin
else:
stdin = subprocess.PIPE
proc = subprocess.Popen(cmd, shell=True,
stdin = stdin,
stdout = subprocess.PIPE)
if previous_output is not None:
proc.stdin.write(previous_output)
out,err = proc.communicate()
out = process_func(out)
previous_output = out
Just add any command you want to run to the list of commands along with the function that should process its output. The output from the last command will end up being in previous_output at the end of the loop.
To avoid any deadlocking/buffering/etc issues, you simply run each command to completion using proc.communicate() which will return the output(instead of reading it directly as in your example). You then feed that into the next command before letting it run to completion, so on and so forth.
Edit: Just noticed that you don't want to use communicate() upfront and that you want to react line by line. I will edit my answer in a bit to address that
This answer provides an example on how to read line-by-line from a pipe without blocking using select.select().
Below is an example that uses it for your particular case:
import sys
import subprocess
import select
import os
class LineReader(object):
def __init__(self, fd, process_func):
self._fd = fd
self._buf = ''
self._process_func = process_func
self.next_proc = None
def fileno(self):
return self._fd
def readlines(self):
data = os.read(self._fd, 4096)
if not data:
# EOF
if self.next_proc is not None:
self.next_proc.stdin.close()
return None
self._buf += data
if '\n' not in data:
return []
tmp = self._buf.split('\n')
tmp_lines, self._buf = tmp[:-1], tmp[-1]
lines = []
for line in tmp_lines:
lines.append(self._process_func(line))
if self.next_proc is not None:
self.next_proc.stdin.write("%s\n" % lines[-1])
return lines
def processFirst(line):
return line
def processSecond(line):
return line
def processThird(line):
return line
commands = [("a|b", processFirst), ("nextCmd -stdin", processSecond), ("thirdCmd", processThird)]
readers = []
previous_reader = None
for cmd,process_func in commands:
if previous_reader is None:
stdin = sys.stdin
else:
stdin = subprocess.PIPE
proc = subprocess.Popen(cmd, shell=True,
stdin = stdin,
stdout = subprocess.PIPE)
if previous_reader is not None:
previous_reader.next_proc = proc
previous_reader = LineReader(proc.stdout.fileno(), process_func)
readers.append(previous_reader)
while readers:
ready,_,_ = select.select(readers, [], [], 10.0)
for stream in ready:
lines = stream.readlines()
if lines is None:
readers.remove(stream)
I have command like this.
wmctrl -lp | awk '/gedit/ { print $1 }'
And I want its output within python script, i tried this code
>>> import subprocess
>>> proc = subprocess.Popen(["wmctrl -lp", "|","awk '/gedit/ {print $1}"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
>>> proc.stdout.readline()
'0x0160001b -1 6504 beer-laptop x-nautilus-desktop\n'
>>> proc.stdout.readline()
'0x0352f117 0 6963 beer-laptop How to get output from external command combine with Pipe - Stack Overflow - Chromium\n'
>>> proc.stdout.readline()
'0x01400003 -1 6503 beer-laptop Bottom Expanded Edge Panel\n'
>>>
It seem my code is wrong only wmctrl -lp was execute, and | awk '{print $1}' is omitted
My expect output would like 0x03800081
$ wmctrl -lp | awk '/gedit/ {print $1}'
0x03800081
Does one please help.
With shell=True, you should use a single command line instead of an array, otherwise your additional arguments are interpreted as shell arguments. From the subprocess documentation:
On Unix, with shell=True: If args is a string, it specifies the command string to execute through the shell. If args is a sequence, the first item specifies the command string, and any additional items will be treated as additional shell arguments.
So your call should be:
subprocess.Popen("wmctrl -lp | sed /gedit/ '{print $1}'", shell=True, ...
I think you may also have an unbalanced single quote in there.
Because you are passing a sequence in for the program, it thinks that the pipe is an argument to wmcrtrl, such as if you did
wmctrl -lp "|"
and thus the actual pipe operation is lost.
Making it a single string should indeed give you the correct result:
>>> import subprocess as s
>>> proc = s.Popen("echo hello | grep e", shell=True, stdout=s.PIPE, stderr=s.PIPE)
>>> proc.stdout.readline()
'hello\n'
>>> proc.stdout.readline()
''
After some research, I have the following code which works very well for me. It basically prints both stdout and stderr in real time. Hope it helps someone else who needs it.
stdout_result = 1
stderr_result = 1
def stdout_thread(pipe):
global stdout_result
while True:
out = pipe.stdout.read(1)
stdout_result = pipe.poll()
if out == '' and stdout_result is not None:
break
if out != '':
sys.stdout.write(out)
sys.stdout.flush()
def stderr_thread(pipe):
global stderr_result
while True:
err = pipe.stderr.read(1)
stderr_result = pipe.poll()
if err == '' and stderr_result is not None:
break
if err != '':
sys.stdout.write(err)
sys.stdout.flush()
def exec_command(command, cwd=None):
if cwd is not None:
print '[' + ' '.join(command) + '] in ' + cwd
else:
print '[' + ' '.join(command) + ']'
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
)
out_thread = threading.Thread(name='stdout_thread', target=stdout_thread, args=(p,))
err_thread = threading.Thread(name='stderr_thread', target=stderr_thread, args=(p,))
err_thread.start()
out_thread.start()
out_thread.join()
err_thread.join()
return stdout_result + stderr_result
When needed, I think it's easy to collect the output or error in a string and return.