UDP tracker only gives me my IP as the answer to an announce request - Python

I've recently been trying to write a torrent client in Python, and I've just got the UDP announce protocol to work.
The tracker accepts my connect request just fine, but when I announce to it, the only peer it returns is my own IP and port...
I've looked at the same torrents in other torrent clients and they show multiple working peers, while my request only ever returns my own machine (I've tried this on many torrents; all of them return just my IP and port).
Here's the code for the sending function itself:
async def announce_udp(self, try_num=1):
    self.sock.settimeout(15)
    answer = {}
    inner_while = False
    while try_num < 4:
        while try_num < 4:
            try:
                print("trying to send")
                sended = self.send(1, self.announce_payload())
                print("sending the following packet: {0}".format(sended))
                print(self.url)
                inner_while = True
                break
            except Exception:
                print("problem in sending")
                try_num += 1
        if not inner_while:
            break
        try:
            answer = self.interpret(15)
            break
        except Exception:
            print("problem in receiving")
            try_num += 1
    print("announce answer is: {0}".format(answer))
    return answer
Here's the code for the payload-building function:
def announce_payload(self, downloaded=0, left=0, uploaded=0, event=0, key=get_transaction_id()):
    payload = [self.torrent.get_torrent_info_hash_decoded(), get_peer_id().encode(), downloaded,
               self.torrent.get_torrent_size(), uploaded, event, 0, key, -1, 6988]
    p_tosend = None
    try:
        p_tosend = struct.pack('!20s20sqqqiIIiH', *payload)
    except Exception as e:
        print("there was an error: {0}".format(e))
    return p_tosend
Here's the code for the interpret and process functions:
def interpret(self, timeout=10):
    self.sock.settimeout(timeout)
    print("got to interpret")
    try:
        response = self.sock.recv(10240)
        print("answer received")
    except socket.timeout:
        print("no answer, try again")
        raise TrackerResponseException("no answer", 0)
    headers = response[:8]
    payload = response[8:]
    action, trans_id = struct.unpack('!ll', headers)
    try:
        trans = self.transactions[trans_id]
    except KeyError:
        raise TrackerResponseException("InvalidTransaction: id not found", trans_id)
    try:
        trans['response'] = self.process(action, payload, trans)
    except Exception as e:
        trans['response'] = None
        print("error occurred: {0}".format(e))
    trans['completed'] = True
    del self.transactions[trans_id]
    #print(trans)
    return trans
def process_announce(self, payload, trans):
    response = {}
    info = payload[:struct.calcsize("!lll")]
    interval, leechers, seeders = struct.unpack("!lll", info)
    print(interval, leechers, seeders, "noamsssssss")
    peer_data = payload[struct.calcsize("!lll"):]
    peer_size = struct.calcsize("!lH")
    num_of_peers = int(len(peer_data) / peer_size)
    print("the number of peers is: {0} and the peer data is: {1}".format(num_of_peers, peer_data))
    print()
    peers = []
    for peer_offset in range(num_of_peers):
        off = peer_size * peer_offset
        peer = peer_data[off:off + peer_size]
        addr, port = struct.unpack("!lH", peer)
        peers.append({
            'addr': socket.inet_ntoa(struct.pack('!L', addr)),
            'port': port,
        })
    print(payload)
    return dict(interval=interval, leechers=leechers, seeders=seeders, peers=peers)
I'm sorry if any of this is irrelevant, but I want to give you all of the code in case it tells you something.
(get_peer_id() returns a random peer id per the tracker protocol specification, and get_transaction_id() returns random.randint(0, 1 << 32 - 1).)
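For reference, a minimal sketch of what those two helpers look like (the peer-id prefix below is illustrative, not my actual value):

import random
import string

def get_peer_id():
    # Azureus-style peer id: an 8-character client prefix plus 12 random
    # characters, for a total of 20 bytes once encoded.
    suffix = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(12))
    return "-PC0001-" + suffix

def get_transaction_id():
    # As described above. Note that "1 << 32 - 1" parses as "1 << 31" because
    # "-" binds tighter than "<<", so the range is 0..2**31 rather than the
    # full 32-bit space; it still fits an unsigned 32-bit struct field.
    return random.randint(0, 1 << 32 - 1)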
EDIT:
Alright, I've found the problem and now I'm feeling pretty dumb...
It turns out that even with the UDP tracker, the info hash you send has to be the SHA-1 hash (the raw 20-byte digest), not the plain info data.
Hopefully this helps someone who gets stuck on the same problem :)
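For anyone hitting the same wall, the fix boils down to something like this: the 20-byte info hash is the SHA-1 digest of the bencoded "info" dictionary from the .torrent file, and that raw digest is what goes into the announce packet. A sketch, assuming a bencode library such as bencodepy and a decoded metainfo dict (the helper name is not from my code):

import hashlib
import bencodepy  # assumption: any bencode library with an encode() works here

def compute_info_hash(metainfo):
    # Re-encode only the "info" dictionary and hash it; the tracker expects the
    # raw 20-byte SHA-1 digest, not a hex string and not the dictionary itself.
    info_bencoded = bencodepy.encode(metainfo[b"info"])
    return hashlib.sha1(info_bencoded).digest()  # 20 bytes, fits the "!20s" field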

Related

Browser doesn't respond well to my HTTPS proxy

I've built my own HTTPS proxy, and whenever I send data to the browser, the browser responds with nothing, and only after a long time.
Basically, all the proxy should do is forward the message to the "browser" socket (the remote host from the CONNECT request), get the response, and forward it back to the client.
The code of the proxy:
import socket
import select
import threading

serverSock = socket.socket()
serverSock.bind(('0.0.0.0', 8080))
serverSock.listen(3)

waiting_clients = {}   # client : browser
users_dict = {}
open_clients = {}
browsers_clients = {}  # browser : client

threading.Thread(target=browserCom).start()

while True:
    try:
        rlist, wlist, xlist = select.select(list(users_dict.keys()) + [serverSock], [], [], 0.3)
    except:
        pass
    else:
        for current_socket in rlist:
            if current_socket is serverSock:
                # new client
                client, address = serverSock.accept()
                print(f'{address} - connected to proxy')
                # add to dictionary
                users_dict[client] = address
                open_clients[address] = client
            else:
                # receive info
                receiving = True
                msg = bytearray()
                while receiving:
                    try:
                        data = current_socket.recv(1024)
                    except Exception as e:
                        print(e, 3)
                        if current_socket in users_dict.keys():
                            disconnect(users_dict[current_socket])
                        else:
                            current_socket.close()
                        break
                    else:
                        msg.extend(data)
                        # got the full msg
                        if len(data) < 1024:
                            receiving = False
                if len(msg) == 0:
                    if current_socket in users_dict.keys():
                        disconnect(users_dict[current_socket])
                else:
                    print("GOT FROM CLIENT", msg)
                    if current_socket in waiting_clients.keys():
                        # sending the data from client to browser
                        waiting_clients[current_socket].send(msg)
                    else:
                        msg = msg.decode()
                        msgSplit = msg.split()
                        address = msgSplit[1]
                        if address.split(':')[1].isnumeric():
                            if msg.startswith('CONNECT'):
                                browserLink, browserPort = address.split(':')
                                browserPort = int(browserPort)
                                browserIP = socket.gethostbyname(browserLink)
                                address = (browserIP, browserPort)
                                # connect to the site
                                browserSocket = socket.socket()
                                print(address)
                                browserSocket.connect((browserIP, browserPort))
                                waiting_clients[current_socket] = browserSocket
                                browsers_clients[browserSocket] = current_socket
                                msg_ret = "HTTP/1.1 200 Connection established\r\n\r\n"
                                sendMsg(users_dict[current_socket], msg_ret)
The proxy is able to make the connection after the CONNECT and notify the client, but the data that then comes back from the browser is read by a function running in the background:
def browserCom():
    while True:
        try:
            rlist, wlist, xlist = select.select(list(browsers_clients.keys()), [], [], 0.3)
        except:
            pass
        else:
            for current_browser in rlist:
                # receive data from the browser
                receiving = True
                resp_msg = bytearray()
                while receiving:
                    try:
                        data = current_browser.recv(1024)
                    except Exception as e:
                        print(e)
                        del waiting_clients[browsers_clients[current_browser]]
                        current_browser.close()
                        browsers_clients[current_browser].close()
                        del browsers_clients[current_browser]
                    else:
                        resp_msg.extend(data)
                        # got the full msg
                        if len(data) < 1024:
                            receiving = False
                print("RESPONSE FROM BROWSER", resp_msg)
                # sending the msg to the client
                sendMsg(users_dict[browsers_clients[current_browser]], resp_msg)
I have to wait a long time for a response, and most of the responses come back empty (mostly bytearray(b'')). Even when I do get a response, I send it back to the client:
# sending the msg to the client
sendMsg(users_dict[browsers_clients[current_browser]], resp_msg)
using this function:
def sendMsg(address, msg):
    """
    :param address: address of the client to send to
    :param msg: msg to send
    :return: sends the msg to that client
    """
    if address in open_clients.keys():
        sock = open_clients[address]
        if type(msg) == str:
            msg = msg.encode()
        try:
            sock.send(msg)
        except Exception as e:
            print(e, 4)
            disconnect(address)
I hope you are able to understand my code; if something is unclear, please ask in the comments and I will try to explain as soon as possible.
This is the best I can do to keep the code minimal for this problem without removing crucial parts.
My mistake was that I didn't understand that while tunneling, both the browser and the client keep exchanging messages. Adding another select() over all the browser sockets let me check the data coming from every browser, and now it works.
The code above is updated and works.
Basically, what I added is:
def browserCom():
    while True:
        try:
            rlist, wlist, xlist = select.select(list(browsers_clients.keys()), [], [], 0.3)
        except:
            pass
        else:
            for current_browser in rlist:
                # receive data from the browser
                receiving = True
                resp_msg = bytearray()
                while receiving:
                    try:
                        data = current_browser.recv(1024)
                    except Exception as e:
                        print(e)
                        del waiting_clients[browsers_clients[current_browser]]
                        current_browser.close()
                        browsers_clients[current_browser].close()
                        del browsers_clients[current_browser]
                    else:
                        resp_msg.extend(data)
                        # got the full msg
                        if len(data) < 1024:
                            receiving = False
                # disconnecting browser
                if resp_msg == bytearray(b''):
                    del waiting_clients[browsers_clients[current_browser]]
                    current_browser.close()
                    browsers_clients[current_browser].close()
                    del browsers_clients[current_browser]
                print("RESPONSE FROM BROWSER", resp_msg)
                # sending the msg to the client
                if current_browser in browsers_clients and browsers_clients[current_browser] in users_dict:
                    sendMsg(users_dict[browsers_clients[current_browser]], resp_msg)
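For reference, the same idea can be written as a single select() loop over both ends of every tunnel, which avoids the separate background thread. This is only a sketch of the pattern, not the code I actually run; it assumes a dict (here called tunnels) that maps each socket to its peer in both directions:

import select
import socket

def pump_tunnels(tunnels):
    # tunnels: dict mapping socket -> peer socket, containing BOTH directions
    # (client -> browser and browser -> client), so one select sees all traffic.
    while tunnels:
        rlist, _, _ = select.select(list(tunnels), [], [], 0.3)
        for sock in rlist:
            peer = tunnels.get(sock)
            if peer is None:
                continue  # already torn down earlier in this pass
            try:
                data = sock.recv(4096)
            except OSError:
                data = b""
            if data:
                peer.sendall(data)  # forward whatever arrived, unmodified
            else:
                # an empty read means this side closed; tear down both directions
                for s in (sock, peer):
                    tunnels.pop(s, None)
                    s.close()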

Only sometimes, I get _pickle.UnpicklingError: pickle data was truncated

I'm getting occasional unpickling errors, but more often than not it works fine. Essentially, I'm generating images on the server side and using pickle to transmit them to the client side.
I use my send() function to let the client know how many bytes the pickled data is, so it can call rscSock.recv() with that number of bytes + 1 when I use conn.send(graphs), precisely to prevent this. And it works most of the time. Occasionally I still get "pickle data was truncated", and I can't figure out why. I tried using a while loop to receive the data in blocks of 4096 from code I found on here (python 3.6 socket pickle data was truncated), but it hangs on the recv. Not sure what to do.
Server Code:
elif cmd['cmd'] == 'RSC_VIEW_GRAPHS':
    graphs = pickle.dumps(genGraphs(userSession['uid'], cmd['arg0'], cmd['arg1']))
    send(conn, 'RSC_IMG_DATA', len(graphs))
    conn.send(graphs)
    del graphs
Client Code:
send(rscSock, 'RSC_VIEW_GRAPHS', radioVar.get(), str(dateObj.date()))
resp = receive(rscSock)
if resp['resp'] == 'RSC_IMG_DATA':
    graphs = pickle.loads(rscSock.recv(int(resp['arg0'])+1))
The graphs variable is filled by the genGraphs() function, which returns a list of BytesIO objects, as shown by the end of genGraphs():
imgs = []
for x in statDict:
    # Filler code removed, irrelevant to post
    imgs.append(io.BytesIO())
    plt.savefig(imgs[-1], format='png')
    plt.close()
return imgs
And lastly, here are the send() and receive() functions for both the client and server:
Client
def send(conn, cmd, *argv):
    try:
        cmdObj = {'cmd': cmd}
        y = 0
        for x in argv:
            cmdObj['arg'+str(y)] = x
            y += 1
        cmdObj['key'] = sessionKey
        obj = str.encode(json.dumps(cmdObj))
        objLen = str(len(obj)).encode()
        if conn.send(objLen):
            if conn.recv(12).decode() == "RSC_LEN_OK":
                if conn.send(obj):
                    if debug == 1: print("Sending '", obj, "' with length '", objLen, "'")
                    return True
        return False
    except (ConnectionResetError, ConnectionAbortedError):
        if cmdObj['arg1'] == 0:
            return True
        else:
            messagebox.showerror("Real Estate Stat Counter", "Lost server connection. Please log back in.")
            return False

def receive(conn):
    try:
        dataSize = int(conn.recv(8))
        if dataSize < 16384:
            conn.send(str.encode("RSC_LEN_OK"))
            data = json.loads(conn.recv(dataSize).decode())
            if debug == 1: print("Received '", data, "' with length '", dataSize, "'")
            return data
        else:
            conn.send(str.encode("RSC_LEN_NO"))
            return False
    except (OSError, UnicodeDecodeError, json.decoder.JSONDecodeError) as e:
        return False
Server
def send(conn, resp, *argv):
    try:
        respObj = {'resp': resp}
        y = 0
        for x in argv:
            respObj["arg"+str(y)] = x
            y += 1
        obj = str.encode(json.dumps(respObj))
        objLen = str(len(obj)).encode()
        if conn.send(objLen):
            if conn.recv(12).decode() == "RSC_LEN_OK":
                if conn.send(obj):
                    if debug == 1: print("Sending '", obj, "' with length '", objLen, "'")
                    return True
        logging.log("WARN: send() failure")
        return False
    except ConnectionResetError:
        logging.log("INFO: Client connection lost, terming socket")
        conn.close()
        return False

def receive(conn):
    try:
        dataSize = int(conn.recv(8))
        if dataSize < 16384:
            conn.send(str.encode("RSC_LEN_OK"))
            data = json.loads(conn.recv(dataSize).decode())
            if debug == 1: print("Received '", data, "' with length '", dataSize, "'")
            return data
        else:
            conn.send(str.encode("RSC_LEN_NO"))
            return False
    except (OSError, UnicodeDecodeError, json.decoder.JSONDecodeError) as e:
        logging.log("WARN: receive() received raw data:", conn.recv(16384).decode())
        logging.log("WARN: receive() exception:", e)
        return False
    except ValueError:
        logging.log("WARN: receive() did not get a valid byte length first")
        return False
So based on jasonharper's comment, I ended up revising only the client code:
data = []
while len(b"".join(data)) < int(resp['arg0']):
    data.append(rscSock.recv(4096))
graphs = pickle.loads(b"".join(data))
Now it checks in a loop whether it has received the number of bytes it was originally told to expect before unpickling the BytesIO object array.
Working well so far!
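For completeness, the underlying issue is that a single recv() call is only guaranteed to return up to the requested number of bytes, so a large pickle can arrive in several chunks. A helper like this makes that explicit (a sketch of the same loop, not the code I ended up using):

def recv_exact(sock, nbytes):
    """Keep calling recv() until exactly nbytes have arrived (or the peer closes)."""
    chunks = []
    remaining = nbytes
    while remaining > 0:
        chunk = sock.recv(min(remaining, 4096))
        if not chunk:  # connection closed before everything arrived
            raise ConnectionError("socket closed with %d bytes still expected" % remaining)
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)

# usage: graphs = pickle.loads(recv_exact(rscSock, int(resp['arg0'])))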

How to fix: "TypeError: 'bool' object is not subscriptable"

I am currently working on a basic client/server application and implementing a simple RSA / public-key authentication scheme. I have run into this error and cannot, for the life of me, figure it out.
I am using the latest version of Python.
server.py
def getUserData(username):
    global privateKeysFilename
    filename = privateKeysFilename
    with open(filename, "r") as keysFile:
        for line in keysFile:
            line = [token.rstrip("\n") for token in line.split(",")]
            if(username == line[0]):
                if DEBUG:
                    print("\n=== DEBUG\nUser data = %s\n===\n" %(line))
                return line
    return False
def running(self):
    global BUFFER, DEBUG, start, final
    while 1:
        print('Waiting for a connection')
        connection, client_address = self.server_socket.accept()
        connection.send("Successful connection!".encode())
        x = randint(start, final)
        self.fx = function(x)
        connection.send(str(x).encode())
        try:
            # Output that a client has connected
            print('connection from', client_address)
            write_connection()
            # Set the time that the client connected
            start_time = datetime.datetime.now()
            # Loop until the client disconnects from the server
            while 1:
                # Receive information from the client
                userData = connection.recv(BUFFER)
                #data = connection.recv(1024).decode()
                if(userData != "0"):
                    #define split character
                    ch = ","
                    userData = userData.split(ch.encode())
                    username = userData[0]
                    r = int(userData[1])
                    userData = getUserData(username)
                    e, n = int(userData[1]), int(userData[2])
                    y = modularPower(r, e, n)
                    if DEBUG:
                        print("=== DEBUG\ne = %d\nn = %d\nr = %d\ny = %d\n===\n" %(e, n, r, y))
                    if(self.fx == y):
                        #if authentication passed
                        connection.send("Welcome!!!".encode())
                    else:
                        connection.send("Failure!!!".encode())
                    if (userData != 'quit') and (userData != 'close'):
                        print('received "%s" ' % userData)
                        connection.send('Your request was successfully received!'.encode())
                        write_data(userData)
                        # Check the dictionary for the requested artist name
                        # If it exists, get all their songs and return them to the user
                        if userData in self.song_dictionary:
                            songs = ''
                            for i in range(len(self.song_dictionary.get(userData))):
                                songs += self.song_dictionary.get(userData)[i] + ', '
                            songs = songs[:-2]
                            print('sending data back to the client')
                            connection.send(songs.encode())
                            print("Sent", songs)
                        # If it doesn't exist return 'error' which tells the client that the artist does not exist
                        else:
                            print('sending data back to the client')
                            connection.send('error'.encode())
                else:
                    # Exit the while loop
                    break
            # Write how long the client was connected for
            write_disconnection(start_time)
        except socket.error:
            # Catch any errors and safely close the connection with the client
            print("There was an error with the connection, and it was forcibly closed.")
            write_disconnection(start_time)
            connection.close()
            data = ''
        finally:
            if data == 'close':
                print('Closing the connection and the server')
                # Close the connection
                connection.close()
                # Exit the main While loop, so the server does not listen for a new client
                break
            else:
                print('Closing the connection')
                # Close the connection
                connection.close()
                # The server continues to listen for a new client due to the While loop
and here is the output with the error:
Traceback (most recent call last):
  File "server.py", line 165, in running
    e, n = int(userData[1]), int(userData[2])
TypeError: 'bool' object is not subscriptable
Any help would be greatly appreciated! :)
By using userData[n] you are trying to access the nth element of a subscriptable object.
This can be a list, dict, tuple, or even a string.
The error you see means that your object userData is none of those types; it's a bool (True or False).
Since it's the result of calling the function getUserData(), I recommend you check the return type of that function, make sure it is one of the types mentioned, and revise your code logic.
[Update]
Looking at getUserData(), it only returns line if the username is found; otherwise it returns False, which is not handled in the main code.
I suggest editing the code to include a success status in the return value, as follows:
def getUserData(username):
    global privateKeysFilename
    filename = privateKeysFilename
    with open(filename, "r") as keysFile:
        for line in keysFile:
            line = [token.rstrip("\n") for token in line.split(",")]
            if(username == line[0]):
                if DEBUG:
                    print("\n=== DEBUG\nUser data = %s\n===\n" %(line))
                return True, line
    return False, None
Then, in your code, when you call getUserData() you check for success before parsing the data, like this:
success, line = getUserData(username)
if success:
    e, n = int(line[1]), int(line[2])
    y = modularPower(r, e, n)
else:
    # Your failure condition
    pass
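Alternatively, if you prefer not to change the return signature (this is my suggestion, using the names from your question, not something your code already does), you can keep getUserData() as it is and just test for the False case before indexing:

userData = getUserData(username)
if userData is False:
    # unknown user: report the failure instead of crashing on userData[1]
    connection.send("Failure!!!".encode())
else:
    e, n = int(userData[1]), int(userData[2])
    y = modularPower(r, e, n)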

PyZMQ not receiving messages between Python 2.7 and Python 3.5

It seems that the difference between byte strings and str in Python 2.7 versus Python 3.5 is an issue for pyzmq PUB/SUB.
I have two pub/sub brokers, one in Python 2.7 and the other in Python 3.5.
I have a subscriber that subscribes to both pub/sub brokers, but it does not receive all published messages.
How do I get my pub/sub brokers to subscribe to and re-publish all messages published on their IP:port?
Sample code:
def subscribeformessages(self):
    context = zmq.Context(1)
    xsub = context.socket(zmq.SUB)
    xsub_url = "tcp://%s:%s" % (self.ipaddress, self.xsub_url)
    xsub.setsockopt_string(zmq.SUBSCRIBE, '')
    xsub.setsockopt(zmq.SUBSCRIBE, b'')
    if not is_py2:
        xsub.setsockopt_string(zmq.SUBSCRIBE, "")
    else:
        xsub.setsockopt(zmq.SUBSCRIBE, "")
        xsub.setsockopt(zmq.SUBSCRIBE, b"")
        xsub.setsockopt_unicode(zmq.SUBSCRIBE, u"", encoding='utf-8')
        xsub.setsockopt_string(zmq.SUBSCRIBE, u"")
    xsub.connect(xsub_url)
    try:
        while self.running:
            try:
                time.sleep(.2)
                receive = xsub.recv_multipart()
                self.print_message_queue.put("sub recv'd: %s" % receive)
                self.pub_local_que.put(receive)
                self.publish_queue.put(receive)
            except zmq.ZMQError as err:
                print(err)
    ....
Publisher sample:
def sendtopicerequesttoexchange(self):
    context = zmq.Context(1).instance()
    sock = context.socket(zmq.PUB)
    sock.linger = 0
    try:
        sock.bind("tcp://ip:port")
    except:
        sock.connect("tcp://ip:port")
    topicxml = xmltree.Element("MessageXML")
    topicxml.attrib['NodeAddr'] = '040000846'
    topicxml.attrib['Payload'] = 'HeyBob'
    replymsg = xmltree.tostring(topicxml)
    msg = [str.encode("send.downlink"), str(replymsg).encode('utf-8')]
    msg[0] = str(msg[0]).encode('utf-8')
    try:
        count = 0
        while True:
            time.sleep(4)
            sock.send_multipart(msg)
            print("msg %s" %(msg))
            count += 1
            if count > 1:
                break
            time.sleep(.2)
    except Exception as bob:
        print(bob)
    finally:
        time.sleep(5)
        sock.setsockopt(zmq.LINGER, 0)
        sock.close()
Any ideas?
I found an answer here: http://pyzmq.readthedocs.io/en/latest/pyversions.html
I changed the publisher to:
def sendtoexchange_pete(self):
    context = zmq.Context(1).instance()
    sock = context.socket(zmq.PUB)
    sock.linger = 0
    try:
        sock.bind("tcp://ip:port")
    except:
        sock.connect("tcp://ip:port")
    topicxml = xmltree.Element("Downlink_75")
    topicxml.attrib["NodeAddr"] = "$301$0-0-0-040000846"
    topicxml.attrib["Payload"] = "HeyBob Pete's Xchnge 75"
    replymsg = xmltree.tostring(topicxml)
    # By forcing everything to bytes I was able to get this to work
    msg = [b'send.downlink', b'%s' % replymsg]
    try:
        count = 0
        while True:
            time.sleep(4)
            sock.send_multipart(msg)
            print("msg %s" %(msg))
            .....
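The short version of that docs page is that ZeroMQ frames are always raw bytes on Python 3, so anything you publish (or pass to the plain setsockopt(zmq.SUBSCRIBE, ...)) has to be encoded explicitly. On the subscriber side, the mirror image of the fix looks roughly like this (a sketch, assuming the two-frame [topic, payload] messages sent above and UTF-8 content):

frames = xsub.recv_multipart()       # every frame arrives as bytes
topic = frames[0].decode('utf-8')    # b'send.downlink' -> 'send.downlink'
payload = frames[1].decode('utf-8')  # the XML string, the same on 2.7 and 3.5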

Detect disconnect on a persistent curl connection

Where should I check for a disconnect in a pycurl persistent connection?
Somewhere in my script the connection is dying, timing out, or throwing an error, but the script stays open. I need to detect the problem so I can restart the script.
We are connecting to Gnip (a social media data provider).
My code is here: https://gist.github.com/3353033
I've read over the options for libcurl, and I read through the PHP curl_setopt docs because they also leverage libcurl.
class Client:
    time_start = time.time()
    content = ""

    def __init__(self, options):
        self.options = options
        self.buffer = ""
        self.conn = pycurl.Curl()
        self.conn.setopt(pycurl.USERPWD, "%s:%s" % (USER, PASS))
        self.conn.setopt(pycurl.ENCODING, 'gzip')
        self.conn.setopt(pycurl.URL, STREAM_URL)
        self.conn.setopt(pycurl.WRITEFUNCTION, self.on_receive)
        self.conn.setopt(pycurl.FOLLOWLOCATION, 1)
        self.conn.setopt(pycurl.MAXREDIRS, 5)
        self.conn.setopt(pycurl.COOKIEFILE, "cookie.txt")
        try:
            self.conn.perform()
        except Exception, e:
            print e.message

    def on_receive(self, data):
        self.buffer += data
        if data.endswith("\r\n") and self.buffer.strip():
            if(self.triggered()):
                if(len(self.buffer) != 0):
                    try:
                        SaveThread(self.buffer).start()
                    except Exception, e:
                        print "something i commented would have told you there was an error"
                        system.exit(1)
                    self.buffer = ""

    def triggered(self):
        # First trigger based on size, then based on time..
        if (len(self.buffer) > SAVE_FILE_LENGTH):
            return True
        time_end = time.time()
        if (((time_end - self.time_start) > ROLL_DURATION)):  # for the time frame
            self.time_start = time.time()
            return True
        return False
Edit: I've fixed the gist.
In the above code, system.exit(1) should be sys.exit(1), right?
Other than that, do you have any other bare except clauses that might be catching the SystemExit exception raised by sys.exit(1)?
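To illustrate that second point: sys.exit(1) works by raising SystemExit, which does not derive from Exception, so an "except Exception:" clause lets it propagate while a bare "except:" swallows it and the script keeps running. For example:

import sys

try:
    sys.exit(1)
except Exception:
    print("not reached: SystemExit does not derive from Exception")
except:
    print("reached: a bare except catches SystemExit, so the script keeps going")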
