I have this example echo server:
import asyncio

class EchoServer(asyncio.Protocol):
    def connection_made(self, transport):
        peername = transport.get_extra_info('peername')
        print('connection from {}'.format(peername))
        self.transport = transport

    def data_received(self, data):
        self.transport.write(data)
        # Client piece goes here

loop = asyncio.get_event_loop()
coro = loop.create_server(EchoServer, '127.0.0.1', 8888)
server = loop.run_until_complete(coro)
print('serving on {}'.format(server.sockets[0].getsockname()))

try:
    loop.run_forever()
except KeyboardInterrupt:
    print("exit")
finally:
    server.close()
    loop.close()
What I'm trying to do is add a client piece where I've left the comment, one that will connect to another server and send the data off that-a-way. There's the standard echo client example, but I need a process that looks like this:
+-----------+ +-----------+ +--------------+
| My Server | | My Client | | Other Server |
+-----------+ +-----------+ +--------------+
| | |
===>Get some data | |
| | |
Send data ---------->| |
| | |
| Send data ----------->|
| | |
| | Do Stuff
| | |
| | <-----------Send Data
| | |
| <--------- Send data |
| | |
<=== Send data | |
| | |
| | |
| | |
| | |
Obviously I can do this synchronously, but I'm trying to make the client -> other-server leg async, and I can't figure out how to use the asyncio methods to communicate between my server piece and a client piece.
What do I need to do here?
Here is a simple proxy which allows you to wget 127.0.0.1:8888 and get an HTML response from Google:
import asyncio

class Client(asyncio.Protocol):
    def connection_made(self, transport):
        self.connected = True
        # save the transport
        self.transport = transport

    def data_received(self, data):
        # forward data to the server
        self.server_transport.write(data)

    def connection_lost(self, *args):
        self.connected = False

class Server(asyncio.Protocol):
    clients = {}

    def connection_made(self, transport):
        # save the transport
        self.transport = transport

    @asyncio.coroutine
    def send_data(self, data):
        # get a client by its peername
        peername = self.transport.get_extra_info('peername')
        client = self.clients.get(peername)

        # create a client if peername is not known or the client disconnected
        if client is None or not client.connected:
            # create_connection returns (transport, protocol)
            transport, client = yield from loop.create_connection(
                Client, 'google.com', 80)
            client.server_transport = self.transport
            self.clients[peername] = client

        # forward data to the client
        client.transport.write(data)

    def data_received(self, data):
        # use a task so this is executed asynchronously
        asyncio.Task(self.send_data(data))

@asyncio.coroutine
def initialize(loop):
    # use a coroutine so we can yield from and get the async result of
    # create_server
    server = yield from loop.create_server(Server, '127.0.0.1', 8888)

loop = asyncio.get_event_loop()
# main task to initialize everything
asyncio.Task(initialize(loop))
# run
loop.run_forever()
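For what it's worth, @asyncio.coroutine/yield from was deprecated in Python 3.8 and removed in 3.11; on modern Python the same proxy idea can be written with the streams API and async def/await. A rough sketch, not a drop-in replacement for the Protocol version above:

import asyncio

async def handle_client(reader, writer):
    # open a connection to the upstream server for this client
    upstream_reader, upstream_writer = await asyncio.open_connection('google.com', 80)

    async def pipe(src, dst):
        try:
            while not src.at_eof():
                data = await src.read(4096)
                if not data:
                    break
                dst.write(data)
                await dst.drain()
        finally:
            dst.close()

    # pump bytes in both directions until either side closes
    await asyncio.gather(pipe(reader, upstream_writer),
                         pipe(upstream_reader, writer))

async def main():
    server = await asyncio.start_server(handle_client, '127.0.0.1', 8888)
    async with server:
        await server.serve_forever()

asyncio.run(main())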
I have been trying to figure out context managers more and more, and the deeper I get into them the more problems I seem to find. My current problem is that I have no lock, which could mean two or more threads end up with the same shared value, when I only want each value to be in use by one thread.
import random
import threading
import time

list_op_proxy = [
    "https://123.123.12.21:12345",
    "http://123.123.12.21:54321",
]

proxy_dict = dict(zip(list_op_proxy, ['available'] * len(list_op_proxy)))
proxy_dict['http://123.123.12.21:987532'] = "busy"

class AvailableProxies:
    def __enter__(self):
        while True:
            available = [att for att, value in proxy_dict.items() if "available" in value]
            if available:
                self.proxy = random.choice(available)
                proxy_dict[self.proxy] = "busy"
                return self.proxy
            else:
                continue

    def __exit__(self, exc_type, exc_val, exc_tb):
        proxy_dict[self.proxy] = "available"

def handler(name):
    with AvailableProxies() as proxy:
        print(f"{name} | Proxy in use: {proxy}")
        # Adding 2 seconds as we want to see if it actually waits for availability
        time.sleep(2)

for i in range(5):
    threading.Thread(target=handler, args=(f'Thread {i}',)).start()
As you can see, in my context manager I randomly pick from the dict entries whose value is set to "available"; if one is available I set it to "busy" -> do some stuff and then exit (releasing it by setting the value back to "available"). However, my problem is that in rare cases two or more threads are able to get the same proxy, which I want to block: I want only one thread at a time to be inside the context manager's selection step, so the proxy's value can be set to "busy" before any other thread can take it.
How can I add a lock so that only one thread can set a proxy to "busy", and it can't happen that two or more threads mark the same proxy as busy?
You just need to lock while looking for a proxy and release the lock once a proxy has been found (usage is the same as in your previous question, no matter whether you use a context manager); I just added some more debug messages:
import random
import threading
import time

list_op_proxy = [
    "https://123.123.12.21:12345",
    "http://123.123.12.21:54321",
]

proxy_dict = dict(zip(list_op_proxy, ['available'] * len(list_op_proxy)))
proxy_dict['http://123.123.12.21:987532'] = "busy"

proxy_lock = threading.Lock()

class AvailableProxies:
    def __enter__(self):
        proxy_lock.acquire()
        self.proxy = None
        while not self.proxy:
            available = [
                att for att, value in proxy_dict.items() if "available" in value
            ]
            if available:
                print('%d proxies available' % len(available))
                self.proxy = random.choice(available)
                proxy_dict[self.proxy] = "busy"
                break
            else:
                print("Waiting ... no proxy available")
                time.sleep(.2)
                continue
        proxy_lock.release()
        return self.proxy

    def __exit__(self, exc_type, exc_val, exc_tb):
        proxy_dict[self.proxy] = "available"

def handler(name):
    with AvailableProxies() as proxy:
        print(f"{name} | Proxy in use: {proxy}")
        # hold the proxy briefly so we can watch other threads wait for it
        time.sleep(.1)

for j in range(5):
    threads = [threading.Thread(target=handler, args=(i, )) for i in range(3)]
    [t.start() for t in threads]
    [t.join() for t in threads]
    print("---")
print("---")
Out:
2 proxies available
0 | Proxy in use: http://123.123.12.21:54321
1 proxies available
1 | Proxy in use: https://123.123.12.21:12345
Waiting ... no proxy available
2 proxies available
2 | Proxy in use: https://123.123.12.21:12345
---
2 proxies available
0 | Proxy in use: http://123.123.12.21:54321
1 proxies available
1 | Proxy in use: https://123.123.12.21:12345
Waiting ... no proxy available
2 proxies available
2 | Proxy in use: http://123.123.12.21:54321
---
2 proxies available
0 | Proxy in use: https://123.123.12.21:12345
1 proxies available
1 | Proxy in use: http://123.123.12.21:54321
Waiting ... no proxy available
2 proxies available
2 | Proxy in use: https://123.123.12.21:12345
---
2 proxies available
0 | Proxy in use: https://123.123.12.21:12345
1 proxies available
1 | Proxy in use: http://123.123.12.21:54321
Waiting ... no proxy available
2 proxies available
2 | Proxy in use: https://123.123.12.21:12345
---
2 proxies available
0 | Proxy in use: http://123.123.12.21:54321
1 proxies available
1 | Proxy in use: https://123.123.12.21:12345
Waiting ... no proxy available
2 proxies available
2 | Proxy in use: http://123.123.12.21:54321
---
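As an aside, the sleep-and-retry polling inside the lock can be avoided entirely with a threading.Condition: waiting threads block until a proxy is actually released. A minimal sketch, reusing proxy_dict from above:

import random
import threading

proxy_cond = threading.Condition()

class AvailableProxies:
    def __enter__(self):
        with proxy_cond:
            # block until at least one proxy is marked available
            proxy_cond.wait_for(
                lambda: any(v == "available" for v in proxy_dict.values()))
            available = [k for k, v in proxy_dict.items() if v == "available"]
            self.proxy = random.choice(available)
            proxy_dict[self.proxy] = "busy"
        return self.proxy

    def __exit__(self, exc_type, exc_val, exc_tb):
        with proxy_cond:
            proxy_dict[self.proxy] = "available"
            # wake one thread waiting in __enter__
            proxy_cond.notify()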
I am trying to receive data using mosquitto and save it as a CSV file using Python pandas. The data is continuous until I stop the script.
mqtt_pub.py
import paho.mqtt.client as mqtt
import random
import schedule
import time

mqttc = mqtt.Client("python_pub")
mqttc.connect("localhost", 1883)

def job():
    mqttc.publish("hello/world", random.randint(1, 10))

schedule.every(1).seconds.do(job)

while True:
    schedule.run_pending()
    time.sleep(1)

mqttc.loop(2)  # note: never reached because of the while True above
mqtt_sub.py
import paho.mqtt.client as mqtt
import pandas as pd

def on_connect(client, userdata, rc):
    print("Connected with result code "+str(rc))
    client.subscribe("hello/world")

def on_message(client, userdata, msg):
    datas = map(int, msg.payload)
    for num in datas:
        df = pd.DataFrame(data=datas, columns=['the_number'])
        df.to_csv("testing.csv")

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.loop_forever()
From the mqtt_sub.py script above, I get a testing.csv that looks like this:
  | the_number
0 | 2
2 is the last number I receive before I stop the mqtt_sub.py script:
Connected with result code 0
[3]
[9]
[5]
[3]
[7]
[2]
...
...
KeyboardInterrupt
I was hoping to get testing.csv like this:
| the_number
0 | 3
1 | 9
2 | 5
...
...
5 | 2
To achieve that, I tried changing df = pd.DataFrame(data=datas, columns=['the_number']) to df = pd.DataFrame(data=num, columns=['the_number']), and the following error occurred:
pandas.core.common.PandasError: DataFrame constructor not properly called!
Does anyone have any idea how to solve the error? I also feel that I did not use the for loop properly here.
Thank you for your suggestion and help.
[UPDATE]
I added/changed the following lines in the on_message method:
def on_message(client, userdata, msg):
    datas = map(int, msg.payload)
    df = pd.DataFrame(data=datas, columns=['the_number'])
    f = open("test.csv", 'a')
    df.to_csv(f)
    f.close()
With help from Nulljack, I am able to get a result like this in my CSV file:
| the_number
0 | 3
| the_number
0 | 9
| the_number
0 | 5
| the_number
0 | 3
| the_number
0 | 7
My goal is to achieve something like this in the CSV file:
| the_number
0 | 3
1 | 9
2 | 5
3 | 3
4 | 7
Having never used mosquitto before, I apologize if my understanding is wrong.
It seems to me like the on_message method in your mqtt_sub.py is run every time your mqtt_pub.py publishes a message (i.e. every second), which causes your testing.csv file to be overwritten on every publish.
To fix this, I would initialize a dataframe in your on_connect method and then, in on_message, add the new value to the dataframe via df.append (see the sketch below).
As for writing to CSV after you terminate, I am unsure.
Hope this helps
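A minimal sketch of that df.append idea (note that df.append returns a new frame, so the global must be rebound; the decode() assumes Python 3 bytes payloads, and df.append itself was removed in pandas 2.x, where pd.concat is the replacement):

import paho.mqtt.client as mqtt
import pandas as pd

df = pd.DataFrame(columns=['the_number'])

def on_connect(client, userdata, rc):
    client.subscribe("hello/world")

def on_message(client, userdata, msg):
    global df
    # append returns a new frame rather than mutating in place
    df = df.append({'the_number': int(msg.payload.decode())},
                   ignore_index=True)
    df.to_csv("testing.csv")

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.loop_forever()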
The other thread was crowded, so I moved my response here.
Try using the code below:
import paho.mqtt.client as mqtt
import pandas as pd

# Move df here
df = pd.DataFrame(columns=['the_number'])

def on_connect(client, userdata, rc):
    print("Connected with result code "+str(rc))
    client.subscribe("hello/world")

def on_message(client, userdata, msg):
    datas = map(int, msg.payload)
    # this adds the data to the dataframe at the correct index
    # (.loc rather than .iloc -- .iloc cannot enlarge the frame,
    # and list() materializes the lazy map on Python 3)
    df.loc[df.size] = list(datas)
    # I reverted this line back to what you originally had.
    # This will overwrite the testing.csv file every time your subscriber
    # receives a message, but since the dataframe is formatted like you want
    # it shouldn't matter
    df.to_csv("testing.csv")

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.loop_forever()
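If the pandas detour isn't essential, a plain csv.writer in append mode sidesteps the repeated-header problem entirely. A minimal sketch, assuming Python 3 (bytes payloads, the newline= parameter); the index column just mimics what pandas would have written:

import csv
import os
import paho.mqtt.client as mqtt

CSV_PATH = "testing.csv"
row = 0

def on_connect(client, userdata, rc):
    print("Connected with result code " + str(rc))
    client.subscribe("hello/world")

def on_message(client, userdata, msg):
    global row
    new_file = not os.path.exists(CSV_PATH) or os.path.getsize(CSV_PATH) == 0
    with open(CSV_PATH, "a", newline="") as f:
        writer = csv.writer(f)
        if new_file:
            # empty first header cell, like pandas' index column
            writer.writerow(["", "the_number"])
        writer.writerow([row, int(msg.payload.decode())])
    row += 1

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)
client.loop_forever()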
I downloaded and ran the fitnesse-standalone to test how the SLIM protocol works.
Below is the directory structure:
/Users
|
-redmont
    |
    -fitnesse-standalone.jar
    |
    -Calc.py
    |
    -FitNesseRoot/
FitNesse Wiki
!contents -R2 -g -p -f -h
!This is a test page
!define TEST_SYSTEM {slim}
!define SLIM_VERSION {0.1}
!path /Users/redmont/Calc.py
!path /Users/redmont/fitnesse-standalone.jar
!define COMMAND_PATTERN {python -m waferslim.server --syspath 8080}
|import|
|waferslim.examples.decision_table|
|Calc.MyCalc|
|my calc|
|A |B |multiply? |
|1 |2 |2 |
|1 |0 |0 |
|3 |5 |15 |
Calc.py
from waferslim.converters import convert_arg, convert_result, YesNoConverter

class MyCalc(object):
    """
    Base test class
    """
    def __init__(self):
        """
        Initialise instance variables a and b to multiply
        """
        self._A = 0
        self._B = 0
        self._multiply = 0

    @convert_arg(to_type=int)
    def setA(self, A):
        """
        Decorated method to set the variable 'a' as an int.
        The decorator uses the implicitly registered int converter to
        translate from a standard slim string value to an int.
        """
        self._A = A

    @convert_arg(to_type=int)
    def setB(self, B):
        self._B = B

    @convert_result(to_type=str)
    def multiply(self):
        return self._A * self._B
I start the fitnesse-standalone using:
java -jar fitnesse-standalone.jar -p 8080 -v
The logs in the terminal are:
Socket class: class java.net.Socket
Remote address = /0:0:0:0:0:0:0:1:57853
Local socket address = /0:0:0:0:0:0:0:1:8080
Closed = false
Connected = true
Bound = true
isInputShutdown = false
isOutputShutdown = false
Creating plain socket on port: 0
Trying to connect to host: localhost on port: 57859 SSL=false timeout setting: 10
Creating plain client: localhost:57859
Socket class: class java.net.Socket
Connected to host: localhost on port: 57859 SSL=false timeout setting: 10
Remote address = /127.0.0.1:57860
Local socket address = /127.0.0.1:57859
Closed = false
Connected = true
Bound = true
isInputShutdown = false
isOutputShutdown = false
Read Slim Header: >Slim -- V0.4<
Got Slim Header: Slim -- V0.4, and Version 0.4
When I run this fixture in FitNesse with debug, I get the following error:
Could not invoke constructor for MyCalc[0]
1 The instance decisionTable_1.setA. does not exist
2 The instance decisionTable_1.setB. does not exist
2 The instance decisionTable_1.multiply. does not exist
I am unable to understand why FitNesse isn't able to find my fixture code.
Is it because of some permissions issue?
I have also installed waferslim using its egg from the Cheese Shop (PyPI).
Reinstalling waferslim somehow solved my problem.
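For anyone hitting the same thing, a clean reinstall these days would be (assuming the package is still published under the name waferslim):

pip uninstall waferslim
pip install waferslim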
How do I create a health check for Route 53 using Python boto? There are no examples, and the documentation is very lacking: http://boto.readthedocs.org/en/latest/ref/route53.html
So, given an IP address, port, and path... then what?
Even though the boto documentation is lacking, you can figure it out from the code and the AWS API.
Take a look at boto.route53.healthcheck.HealthCheck and use it as:
route = boto.connect_route53()
hc = boto.route53.healthcheck.HealthCheck(...)
result = route.create_health_check(hc)
The ... is filled in according to the help page of HealthCheck:
Help on HealthCheck in module boto.route53.healthcheck object:
class HealthCheck(__builtin__.object)
| An individual health check
|
| Methods defined here:
|
| __init__(self, ip_addr, port, hc_type, resource_path, fqdn=None, string_match=None, request_interval=30, failure_threshold=3)
| HealthCheck object
|
| :type ip_addr: str
| :param ip_addr: IP Address
|
| :type port: int
| :param port: Port to check
|
| :type hc_type: str
| :param ip_addr: One of HTTP | HTTPS | HTTP_STR_MATCH | HTTPS_STR_MATCH | TCP
|
| :type resource_path: str
| :param resource_path: Path to check
|
| :type fqdn: str
| :param fqdn: domain name of the endpoint to check
|
| :type string_match: str
| :param string_match: if hc_type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string to search for in the response body from the specified resource
|
| :type request_interval: int
| :param request_interval: The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
|
| :type failure_threshold: int
| :param failure_threshold: The number of consecutive health checks that an endpoint must pass or fail for Amazon Route 53 to change the current status of the endpoint from unhealthy to healthy or vice versa.
|
| to_xml(self)
|
| ----------------------------------------------------------------------
| Data descriptors defined here:
|
| __dict__
| dictionary for instance variables (if defined)
|
| __weakref__
| list of weak references to the object (if defined)
|
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|
| POSTXMLBody = '\n <HealthCheckConfig>\n <IPAddr...il...
|
| XMLFQDNPart = '<FullyQualifiedDomainName>%(fqdn)s</FullyQualifiedDomai...
|
| XMLRequestIntervalPart = '<RequestInterval>%(request_interval)d</Reque...
|
| XMLStringMatchPart = '<SearchString>%(string_match)s</SearchString>'
|
| valid_request_intervals = (10, 30)
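Putting it together for the original "IP address, port, and path" question, a minimal sketch (the endpoint values are hypothetical):

import boto
from boto.route53.healthcheck import HealthCheck

route = boto.connect_route53()

# hypothetical endpoint to monitor
hc = HealthCheck(ip_addr='203.0.113.10',
                 port=80,
                 hc_type='HTTP',
                 resource_path='/health',
                 request_interval=30,
                 failure_threshold=3)

result = route.create_health_check(hc)
print(result)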
I have a sniff/log file for VoIP/SIP generated by Python scapy, in the format
time | src | srcport | dst | dstport | payload
The sniff Python script looks like this:
# Import Scapy module
from scapy.all import *
import sys

sys.stdout = open('data.txt', 'w')
pkts = sniff(filter="udp and port 5060 and not port 22", count=0,
             prn=lambda x: x.sprintf("%sent.time% | %IP.src% | %IP.sport% | %IP.dst% | %IP.dport% | Payload {Raw:%Raw.load%\n}"))
Each packet is on one line, and each line can have a different size depending on the SIP message type (REGISTER, 200 OK, INVITE, NOTIFY, and so on).
What I would like to get from the file are the fields time, src, srcport, dst, dstport, plus, from the payload, the SIP message type (right after "Payload") and the From, To, Call-ID, and Contact headers, as well as the whole payload, and then prepare all of these for insertion into a MySQL database.
1st msg:
07:57:01.894990 | 192.168.1.10 | 5060 | 192.168.1.1 | 5060 | Payload 'INVITE sip:210#test-lab.org SIP/2.0\r\nVia:
SIP/2.0/UDP 192.168.1.10:5060;rport;branch=z9hG4bK-9cbb0ba8\r\nRoute: <sip:192.168.1.1:5060;lr>\r\nFrom: "test-311" <sip:311#test-lab.org>;tag=3d13bd6f\r\n
To: <sip:210#test-lab.org>\r\nCall-ID: 21b0e2c755973976d6d06702ca33b32f#10.193.40.249\r\nCSeq: 1 INVITE\r\n
Contact: "test-311" <sip:311#192.168.1.10:5060;transport=UDP>\r\nMax-Forwards: 70\r\n
Supported: 100rel,replaces\r\nAllow: ACK, BYE, CANCEL, INFO, INVITE, OPTIONS, NOTIFY, PRACK, REFER, UPDATE, MESSAGE\r\nContent-Type: application/sdp\r\nContent-Length: 276\r\n\r\nv=0\r\no=- 3506863524 285638052 IN IP4 192.168.1.10\r\ns=-\r\nc=IN IP4 192.168.1.10\r\nt=0 0\r\nm=audio 8000 RTP/AVP 8 0 18 101\r\nc=IN IP4 192.168.1.10\r\na=rtpmap:8 PCMA/8000\r\na=rtpmap:0 PCMU/8000\r\na=rtpmap:18 G729/8000\r\na=rtpmap:101 telephone-event/8000\r\na=fmtp:101 0-15\r\na=ptime:20\r\n'
2nd msg:
07:57:01.902618 | 192.168.1.1 | 5060 | 192.168.1.10 | 5060 | Payload 'SIP/2.0 100 Trying\r\nVia: SIP/2.0/UDP 192.168.1.10:5060;received=192.168.1.10;branch=z9hG4bK-9cbb0ba8;rport=5060\r\nFrom: "test-311" <sip:+38551311#test-lab.org>;tag=3d13bd6f\r\nTo: <sip:210#test-lab.org>\r\nCall-ID: 21b0e2c755973976d6d06702ca33b32f#192.168.1.10\r\nCSeq: 1 INVITE\r\n\r\n'
I have tried to read the file line by line and split, but I do not know how to split out and extract the data from the payload part.
Any help is more than welcome.
Well, you can enter the data into MySQL straight from this program too; it might very well be the easiest approach.
from scapy.all import *

# connect to mysql
connection = ...

def insert_into_mysql(packet):
    # now you can use packet.src, packet.sport, packet.dst, packet.dport, and
    # I believe packet['Raw'].load
    connection.execute(...)
    # return None to not print the packet; to print it instead:
    # return packet.sprintf("%sent.time% | %IP.src% | %IP.sport% | %IP.dst% | %IP.dport% | Payload {Raw:%Raw.load%\n}")
    return None

pkts = sniff(filter="udp and port 5060", count=0, store=0, prn=insert_into_mysql)
But if you need to use the existing log, I think you need to use:
from ast import literal_eval

for line in open('log.txt'):
    # six fields means five splits
    sent_time, src, sport, dst, dport, payload = line.split(' | ', 5)
    payload = payload.replace('Payload ', '', 1)
    # to get the unquoted payload, I'd guess (can't test SIP though)
    payload = literal_eval(payload)
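Once payload holds the unquoted SIP message, plain string handling is enough to pull out the message type and the From/To/Call-ID/Contact headers the question asks for. A rough sketch based on the sample messages above:

def parse_sip(payload):
    lines = payload.split('\r\n')
    # requests start with the method ('INVITE sip:... SIP/2.0');
    # responses ('SIP/2.0 100 Trying') yield 'SIP/2.0' here
    msg_type = lines[0].split(' ', 1)[0] if lines else ''
    headers = {}
    for line in lines[1:]:
        if ': ' in line:
            name, value = line.split(': ', 1)
            headers[name] = value
    return msg_type, headers

msg_type, headers = parse_sip(payload)
print(msg_type, headers.get('From'), headers.get('To'),
      headers.get('Call-ID'), headers.get('Contact'))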
from scapy.all import *
import MySQLdb

def insert_into_mysql(packet):
    db = MySQLdb.connect("localhost", "test", "testpwd", "my_db")
    cursor = db.cursor()
    # now you can use packet.src, packet.sport, packet.dst, packet.dport,
    # and packet['Raw'].load
    add_sip = ("INSERT INTO py_sniff "
               "(time, src_ip, src_port, dst_ip, dst_port, message) "
               "VALUES (%(time)s, %(src_ip)s, %(src_port)s, "
               "%(dst_ip)s, %(dst_port)s, %(message)s)")
    # data from sniff -- kept in a separate dict so the query string
    # above is not overwritten
    data_sip = {
        'time': packet.sprintf("%sent.time%"),
        'src_ip': packet.sprintf("%IP.src%"),
        'src_port': packet.sprintf("%IP.sport%"),
        'dst_ip': packet.sprintf("%IP.dst%"),
        'dst_port': packet.sprintf("%IP.dport%"),
        'message': packet.sprintf("{Raw:%Raw.load%}"),
    }
    # to print the packet instead:
    # return packet.sprintf("%sent.time% | %IP.src% | %IP.sport% | %IP.dst% | %IP.dport% | Payload {Raw:%Raw.load%\n}")
    cursor.execute(add_sip, data_sip)
    db.commit()

pkts = sniff(iface="eth0", filter="udp and port 5060", count=0, store=0, prn=insert_into_mysql)
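Note that cursor.execute(add_sip, data_sip) hands the values to MySQLdb separately, so the driver escapes them itself; never interpolate a raw SIP payload into the query string by hand, or a hostile packet becomes SQL injection.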