Based on the following code example of a simple historical data request and the Python API example provided by Bloomberg, I constructed the bdh function below. It works fine when called directly from IPython (see the testing lines after the function definition).
import blpapi
import pandas as pd
import datetime as dt
from optparse import OptionParser
def parseCmdLine():
parser = OptionParser(description="Retrieve reference data.")
parser.add_option("-a",
"--ip",
dest="host",
help="server name or IP (default: %default)",
metavar="ipAddress",
default="localhost")
parser.add_option("-p",
dest="port",
type="int",
help="server port (default: %default)",
metavar="tcpPort",
default=8194)
(options, args) = parser.parse_args()
return options
def bdh(secList, fieldList,startDate,endDate=dt.date.today().strftime('%Y%m%d'),periodicity='Daily'):
""" Sends a historical request to Bloomberg.
Returns a pandas.Panel object.
"""
options = parseCmdLine()
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(options.host)
sessionOptions.setServerPort(options.port)
print "Connecting to %s:%s" % (options.host, options.port)
# Create a Session
session = blpapi.Session(sessionOptions)
# Start a Session
if not session.start():
print "Failed to start session."
return
try:
# Open service to get historical data from
if not session.openService("//blp/refdata"):
print "Failed to open //blp/refdata"
return
# Obtain previously opened service
refDataService = session.getService("//blp/refdata")
# Create and fill the request for the historical data
request = refDataService.createRequest("HistoricalDataRequest")
for s in secList:
request.getElement("securities").appendValue(s)
for f in fieldList:
request.getElement("fields").appendValue(f)
request.set("periodicityAdjustment", "ACTUAL")
request.set("periodicitySelection", "DAILY")
request.set("startDate", startDate)
request.set("endDate", endDate)
print "Sending Request:", request
# Send the request
session.sendRequest(request)
# Process received events
response={}
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = session.nextEvent(500)
if ev.eventType() == blpapi.Event.RESPONSE or ev.eventType() == blpapi.Event.PARTIAL_RESPONSE:
for msg in ev:
secData = msg.getElement('securityData')
name = secData.getElement('security').getValue()
response[name] = {}
fieldData = secData.getElement('fieldData')
for i in range(fieldData.numValues()):
fields = fieldData.getValue(i)
for n in range(1, fields.numElements()):
date = fields.getElement(0).getValue()
field = fields.getElement(n)
try:
response[name][field.name()][date] = field.getValue()
except KeyError:
response[name][field.name()] = {}
response[name][field.name()][date] = field.getValue()
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we can exit
break
# convert the response to a pandas object
tempdict = {}
for r in response:
td = {}
for f in response[r]:
td[f] = pd.Series(response[r][f])
tempdict[r] = pd.DataFrame(td)
data = pd.Panel(tempdict)
finally:
# Stop the session
session.stop()
return(data)
#------------------------------------------------------------
secList = ['SP1 Index', 'GC1 Comdty']
fieldList = ['PX_LAST']
beg = (dt.date.today() - dt.timedelta(30)).strftime('%Y%m%d')
testData = bdh.bdh(secList,fieldList,beg)
testData = testData.swapaxes('items','minor')
print(testData['PX_LAST'])
However, when I run exactly the same example (see the lines after the bdh function definition) from an IPython notebook, I get the following error:
SystemExit Traceback (most recent call last)
<ipython-input-6-ad6708eabe39> in <module>()
----> 1 testData = bbg.bdh(tickers,fields,begin)
2 #testData = testData.swapaxes('items','minor')
3 #print(testData['PX_LAST'])
C:\Python27\bbg.py in bdh(secList, fieldList, startDate, endDate, periodicity)
33 """
34
---> 35 options = parseCmdLine()
36
37 # Fill SessionOptions
C:\Python27\bbg.py in parseCmdLine()
24 default=8194)
25
---> 26 (options, args) = parser.parse_args()
27
28 return options
C:\Python27\lib\optparse.pyc in parse_args(self, args, values)
1400 stop = self._process_args(largs, rargs, values)
1401 except (BadOptionError, OptionValueError), err:
-> 1402 self.error(str(err))
1403
1404 args = largs + rargs
C:\Python27\lib\optparse.pyc in error(self, msg)
1582 """
1583 self.print_usage(sys.stderr)
-> 1584 self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
1585
1586 def get_usage(self):
C:\Python27\lib\optparse.pyc in exit(self, status, msg)
1572 if msg:
1573 sys.stderr.write(msg)
-> 1574 sys.exit(status)
1575
1576 def error(self, msg):
SystemExit: 2
My understanding is that the options needed to connect to Bloomberg are set up correctly when bdh is called from a local IPython session, but somehow come out wrong when bdh is called from the kernel that the notebook starts?
Hope to get some help, thanks a lot in advance.
When you call parseCmdLine(), it looks at sys.argv, which is probably not what you're expecting.
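You can see this from a notebook cell with a quick diagnostic sketch (the exact output depends on how the kernel was launched):

import sys
print(sys.argv)
# In a notebook the kernel process is started with its own command-line
# arguments (typically a '-f <connection-file>' pair and friends).
# OptionParser doesn't recognise those options, so parse_args() reports an
# error and calls sys.exit(2) -- which is the SystemExit: 2 in your traceback.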
What about this?
def parseCmdLine():
parser = OptionParser(description="Retrieve reference data.")
parser.add_option("-a",
"--ip",
dest="host",
help="server name or IP (default: %default)",
metavar="ipAddress",
default="localhost")
parser.add_option("-p",
dest="port",
type="int",
help="server port (default: %default)",
metavar="tcpPort",
default=8194)
(options, args) = parser.parse_args()
return options
def bdh(secList, fieldList,startDate,endDate=dt.date.today().strftime('%Y%m%d'),periodicity='Daily', host='localhost', port=8194):
""" Sends a historical request to Bloomberg.
Returns a pandas.Panel object.
"""
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(host)
sessionOptions.setServerPort(port)
...
if __name__ == '__main__':
options = parseCmdLine()
secList = ['SP1 Index', 'GC1 Comdty']
fieldList = ['PX_LAST']
beg = (dt.date.today() - dt.timedelta(30)).strftime('%Y%m%d')
testData = bdh.bdh(secList,fieldList,beg, host=options.host, port=options.port)
testData = testData.swapaxes('items','minor')
print(testData['PX_LAST'])
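With the connection details passed in as arguments, a notebook cell can call the function directly and sys.argv is never touched. A minimal usage sketch, assuming the refactored bdh lives in the bbg.py module shown in your traceback:

import datetime as dt
import bbg  # module containing the refactored bdh

secList = ['SP1 Index', 'GC1 Comdty']
fieldList = ['PX_LAST']
beg = (dt.date.today() - dt.timedelta(30)).strftime('%Y%m%d')

# no command-line parsing involved; the defaults connect to localhost:8194
testData = bbg.bdh(secList, fieldList, beg)
testData = testData.swapaxes('items', 'minor')
print(testData['PX_LAST'])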
I need some help setting the configuration for sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
We are using Confluent Kafka here. There are two scripts: a Python script, and a bash script that calls the Python one. You can find the Python script below.
Thanks for the help in advance!
import json
import os
import string
import random
import socket
import uuid
import re
from datetime import datetime
import time
import hashlib
import math
import sys
from functools import cache
from confluent_kafka import Producer, KafkaError, KafkaException
topic_name = os.environ['TOPIC_NAME']
partition_count = int(os.environ['PARTITION_COUNT'])
message_key_template = json.loads(os.environ['KEY_TEMPLATE'])
message_value_template = json.loads(os.environ['VALUE_TEMPLATE'])
message_header_template = json.loads(os.environ['HEADER_TEMPLATE'])
bootstrap_servers = os.environ['BOOTSTRAP_SERVERS']
perf_counter_batch_size = int(os.environ.get('PERF_COUNTER_BATCH_SIZE', 100))
messages_per_aggregate = int(os.environ.get('MESSAGES_PER_AGGREGATE', 1))
max_message_count = int(os.environ.get('MAX_MESSAGE_COUNT', sys.maxsize))
def error_cb(err):
""" The error callback is used for generic client errors. These
errors are generally to be considered informational as the client will
automatically try to recover from all errors, and no extra action
is typically required by the application.
For this example however, we terminate the application if the client
is unable to connect to any broker (_ALL_BROKERS_DOWN) and on
authentication errors (_AUTHENTICATION). """
print("Client error: {}".format(err))
if err.code() == KafkaError._ALL_BROKERS_DOWN or \
err.code() == KafkaError._AUTHENTICATION:
# Any exception raised from this callback will be re-raised from the
# triggering flush() or poll() call.
raise KafkaException(err)
def acked(err, msg):
if err is not None:
print("Failed to send message: %s: %s" % (str(msg), str(err)))
producer_configs = {
'bootstrap.servers': bootstrap_servers,
'client.id': socket.gethostname(),
'error_cb': error_cb
}
# TODO: Need to support sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
# TODO: Need to support truststores for connecting to private DCs.
producer = Producer(producer_configs)
# generates a random value if it is not cached in the template_values dictionary
def get_templated_value(term, template_values):
if not term in template_values:
template_values[term] = str(uuid.uuid4())
return template_values[term]
def fill_template_value(value, template_values):
str_value = str(value)
template_regex = '{{(.+?)}}'
templated_terms = re.findall(template_regex, str_value)
for term in templated_terms:
str_value = str_value.replace(f"{{{{{term}}}}}", get_templated_value(term, template_values))
return str_value
def fill_template(template, templated_terms):
# TODO: Need to address metadata field, as it's treated as a string instead of a nested object.
return {field: fill_template_value(value, templated_terms) for field, value in template.items()}
@cache
def get_partition(lock_id):
bits = 128
bucket_size = 2**bits / partition_count
partition = (int(hashlib.md5(lock_id.encode('utf-8')).hexdigest(), 16) / bucket_size)
return math.floor(partition)
sequence_number = int(time.time() * 1000)
sequence_number = 0
message_count = 0
producing = True
start_time = time.perf_counter()
aggregate_message_counter = 0
# cache for templated term values so that they match across the different templates
templated_values = {}
try:
while producing:
sequence_number += 1
aggregate_message_counter += 1
message_count += 1
if aggregate_message_counter % messages_per_aggregate == 0:
# reset templated values
templated_values = {}
else:
for term in list(templated_values):
if term not in ['aggregateId', 'tenantId']:
del(templated_values[term])
# Fill in templated field values
message_key = fill_template(message_key_template, templated_values)
message_value = fill_template(message_value_template, templated_values)
message_header = fill_template(message_header_template, templated_values)
ts = datetime.utcnow().isoformat()[:-3]+'Z'
message_header['timestamp'] = ts
message_header['sequence_number'] = str(sequence_number)
message_value['timestamp'] = ts
message_value['sequenceNumber'] = sequence_number
lock_id = message_header['lock_id']
partition = get_partition(lock_id) # partition by lock_id, since key could be random, but a given aggregate_id should ALWAYS resolve to the same partition, regardless of key.
# Send message
producer.produce(topic_name, partition=partition, key=json.dumps(message_key), value=json.dumps(message_value), headers=message_header, callback=acked)
if sequence_number % perf_counter_batch_size == 0:
producer.flush()
end_time = time.perf_counter()
total_duration = end_time - start_time
messages_per_second=(perf_counter_batch_size/total_duration)
print(f'{messages_per_second} messages/second')
# reset start time
start_time = time.perf_counter()
if message_count >= max_message_count:
break
except Exception as e:
print(f'ERROR: %s' % e)
sys.exit(1)
finally:
producer.flush()
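For the SASL part of the question, here is a hedged sketch of how producer_configs could be extended, using the standard librdkafka property names that confluent-kafka accepts; the credentials, principal, and file paths below are placeholders, not values from your environment:

# SASL/PLAIN over TLS (e.g. API-key style credentials)
sasl_plain_configs = {
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'PLAIN',
    'sasl.username': '<api-key>',
    'sasl.password': '<api-secret>',
    # 'ssl.ca.location': '/path/to/ca.pem',  # custom truststore for private DCs
}

# SASL/GSSAPI (Kerberos)
sasl_gssapi_configs = {
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'GSSAPI',
    'sasl.kerberos.service.name': 'kafka',
    'sasl.kerberos.principal': 'client@EXAMPLE.COM',
    'sasl.kerberos.keytab': '/path/to/client.keytab',
}

# merge one of them into the existing config before creating the Producer
producer = Producer({**producer_configs, **sasl_plain_configs})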
Hello, I'm working on a simple Python SSH tunnel script, but I always receive a "Could not resolve hostname" error, although it works if I run the command manually. This is my code:
#!/usr/bin/env python
import subprocess
import time
import tempfile
class TunnelSSH():
def __init__(self, ssh_user: str, ssh_password: str, ssh_host: str, ssh_port: int,
local_tunnel_port:int, remote_tunnel_host:str, remote_tunnel_port:int):
self.ssh_user = ssh_user
self.ssh_password = ssh_password
self.ssh_host = ssh_host
self.ssh_port = ssh_port
self.local_tunnel_port = local_tunnel_port
self.remote_tunnel_port = remote_tunnel_port
self.remote_tunnel_host = remote_tunnel_host
_socket_file = tempfile.NamedTemporaryFile()
_socket_file.close()
self.socket = _socket_file.name
self.connected = False
def start(self):
ssh_conection = ['ssh', '-CN',
f'"{self.ssh_user}:{self.ssh_password}"#{self.ssh_host} -p {self.ssh_port}',
f'-L {self.local_tunnel_port}:{self.remote_tunnel_host}:{self.remote_tunnel_port}',
f'-S {self.socket}',
'-o ExitOnForwardFailure=True'
]
if not self.connected:
status = subprocess.call(ssh_conection)
self._check_connection(status)
time.sleep(self.retry_sleep)
else:
raise Exception('Tunnel is open')
def stop(self):
if self.connected:
if self._send_control_command('exit') != 0:
raise Exception('SSH tunnel failed to exit')
self.connected = False
def _check_connection(self, status) -> None:
"""Check connection status and set connected to True is tunnel is open"""
if status != 0:
raise Exception(f'SSH tunnel failed status: {status}')
if self._send_control_command('check'):
raise Exception(f'SSH tunnel failed to check')
self.connected = True
def _send_control_command(self, ctl_cmd:str):
call = ['ssh',f'-S {self.socket}',f'-O {self.ctl_cmd}', f'-l {self.ssh_user}', f'{self.ssh_host}']
return subprocess.check_call(call)
if __name__ == "__main__":
tunnel = TunnelSSH(ssh_user='...',
ssh_password='...',
ssh_host='...',
ssh_port=...,
local_tunnel_port=...,
remote_tunnel_host='...',
remote_tunnel_port=...
)
retry = 10 # times
wait_for_retry = 5 #s
for i in range(retry):
print(f'Connection attempt: {i}')
try:
tunnel.start()
except Exception as err:
tunnel.stop()
print(err)
time.sleep(wait_for_retry)
print(f'Connected: {tunnel.connected}')
subprocess.call expects a list of arguments. When ssh_conection is formed, several arguments are slapped together, so e.g. this part gets quoted into a single argument:
'"{self.ssh_user}:{self.ssh_password}"#{self.ssh_host} -p {self.ssh_port}'
Fix: properly split the arguments:
...
ssh_conection = ['ssh', '-CN',
f'{self.ssh_user}:{self.ssh_password}@{self.ssh_host}', # will be quoted automatically
'-p', f'{self.ssh_port}',
'-L', f'{self.local_tunnel_port}:{self.remote_tunnel_host}:{self.remote_tunnel_port}',
'-S', f'{self.socket}',
'-o', 'ExitOnForwardFailure=True'
]
...
What hinted at the problem: the IP addresses are used directly, and a "cannot be resolved" error on an IP address means it is being interpreted as a symbolic name, which makes the mistake easier to spot.
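One quick way to see the difference is to let shlex show how each argument list would read as a shell command line. A small diagnostic sketch (Python 3.8+ for shlex.join; the user, password and IP are placeholders):

import shlex

broken = ['ssh', '-CN', 'user:secret@203.0.113.5 -p 22']
fixed = ['ssh', '-CN', 'user:secret@203.0.113.5', '-p', '22']

print(shlex.join(broken))  # ssh -CN 'user:secret@203.0.113.5 -p 22'  <- host and port form ONE argument
print(shlex.join(fixed))   # ssh -CN user:secret@203.0.113.5 -p 22    <- separate arguments, as ssh expects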
I'm trying to learn both Python and BACnet using the BACpypes library, and I'm a little bit stuck right now.
I'm trying to make the "WhoIs-IAm" sample application do an automatic "IAm" broadcast when launched, but given my newbie skills I'm having trouble building it.
Here's the sample.
#!/usr/bin/python
"""
This application presents a 'console' prompt to the user asking for Who-Is and I-Am
commands which create the related APDUs, then lines up the corresponding I-Am
for incoming traffic and prints out the contents.
"""
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run
from bacpypes.pdu import Address, GlobalBroadcast
from bacpypes.app import LocalDeviceObject, BIPSimpleApplication
from bacpypes.apdu import WhoIsRequest, IAmRequest
from bacpypes.basetypes import ServicesSupported
from bacpypes.errors import DecodingError
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# globals
this_device = None
this_application = None
this_console = None
#
# WhoIsIAmApplication
#
class WhoIsIAmApplication(BIPSimpleApplication):
def __init__(self, *args):
if _debug: WhoIsIAmApplication._debug("__init__ %r", args)
BIPSimpleApplication.__init__(self, *args)
# keep track of requests to line up responses
self._request = None
def request(self, apdu):
if _debug: WhoIsIAmApplication._debug("request %r", apdu)
# save a copy of the request
self._request = apdu
# forward it along
BIPSimpleApplication.request(self, apdu)
def confirmation(self, apdu):
if _debug: WhoIsIAmApplication._debug("confirmation %r", apdu)
# forward it along
BIPSimpleApplication.confirmation(self, apdu)
def indication(self, apdu):
if _debug: WhoIsIAmApplication._debug("indication %r", apdu)
if (isinstance(self._request, WhoIsRequest)) and (isinstance(apdu, IAmRequest)):
device_type, device_instance = apdu.iAmDeviceIdentifier
if device_type != 'device':
raise DecodingError, "invalid object type"
if (self._request.deviceInstanceRangeLowLimit is not None) and \
(device_instance < self._request.deviceInstanceRangeLowLimit):
pass
elif (self._request.deviceInstanceRangeHighLimit is not None) and \
(device_instance > self._request.deviceInstanceRangeHighLimit):
pass
else:
# print out the contents
sys.stdout.write('pduSource = ' + repr(apdu.pduSource) + '\n')
sys.stdout.write('iAmDeviceIdentifier = ' + str(apdu.iAmDeviceIdentifier) + '\n')
sys.stdout.write('maxAPDULengthAccepted = ' + str(apdu.maxAPDULengthAccepted) + '\n')
sys.stdout.write('segmentationSupported = ' + str(apdu.segmentationSupported) + '\n')
sys.stdout.write('vendorID = ' + str(apdu.vendorID) + '\n')
sys.stdout.flush()
# forward it along
BIPSimpleApplication.indication(self, apdu)
bacpypes_debugging(WhoIsIAmApplication)
#
# WhoIsIAmConsoleCmd
#
class WhoIsIAmConsoleCmd(ConsoleCmd):
def do_whois(self, args):
"""whois [ <addr>] [ <lolimit> <hilimit> ]"""
args = args.split()
if _debug: WhoIsIAmConsoleCmd._debug("do_whois %r", args)
try:
# build a request
request = WhoIsRequest()
if (len(args) == 1) or (len(args) == 3):
request.pduDestination = Address(args[0])
del args[0]
else:
request.pduDestination = GlobalBroadcast()
if len(args) == 2:
request.deviceInstanceRangeLowLimit = int(args[0])
request.deviceInstanceRangeHighLimit = int(args[1])
if _debug: WhoIsIAmConsoleCmd._debug(" - request: %r", request)
# give it to the application
this_application.request(request)
except Exception, e:
WhoIsIAmConsoleCmd._exception("exception: %r", e)
def do_iam(self, args):
"""iam"""
args = args.split()
if _debug: WhoIsIAmConsoleCmd._debug("do_iam %r", args)
try:
# build a request
request = IAmRequest()
request.pduDestination = GlobalBroadcast()
# set the parameters from the device object
request.iAmDeviceIdentifier = this_device.objectIdentifier
request.maxAPDULengthAccepted = this_device.maxApduLengthAccepted
request.segmentationSupported = this_device.segmentationSupported
request.vendorID = this_device.vendorIdentifier
if _debug: WhoIsIAmConsoleCmd._debug(" - request: %r", request)
# give it to the application
this_application.request(request)
except Exception, e:
WhoIsIAmConsoleCmd._exception("exception: %r", e)
def do_rtn(self, args):
"""rtn <addr> <net> ... """
args = args.split()
if _debug: WhoIsIAmConsoleCmd._debug("do_rtn %r", args)
# safe to assume only one adapter
adapter = this_application.nsap.adapters[0]
if _debug: WhoIsIAmConsoleCmd._debug(" - adapter: %r", adapter)
# provide the address and a list of network numbers
router_address = Address(args[0])
network_list = [int(arg) for arg in args[1:]]
# pass along to the service access point
this_application.nsap.add_router_references(adapter, router_address, network_list)
bacpypes_debugging(WhoIsIAmConsoleCmd)
#
# __main__
#
try:
# parse the command line arguments
args = ConfigArgumentParser(description=__doc__).parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(
objectName=args.ini.objectname,
objectIdentifier=int(args.ini.objectidentifier),
maxApduLengthAccepted=int(args.ini.maxapdulengthaccepted),
segmentationSupported=args.ini.segmentationsupported,
vendorIdentifier=int(args.ini.vendoridentifier),
)
# build a bit string that knows about the bit names
pss = ServicesSupported()
pss['whoIs'] = 1
pss['iAm'] = 1
pss['readProperty'] = 1
pss['writeProperty'] = 1
# set the property value to be just the bits
this_device.protocolServicesSupported = pss.value
# make a simple application
this_application = WhoIsIAmApplication(this_device, args.ini.address)
# get the services supported
services_supported = this_application.get_services_supported()
if _debug: _log.debug(" - services_supported: %r", services_supported)
# let the device object know
this_device.protocolServicesSupported = services_supported.value
# make a console
this_console = WhoIsIAmConsoleCmd()
_log.debug("running")
run()
except Exception, e:
_log.exception("an error has occurred: %s", e)
finally:
_log.debug("finally")
I just don't know how to call do_iam so that it runs automatically when the app launches.
Any help?
Thanks.
After the line this_console = WhoIsIAmConsoleCmd(), can you write this_console.do_iam('')?
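Concretely, that would go right before run() in the __main__ block; a minimal sketch of the placement (the empty string is just the unused argument do_iam expects):

# make a console
this_console = WhoIsIAmConsoleCmd()
# broadcast an I-Am once at start-up, before entering the run loop
this_console.do_iam('')
_log.debug("running")
run()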
I am trying to fetch tweets using this code, but it results in a traceback.
Please help me resolve the problem.
import time
import pycurl
import urllib
import json
import oauth2 as oauth
API_ENDPOINT_URL = 'https://stream.twitter.com/1.1/statuses/filter.json'
USER_AGENT = 'TwitterStream 1.0' # This can be anything really
# You need to replace these with your own values
OAUTH_KEYS = {'consumer_key': 'ABC',
'consumer_secret': 'ABC',
'access_token_key': 'ABC',
'access_token_secret': 'ABC'}
# These values are posted when setting up the connection
POST_PARAMS = {'include_entities': 0,
'stall_warning': 'true',
'track': 'iphone,ipad,ipod'}
# twitter streaming is here
class TwitterStream:
def __init__(self, timeout=False):
self.oauth_token = oauth.Token(key=OAUTH_KEYS['access_token_key'], secret=OAUTH_KEYS['access_token_secret'])
self.oauth_consumer = oauth.Consumer(key=OAUTH_KEYS['consumer_key'], secret=OAUTH_KEYS['consumer_secret'])
self.conn = None
self.buffer = ''
self.timeout = timeout
self.setup_connection()
def setup_connection(self):
""" Create persistant HTTP connection to Streaming API endpoint using cURL.
"""
if self.conn:
self.conn.close()
self.buffer = ''
self.conn = pycurl.Curl()
# Restart connection if less than 1 byte/s is received during "timeout" seconds
if isinstance(self.timeout, int):
self.conn.setopt(pycurl.LOW_SPEED_LIMIT, 1)
self.conn.setopt(pycurl.LOW_SPEED_TIME, self.timeout)
self.conn.setopt(pycurl.URL, API_ENDPOINT_URL)
self.conn.setopt(pycurl.USERAGENT, USER_AGENT)
# Using gzip is optional but saves us bandwidth.
self.conn.setopt(pycurl.ENCODING, 'deflate, gzip')
self.conn.setopt(pycurl.POST, 1)
self.conn.setopt(pycurl.POSTFIELDS, urllib.urlencode(POST_PARAMS))
self.conn.setopt(pycurl.HTTPHEADER, ['Host: stream.twitter.com',
'Authorization: %s' % self.get_oauth_header()])
# self.handle_tweet is the method that is called when new tweets arrive
self.conn.setopt(pycurl.WRITEFUNCTION, self.handle_tweet)
def get_oauth_header(self):
""" Create and return OAuth header.
"""
params = {'oauth_version': '1.0',
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time())}
req = oauth.Request(method='POST', parameters=params, url='%s?%s' % (API_ENDPOINT_URL,
urllib.urlencode(POST_PARAMS)))
req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), self.oauth_consumer, self.oauth_token)
return req.to_header()['Authorization'].encode('utf-8')
def start(self):
""" Start listening to Streaming endpoint.
Handle exceptions according to Twitter's recommendations.
"""
backoff_network_error = 0.25
backoff_http_error = 5
backoff_rate_limit = 60
while True:
self.setup_connection()
try:
self.conn.perform()
except:
# Network error, use linear back off up to 16 seconds
print 'Network error: %s' % self.conn.errstr()
print 'Waiting %s seconds before trying again' % backoff_network_error
time.sleep(backoff_network_error)
backoff_network_error = min(backoff_network_error + 1, 16)
continue
# HTTP Error
sc = self.conn.getinfo(pycurl.HTTP_CODE)
if sc == 420:
# Rate limit, use exponential back off starting with 1 minute and double each attempt
print 'Rate limit, waiting %s seconds' % backoff_rate_limit
time.sleep(backoff_rate_limit)
backoff_rate_limit *= 2
else:
# HTTP error, use exponential back off up to 320 seconds
print 'HTTP error %s, %s' % (sc, self.conn.errstr())
print 'Waiting %s seconds' % backoff_http_error
time.sleep(backoff_http_error)
backoff_http_error = min(backoff_http_error * 2, 320)
def handle_tweet(self, data):
""" This method is called when data is received through Streaming endpoint.
"""
self.buffer += data
if data.endswith('\r\n') and self.buffer.strip():
# complete message received
message = json.loads(self.buffer)
self.buffer = ''
msg = ''
if message.get('limit'):
print 'Rate limiting caused us to miss %s tweets' % (message['limit'].get('track'))
elif message.get('disconnect'):
raise Exception('Got disconnect: %s' % message['disconnect'].get('reason'))
elif message.get('warning'):
print 'Got warning: %s' % message['warning'].get('message')
else:
print 'Got tweet with text: %s' % message.get('text')
if __name__ == '__main__':
ts = TwitterStream()
ts.setup_connection()
ts.start()
Traceback:
Traceback (most recent call last):
File "C:\Python27\nytimes\2062014\pycurltweets.py", line 115, in <module>
ts = TwitterStream()
File "C:\Python27\nytimes\2062014\pycurltweets.py", line 23, in __init__
self.oauth_token = oauth.token(key=OAUTH_KEYS['access_token_key'], secret=OAUTH_KEYS['access_token_secret'])
AttributeError: 'module' object has no attribute 'Token'
Are you sure oauth2 is installed properly and is the correct version?
See http://data-scientist.ch/install-oauth2-for-python-on-windows/
Open a Python REPL shell and run:
import oauth2 as oauth
print oauth.OAUTH_VERSION
dir(oauth)
and post the result.
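If the attribute really is missing, one common cause (an assumption here, not something visible in your traceback) is a local file called oauth2.py shadowing the installed package; checking where the module is loaded from rules that out:

import oauth2 as oauth
print oauth.__file__        # should point into site-packages, not your script's directory
print hasattr(oauth, 'Token')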
I'm trying to transmit TCP/IP over a radio that is connected to my computer (specifically, the USRP). Right now, it's done very simply using Tun/Tap to set up a new network interface. Here's the code:
from gnuradio import gr, gru, modulation_utils
from gnuradio import usrp
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import random
import time
import struct
import sys
import os
# from current dir
from transmit_path import transmit_path
from receive_path import receive_path
import fusb_options
#print os.getpid()
#raw_input('Attach and press enter')
# Linux specific...
# TUNSETIFF ifr flags from <linux/tun_if.h>
IFF_TUN = 0x0001 # tunnel IP packets
IFF_TAP = 0x0002 # tunnel ethernet frames
IFF_NO_PI = 0x1000 # don't pass extra packet info
IFF_ONE_QUEUE = 0x2000 # beats me ;)
def open_tun_interface(tun_device_filename):
from fcntl import ioctl
mode = IFF_TAP | IFF_NO_PI
TUNSETIFF = 0x400454ca
tun = os.open(tun_device_filename, os.O_RDWR)
ifs = ioctl(tun, TUNSETIFF, struct.pack("16sH", "gr%d", mode))
ifname = ifs[:16].strip("\x00")
return (tun, ifname)
# /////////////////////////////////////////////////////////////////////////////
# the flow graph
# /////////////////////////////////////////////////////////////////////////////
class my_top_block(gr.top_block):
def __init__(self, mod_class, demod_class,
rx_callback, options):
gr.top_block.__init__(self)
self.txpath = transmit_path(mod_class, options)
self.rxpath = receive_path(demod_class, rx_callback, options)
self.connect(self.txpath);
self.connect(self.rxpath);
def send_pkt(self, payload='', eof=False):
return self.txpath.send_pkt(payload, eof)
def carrier_sensed(self):
"""
Return True if the receive path thinks there's carrier
"""
return self.rxpath.carrier_sensed()
# /////////////////////////////////////////////////////////////////////////////
# Carrier Sense MAC
# /////////////////////////////////////////////////////////////////////////////
class cs_mac(object):
"""
Prototype carrier sense MAC
Reads packets from the TUN/TAP interface, and sends them to the PHY.
Receives packets from the PHY via phy_rx_callback, and sends them
into the TUN/TAP interface.
Of course, we're not restricted to getting packets via TUN/TAP, this
is just an example.
"""
def __init__(self, tun_fd, verbose=False):
self.tun_fd = tun_fd # file descriptor for TUN/TAP interface
self.verbose = verbose
self.tb = None # top block (access to PHY)
def set_top_block(self, tb):
self.tb = tb
def phy_rx_callback(self, ok, payload):
"""
Invoked by thread associated with PHY to pass received packet up.
@param ok: bool indicating whether payload CRC was OK
@param payload: contents of the packet (string)
"""
if self.verbose:
print "Rx: ok = %r len(payload) = %4d" % (ok, len(payload))
if ok:
os.write(self.tun_fd, payload)
def main_loop(self):
"""
Main loop for MAC.
Only returns if we get an error reading from TUN.
FIXME: may want to check for EINTR and EAGAIN and reissue read
"""
min_delay = 0.001 # seconds
while 1:
payload = os.read(self.tun_fd, 10*1024)
if not payload:
self.tb.send_pkt(eof=True)
break
if self.verbose:
print "Tx: len(payload) = %4d" % (len(payload),)
delay = min_delay
while self.tb.carrier_sensed():
sys.stderr.write('B')
time.sleep(delay)
if delay < 0.050:
delay = delay * 2 # exponential back-off
self.tb.send_pkt(payload)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
mods = modulation_utils.type_1_mods()
demods = modulation_utils.type_1_demods()
parser = OptionParser (option_class=eng_option, conflict_handler="resolve")
expert_grp = parser.add_option_group("Expert")
parser.add_option("-m", "--modulation", type="choice", choices=mods.keys(),
default='gmsk',
help="Select modulation from: %s [default=%%default]"
% (', '.join(mods.keys()),))
parser.add_option("-v","--verbose", action="store_true", default=False)
expert_grp.add_option("-c", "--carrier-threshold", type="eng_float", default=30,
help="set carrier detect threshold (dB) [default=%default]")
expert_grp.add_option("","--tun-device-filename", default="/dev/net/tun",
help="path to tun device file [default=%default]")
transmit_path.add_options(parser, expert_grp)
receive_path.add_options(parser, expert_grp)
for mod in mods.values():
mod.add_options(expert_grp)
for demod in demods.values():
demod.add_options(expert_grp)
fusb_options.add_options(expert_grp)
(options, args) = parser.parse_args ()
if len(args) != 0:
parser.print_help(sys.stderr)
sys.exit(1)
if options.rx_freq is None or options.tx_freq is None:
sys.stderr.write("You must specify -f FREQ or --freq FREQ\n")
parser.print_help(sys.stderr)
sys.exit(1)
# open the TUN/TAP interface
(tun_fd, tun_ifname) = open_tun_interface(options.tun_device_filename)
# Attempt to enable realtime scheduling
r = gr.enable_realtime_scheduling()
if r == gr.RT_OK:
realtime = True
else:
realtime = False
print "Note: failed to enable realtime scheduling"
# If the user hasn't set the fusb_* parameters on the command line,
# pick some values that will reduce latency.
if options.fusb_block_size == 0 and options.fusb_nblocks == 0:
if realtime: # be more aggressive
options.fusb_block_size = gr.prefs().get_long('fusb', 'rt_block_size', 1024)
options.fusb_nblocks = gr.prefs().get_long('fusb', 'rt_nblocks', 16)
else:
options.fusb_block_size = gr.prefs().get_long('fusb', 'block_size', 4096)
options.fusb_nblocks = gr.prefs().get_long('fusb', 'nblocks', 16)
#print "fusb_block_size =", options.fusb_block_size
#print "fusb_nblocks =", options.fusb_nblocks
# instantiate the MAC
mac = cs_mac(tun_fd, verbose=True)
# build the graph (PHY)
tb = my_top_block(mods[options.modulation],
demods[options.modulation],
mac.phy_rx_callback,
options)
mac.set_top_block(tb) # give the MAC a handle for the PHY
if tb.txpath.bitrate() != tb.rxpath.bitrate():
print "WARNING: Transmit bitrate = %sb/sec, Receive bitrate = %sb/sec" % (
eng_notation.num_to_str(tb.txpath.bitrate()),
eng_notation.num_to_str(tb.rxpath.bitrate()))
print "modulation: %s" % (options.modulation,)
print "freq: %s" % (eng_notation.num_to_str(options.tx_freq))
print "bitrate: %sb/sec" % (eng_notation.num_to_str(tb.txpath.bitrate()),)
print "samples/symbol: %3d" % (tb.txpath.samples_per_symbol(),)
#print "interp: %3d" % (tb.txpath.interp(),)
#print "decim: %3d" % (tb.rxpath.decim(),)
tb.rxpath.set_carrier_threshold(options.carrier_threshold)
print "Carrier sense threshold:", options.carrier_threshold, "dB"
print
print "Allocated virtual ethernet interface: %s" % (tun_ifname,)
print "You must now use ifconfig to set its IP address. E.g.,"
print
print " $ sudo ifconfig %s 192.168.200.1" % (tun_ifname,)
print
print "Be sure to use a different address in the same subnet for each machine."
print
tb.start() # Start executing the flow graph (runs in separate threads)
mac.main_loop() # don't expect this to return...
tb.stop() # but if it does, tell flow graph to stop.
tb.wait() # wait for it to finish
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
(Anyone familiar with GNU Radio will recognize this as tunnel.py)
My question is, is there a better way to move packets to and from the kernel than tun/tap? I've been looking at ipip or maybe using sockets, but I'm pretty sure those won't be very fast. Speed is what I'm most concerned with.
Remember that tunnel.py is a really, really rough example and hasn't been updated in a while. It's not meant to be a basis for other code, so be careful how much you rely on it.
Also, remember that TCP over unreliable radio links has significant issues:
http://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_over_wireless_networks