I have the following code, which returns the public IPs:
def gather_public_ip():
    ACCESS_KEY = config.get('aws', 'access_key')
    SECRET_KEY = config.get('aws', 'secret_key')
    regions = ['us-west-2', 'eu-central-1', 'ap-southeast-1']
    # regions = config.get('aws','region').split(',')
    all_EIP = []
    for region in regions:
        client = boto3.client('ec2', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY, region_name=region)
        addresses_dict = client.describe_addresses()
        for eip_dict in addresses_dict['Addresses']:
            if 'PrivateIpAddress' in eip_dict:
                print eip_dict['PublicIp']
                # return str(eip_dict['PublicIp'])
                all_EIP.append(eip_dict['PublicIp'])
    print all_EIP
    # print str(all_EIP)
    return str(all_EIP)
It is called, and the result consumed, like this:
net_range = gather_public_ip()
for ip in net_range:
    r = s.run(ip)
run looks like:
def run(self, targets="", options="-Pn"):
    # start a new nmap scan on localhost with some specific options
    syslog.syslog("Scan started")
    parsed = None
    nmproc = NmapProcess(targets, options)
    rc = nmproc.run()
    if rc != 0:
        syslog.syslog("nmap scan failed: {0}".format(nmproc.stderr))
    try:
        parsed = NmapParser.parse(nmproc.stdout)
        self.report = parsed
    except NmapParserException as e:
        syslog.syslog("Exception raised while parsing scan: {0}".format(e.msg))
    syslog.syslog("Scan complete")
    syslog.syslog("Scan duration: " + str(parsed.elapsed))
    self.report = parsed
    return parsed
After printing the list, this throws:
Traceback (most recent call last):
  File "portwatch.py", line 300, in <module>
    r = s.run(ip)
  File "portwatch.py", line 239, in run
    rc = nmproc.run()
  File "/usr/local/lib/python2.7/dist-packages/libnmap/process.py", line 257, in run
    else shlex.split(self.__nmap_command_line)
  File "/usr/lib/python2.7/shlex.py", line 279, in split
    return list(lex)
  File "/usr/lib/python2.7/shlex.py", line 269, in next
    token = self.get_token()
  File "/usr/lib/python2.7/shlex.py", line 96, in get_token
    raw = self.read_token()
  File "/usr/lib/python2.7/shlex.py", line 172, in read_token
    raise ValueError, "No closing quotation"
ValueError: No closing quotation
Make sure your ip is not "" or shlex will fail; cf. Which exception to raise if a given string does not match some format?
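Note also what gather_public_ip actually returns: str(all_EIP), the printable form of the whole list. The for loop in the caller therefore iterates over the characters of that string ('[', "'", '1', ...), and the lone quote characters are exactly what shlex chokes on. A minimal sketch of the calling side, assuming the function is changed to return the list itself (return all_EIP):

net_range = gather_public_ip()  # assuming: return all_EIP, not str(all_EIP)
for ip in net_range:
    if not ip.strip():  # guard against empty entries before NmapProcess/shlex sees them
        continue
    r = s.run(ip)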
I am trying to pull a huge amount of data (millions of rows), and I am getting the following error when running my code. If I run the same code with a small range (to be exact, a range of 2) it runs successfully. Please help me figure out whether the problem is in my code or on the API side.
Thanks
The error I am getting:
DEBUG:google.api_core.bidi:Started helper thread Thread-ConsumeBidirectionalStream
DEBUG:google.api_core.bidi:Thread-ConsumeBidirectionalStream caught error 400 Request contains an invalid argument. and will exit. Generally this is due to the RPC itself being cancelled and the error will be surfaced to the calling code.
Traceback (most recent call last):
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 147, in error_remapped_callable
    return _StreamingResponseIterator(
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 73, in __init__
    self._stored_first_result = next(self._wrapped)
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/grpc/_channel.py", line 426, in __next__
    return self._next()
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/grpc/_channel.py", line 826, in _next
    raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
    status = StatusCode.INVALID_ARGUMENT
    details = "Request contains an invalid argument."
    debug_error_string = "{"created":"#1652904360.179503883","description":"Error received from peer ipv4:173.194.76.95:443","file":"src/core/lib/surface/call.cc","file_line":952,"grpc_message":"Request contains an invalid argument.","grpc_status":3}"
>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/bidi.py", line 636, in _thread_main
    self._bidi_rpc.open()
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/bidi.py", line 279, in open
    call = self._start_rpc(iter(request_generator), metadata=self._rpc_metadata)
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/cloud/bigquery_storage_v1/services/big_query_write/client.py", line 678, in append_rows
    response = rpc(
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/gapic_v1/method.py", line 154, in __call__
    return wrapped_func(*args, **kwargs)
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/retry.py", line 283, in retry_wrapped_func
    return retry_target(
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/retry.py", line 190, in retry_target
    return target()
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/api_core/grpc_helpers.py", line 151, in error_remapped_callable
    raise exceptions.from_grpc_error(exc) from exc
google.api_core.exceptions.InvalidArgument: 400 Request contains an invalid argument.
INFO:google.api_core.bidi:Thread-ConsumeBidirectionalStream exiting
DEBUG:google.cloud.bigquery_storage_v1.writer:Finished stopping manager.
Traceback (most recent call last):
  File "write_data_to_db2.py", line 207, in <module>
    p.append_rows_pending(project_id='dwingestion', dataset_id='ke',
  File "write_data_to_db2.py", line 188, in append_rows_pending
    response_future_1 = append_rows_stream.send(request)
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/cloud/bigquery_storage_v1/writer.py", line 234, in send
    return self._open(request)
  File "/home/coyugi/teltel_env/lib/python3.8/site-packages/google/cloud/bigquery_storage_v1/writer.py", line 207, in _open
    raise request_exception
google.api_core.exceptions.Unknown: None There was a problem opening the stream. Try turning on DEBUG level logs to see the error.
Summary Of My Code
# PULLING DATA FROM THE API
def whole_teltel_raw_data():
    # Creating a session to introduce network consistency
    session = requests.Session()
    retry = Retry(connect=3, backoff_factor=1.0)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    url = "https://my_api_url"
    the_headers = {"X-API-KEY": 'my key'}
    offset_limit = 1249500
    teltel_data = []
    # Loop through the results and if present extend the teltel_data list

# ======================================================================================================================
# WRITE THE DATA TO THE DATA WAREHOUSE
# ======================================================================================================================
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'dwingestion-b033d9535e9d.json'
def create_row_data(tuple_data):
    call_id, starttime, stoptime, direction, type, status, duration_sec, rate, cost, transfer, extra_prefix, audio_url, \
        hangup_element, caller_number, caller_type, caller_cid, caller_dnid, caller_user_id, caller_user_short, \
        callee_number, calle_type, callee, hangup_element_name, hangup_element_element, callee_user_id, callee_user_short, \
        caller = tuple_data
    row = teltel_call_data_pb2.TeltelCall()
    row.call_id = call_id
    row.starttime = starttime
    row.stoptime = stoptime
    row.direction = direction
    row.type = type
    row.status = status
    row.duration_sec = duration_sec
    row.rate = rate
    row.cost = cost
    row.transfer = transfer
    row.extra_prefix = extra_prefix
    row.audio_url = audio_url
    row.hangup_element = hangup_element
    row.caller_number = caller_number
    row.caller_type = caller_type
    row.caller_cid = caller_cid
    row.caller_dnid = caller_dnid
    row.caller_user_id = caller_user_id
    row.caller_user_short = caller_user_short
    row.callee_number = callee_number
    row.calle_type = calle_type
    row.callee = callee
    row.hangup_element_name = hangup_element_name
    row.hangup_element_title = hangup_element_element
    row.callee_user_id = callee_user_id
    row.callee_user_short = callee_user_short
    row.caller = caller
    return row.SerializeToString()
# Creating connection to the data warehouse
def create_bigquery_storage_client(google_credentials):
    return bigquery_storage_v1.client.BigQueryWriteClient(
        credentials=google_credentials
    )

class GcpBigqueryStorageService(object):
    def __init__(self, google_credentials=None, gcp_config=None):
        self.client = create_bigquery_storage_client(google_credentials)
        self.config = gcp_config

    def append_rows_pending(self, project_id: str, dataset_id: str, table_id: str):
        """Create a write stream, write some sample data, and commit the stream."""
        # write_client = self.client
        parent = self.client.table_path(project_id, dataset_id, table_id)
        write_stream = types.WriteStream()
        # When creating the stream, choose the type. Use the PENDING type to wait
        write_stream.type_ = types.WriteStream.Type.PENDING
        write_stream = self.client.create_write_stream(
            parent=parent, write_stream=write_stream
        )
        stream_name = write_stream.name
        # Create a template with fields needed for the first request.
        request_template = types.AppendRowsRequest()
        # The initial request must contain the stream name.
        request_template.write_stream = stream_name
        # So that BigQuery knows how to parse the serialized_rows, generate a
        # protocol buffer representation of your message descriptor.
        proto_schema = types.ProtoSchema()
        proto_descriptor = descriptor_pb2.DescriptorProto()
        teltel_call_data_pb2.TeltelCall.DESCRIPTOR.CopyToProto(proto_descriptor)
        proto_schema.proto_descriptor = proto_descriptor
        proto_data = types.AppendRowsRequest.ProtoData()
        proto_data.writer_schema = proto_schema
        request_template.proto_rows = proto_data
        # Some stream types support an unbounded number of requests. Construct an
        # AppendRowsStream to send an arbitrary number of requests to a stream.
        append_rows_stream = writer.AppendRowsStream(self.client, request_template)
        # Create a batch of row data by appending proto2 serialized bytes to the
        # serialized_rows repeated field.
        proto_rows = types.ProtoRows()
        row_number = 0
        for row in whole_teltel_raw_data():
            proto_rows.serialized_rows.append(create_row_data(row))
            # checking the writing progress
            row_number = row_number + 1
            print("Writing to the database row number", row_number)
        # The first request must always have an offset of 0.
        request = types.AppendRowsRequest()
        proto_data = types.AppendRowsRequest.ProtoData()
        proto_data.rows = proto_rows
        request.proto_rows = proto_data
        append_rows_stream.close()
        # A PENDING type stream must be "finalized" before being committed. No new
        # records can be written to the stream after this method has been called.
        self.client.finalize_write_stream(name=write_stream.name)
        # Commit the stream you created earlier.
        batch_commit_write_streams_request = types.BatchCommitWriteStreamsRequest()
        batch_commit_write_streams_request.parent = parent
        batch_commit_write_streams_request.write_streams = [write_stream.name]
        self.client.batch_commit_write_streams(batch_commit_write_streams_request)
        print(f"Writes to stream: '{write_stream.name}' have been committed.")

p = GcpBigqueryStorageService()
p.append_rows_pending(project_id='my_project', dataset_id='my_id', table_id='teltel_call_2')
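No accepted answer is recorded here, but two things stand out. First, the summary builds request and then calls append_rows_stream.close() without ever sending it (the traceback, though, shows append_rows_stream.send(request) at line 188, so the summary is presumably just truncated). Second, and more likely the cause of the 400 (an assumption on my part, not confirmed by the thread): a single AppendRowsRequest carrying millions of serialized rows far exceeds the per-request size limit of the AppendRows RPC (about 10 MB), which would explain why a range of 2 works. A sketch of sending bounded batches instead, reusing the names from the code above:

BATCH_SIZE = 500  # assumption: tune so each request stays well under the ~10 MB limit

def flush(stream, rows):
    request = types.AppendRowsRequest()
    proto_data = types.AppendRowsRequest.ProtoData()
    proto_data.rows = rows
    request.proto_rows = proto_data
    stream.send(request).result()  # block on the future so errors surface here

proto_rows = types.ProtoRows()
for row in whole_teltel_raw_data():
    proto_rows.serialized_rows.append(create_row_data(row))
    if len(proto_rows.serialized_rows) >= BATCH_SIZE:
        flush(append_rows_stream, proto_rows)
        proto_rows = types.ProtoRows()  # start a fresh batch
if proto_rows.serialized_rows:  # flush the remainder
    flush(append_rows_stream, proto_rows)
append_rows_stream.close()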
During a small test to learn how to run a small server, the 'start' method raised an error after I added the endpoint and some variables:
from opcua import Server
import datetime
import time
my_server = Server()
url = 'opc.tcp//192.168.1.5:4841'
my_server.set_endpoint(url)
name = "OPCUA_TEST_Server"
addspace = my_server.register_namespace(name)
node = my_server.get_objects_node()
param = node.add_object(addspace, "Parameters")
t_text1 = param.add_variable(addspace, "Text 1", "Text_1")
i_int1 = param.add_variable(addspace, "myInteger1", 0)
b_bool1 = param.add_variable(addspace, "myBool1", False)
t_text1.set_writable()
i_int1.set_writable()
b_bool1.set_writable()
my_server.start()
print("Server started at {}".format(url))
print("At" + str(datetime.datetime.now()))
while True:
    time.sleep(0.5)
And the line
my_server.start()
returns the following error:
Endpoints other than open requested but private key and certificate are not set.
Traceback (most recent call last):
  File "C:/Users/a767611/Desktop/Repositorios/flexigrid/opc-ua-server/test-opc-ua-server.py", line 23, in <module>
    my_server.start()
  File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\server\server.py", line 347, in start
    raise exp
  File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\server\server.py", line 344, in start
    self.bserver.start()
  File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\server\binary_server_asyncio.py", line 116, in start
    self._server = self.loop.run_coro_and_wait(coro)
  File "C:\Users\a767611\Anaconda3\lib\site-packages\opcua\common\utils.py", line 197, in run_coro_and_wait
    return task.result()
  File "C:\Users\a767611\Anaconda3\lib\asyncio\base_events.py", line 1393, in create_server
    raise ValueError('Neither host/port nor sock were specified')
ValueError: Neither host/port nor sock were specified
Your endpoint URL is malformed.
It should be:
url = 'opc.tcp://192.168.1.5:4841'
Note the missing colon after opc.tcp.
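That also explains the error text: the server presumably extracts host and port from the endpoint with urlparse, and without the colon there is no scheme, so the whole address lands in the path component and hostname/port come back empty, which is exactly what the asyncio ValueError says. A quick illustration (plain urlparse, not opcua code):

from urllib.parse import urlparse

print(urlparse('opc.tcp//192.168.1.5:4841').hostname)   # None -> "Neither host/port nor sock were specified"
print(urlparse('opc.tcp://192.168.1.5:4841').hostname)  # '192.168.1.5'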
In the code below I am trying to compute u_count and h_count, but every time I get a KeyError.
import numpy as np
import pandas as pd
import matplotlib
import re
import datetime

pattern = '^(([0-2]?[0-9]/[0-9]?[0-9]/[0-9][0-9]), ([0-9]?[0-9]:[0-9][0-9]\s\w{2}) - (\w+\s\w+|\w+|):( [\w ]+))'

def startsWithDateTime(pattern, s):
    result = re.match(pattern, s)
    if result:
        return True
    return False

def getDataPoint(pattern, s):
    result = re.match(pattern, s)
    date = result[2]
    time = result[3]
    author = result[4]
    message = result[5]
    return date, time, author, message

parsedData = []  # List to keep track of data so it can be used by a Pandas dataframe
conversationPath = "WhatsApp_Chat_with_Umesh.txt"  # text file
with open(conversationPath, encoding="utf-8") as fp:
    fp.readline()
    messageBuffer = []  # Buffer to capture intermediate output for multi-line messages
    date, time, author = None, None, None  # Intermediate variables to keep track of the current message being processed
    while True:
        line = fp.readline()
        if not line:  # Stop reading further if end of file has been reached
            break
        line = line.strip()  # Guarding against erroneous leading and trailing whitespaces
        if startsWithDateTime(pattern, line):  # If a line starts with a Date Time pattern, then this indicates the beginning of a new message
            if len(messageBuffer) > 0:  # Check if the message buffer contains characters from previous iterations
                parsedData.append([date, time, author, ' '.join(messageBuffer)])  # Save the tokens from the previous message in parsedData
                messageBuffer.clear()  # Clear the message buffer so that it can be used for the next message
            date, time, author, message = getDataPoint(pattern, line)  # Identify and extract tokens from the line
            messageBuffer.append(message)  # Append message to buffer
        else:
            messageBuffer.append(line)

df = pd.DataFrame(parsedData, columns=['Date', 'Time', 'Author', 'Message'])

def count(df):
    df['Letter_count'] = df['Message'].apply(lambda s: len(s))
    df['Word_count'] = df['Message'].apply(lambda s: len(s.split()))

# count(df)
# print(df.head(50))
# print(df['Date'][0])

temp = 0
i = 0
h_count = 0
u_count = 0
while True:
    temp = df['Date'][i]
    filter = df[df['Date'] == temp]
    data = filter.iloc[0]
    # print(data.loc['Author'])
    # print(type(data))
    if data.loc['Author'] == 'Umesh Yadav':
        u_count += 1
    else:
        h_count += 1
    i = i + 1
Error Log:

(whatsup_env) L:\whatsup_chat_analyzer\WhatsApp-Chat-Analyzer>C:/Users/Harish/Anaconda3/python.exe l:/whatsup_chat_analyzer/WhatsApp-Chat-Analyzer/analyzer.py
Traceback (most recent call last):
  File "l:/whatsup_chat_analyzer/WhatsApp-Chat-Analyzer/analyzer.py", line 66, in <module>
    temp = df['Date'][i]
  File "C:\Users\Harish\Anaconda3\lib\site-packages\pandas\core\series.py", line 1068, in __getitem__
    result = self.index.get_value(self, key)
  File "C:\Users\Harish\Anaconda3\lib\site-packages\pandas\core\indexes\base.py", line 4730, in get_value
    return self._engine.get_value(s, k, tz=getattr(series.dtype, "tz", None))
  File "pandas\_libs\index.pyx", line 80, in pandas._libs.index.IndexEngine.get_value
  File "pandas\_libs\index.pyx", line 88, in pandas._libs.index.IndexEngine.get_value
  File "pandas\_libs\index.pyx", line 131, in pandas._libs.index.IndexEngine.get_loc
  File "pandas\_libs\hashtable_class_helper.pxi", line 992, in pandas._libs.hashtable.Int64HashTable.get_item
  File "pandas\_libs\hashtable_class_helper.pxi", line 998, in pandas._libs.hashtable.Int64HashTable.get_item
KeyError: 3327
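No answer is recorded for this one, but the traceback points at the final loop: while True never terminates, so i is incremented past the last row label (KeyError: 3327 is presumably len(df)) and df['Date'][i] fails. A minimal sketch of the likely fix, keeping the original logic but bounding the loop:

i = 0
h_count = 0
u_count = 0
while i < len(df):  # stop at the last row instead of looping forever
    temp = df['Date'][i]
    first_row = df[df['Date'] == temp].iloc[0]  # first message of that date
    if first_row.loc['Author'] == 'Umesh Yadav':
        u_count += 1
    else:
        h_count += 1
    i = i + 1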
The following python script worked like a charm last month:
Script:
import SoftLayer
client = SoftLayer.Client(username='someUser', api_key='someKey')
LastInvoice = client['Account'].getAllBillingItems()
print LastInvoice
Today's result:
C:\Python27\python.exe C:/Users/username/Documents/Python/Softlayer/Softlayer5.py
Traceback (most recent call last):
  File "C:/Users/username/Documents/Python/Softlayer/Softlayer5.py", line 8, in <module>
    LastInvoice = client['Account'].getAllBillingItems()
  File "C:\Python27\lib\site-packages\SoftLayer\API.py", line 392, in call_handler
    return self(name, *args, **kwargs)
  File "C:\Python27\lib\site-packages\SoftLayer\API.py", line 360, in call
    return self.client.call(self.name, name, *args, **kwargs)
  File "C:\Python27\lib\site-packages\SoftLayer\API.py", line 263, in call
    return self.transport(request)
  File "C:\Python27\lib\site-packages\SoftLayer\transports.py", line 197, in __call__
    raise exceptions.TransportError(ex.response.status_code, str(ex))
SoftLayer.exceptions.TransportError: TransportError(500): 500 Server Error: Internal Server Error for url: https://api.softlayer.com/xmlrpc/v3.1/SoftLayer_Account
Other API actions work fine... any thoughts?
Well, the charm has a defect: when the response contains a large amount of data, it causes timeouts and the connection is closed.
This issue can be solved by using result limits; take a look at this example:
import SoftLayer

# Your SoftLayer API username and key.
USERNAME = 'set me'
API_KEY = 'set me'

client = SoftLayer.Client(username=USERNAME, api_key=API_KEY)

offset = 0
limit = 50
accountService = client['SoftLayer_Account']

while True:
    try:
        result = accountService.getAllBillingItems(limit=limit, offset=offset)
        offset = offset + limit  # keep the limit constant so every page stays small
        print(result)
        if not result:
            break
    except SoftLayer.SoftLayerAPIError as e:
        print("Unable to retrieve the billing items. %s %s" % (e.faultCode, e.faultString))
        exit(1)
Regards
I'm trying to create a layer 2 protocol for practice. Here are my imports (in both files):
from pystack.layers.ethernet import EthernetProtocol
from pystack.layers.arp import ARPProtocol
from pystack.layers.ethernet import Ether
from scapytrame import EtherData
from scapy.all import *
It works perfectly when I run these lines in a file (Sender.py):
ethData = EtherData(src="00:21:70:a2:b7:6d", dst="d4:3d:7e:6c:66:e9", data="Kikouuu!", seq_ack=9)
sendp(ethData)
print ethData.show()
So I receive the packet and I can see it in Wireshark.
But now I want to do the same from a function, and it doesn't work. Here is the second file:
def sender(frames, macSrc="00:21:70:a2:b7:6d", iface="eth0"):
    i = 0
    sentBack = False
    while i < len(frames):
        # Sending packet
        frames[i].seq_ack = hex(i % 2)
        frames[i].show()
        sendp(frames[i])
        # Acknowledge + Next packet
        macDst = frames[i].dst
        filtre = "ether src " + macDst
        ack = sniff(iface=str(iface), filter=str(filtre), count=1, timeout=5)
        if not ack:
            sentBack = True
            continue
        ackData = EtherData(str(ack[0]))
        if int(ackData.seq_ack) == 2:
            sentBack = False
            i = i + 1
        else:
            continue

def receiver(macSrc="00:21:70:a2:b7:6d", iface="eth0"):
    nextFrame = 0
    while True:
        ack = sniff(iface=str(iface), filter=str(filtre), count=1)
        ackData = EtherData(str(ack[0]))
        if int(ackData.seq_ack) == nextFrame % 2:
            ackData.show()
            sender([EtherData(src="00:21:70:a2:b7:6d", dst=ackData.dst, seq_ack=2)])
            nextFrame = nextFrame + 1

frames_test = [EtherData(src="00:21:70:a2:b7:6d", dst="d4:3d:7e:6c:66:e9", data="Kikouuu!", seq_ack=9),
               EtherData(src="00:21:70:a2:b7:6d", dst="d4:3d:7e:6c:66:e9", data="Kikouuu!", seq_ack=9),
               EtherData(src="00:21:70:a2:b7:6d", dst="d4:3d:7e:6c:66:e9", data="Kikouuu!", seq_ack=9),
               EtherData(src="00:21:70:a2:b7:6d", dst="d4:3d:7e:6c:66:e9", data="Kikouuu!", seq_ack=9),
               EtherData(src="00:21:70:a2:b7:6d", dst="d4:3d:7e:6c:66:e9", data="Kikouuu!", seq_ack=9)]

sender(frames_test)
Then I get this error:
Traceback (most recent call last):
  File "ProtAckPosAndTrans.py", line 47, in <module>
    sender(frames_test)
  File "ProtAckPosAndTrans.py", line 15, in sender
    sendp(frames[i])
  File "/usr/lib/python2.7/dist-packages/scapy/sendrecv.py", line 259, in sendp
    __gen_send(conf.L2socket(iface=iface, *args, **kargs), x, inter=inter, loop=loop, count=count, verbose=verbose, realtime=realtime)
  File "/usr/lib/python2.7/dist-packages/scapy/sendrecv.py", line 234, in __gen_send
    s.send(p)
  File "/usr/lib/python2.7/dist-packages/scapy/supersocket.py", line 32, in send
    sx = str(x)
  File "/usr/lib/python2.7/dist-packages/scapy/packet.py", line 261, in __str__
    return self.build()
  File "/usr/lib/python2.7/dist-packages/scapy/packet.py", line 319, in build
    p = self.do_build()
  File "/usr/lib/python2.7/dist-packages/scapy/packet.py", line 308, in do_build
    pkt = self.self_build()
  File "/usr/lib/python2.7/dist-packages/scapy/packet.py", line 299, in self_build
    p = f.addfield(self, p, val)
  File "/usr/lib/python2.7/dist-packages/scapy/fields.py", line 70, in addfield
    return s+struct.pack(self.fmt, self.i2m(pkt,val))
struct.error: cannot convert argument to integer
frames[i].show() works perfectly, and I also did a print frames[i].__class__; everything is fine... so I can't see why it is not working.
I searched everywhere and can't find an answer. Thanks for any answers :)
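No answer is recorded here either, but the traceback offers a strong hint (my reading, not confirmed in the thread): struct.pack in fields.py is handed a value it cannot convert to an integer, and the only field the sender changes before sendp is seq_ack, which it sets to hex(i % 2). hex() returns a string such as '0x0', while the field is presumably declared as an integer field, so building the frame fails even though show() (which only formats values) still works. The fix under that assumption is one line:

frames[i].seq_ack = i % 2  # assign the int itself, not the string that hex(i % 2) returns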