Instancemethod object is not iterable (AirPi) (Python)

I got "ERROR: Exception during output: 'instancemethod' object is not iterable" when debugging this AirPi code from https://github.com/haydnw/AirPi/blob/master/outputs/ubidots.py
It is supposed to upload my sensor data to the Ubidots server.
I have put my correct token and variable IDs inside the configuration file for this AirPi.
requiredSpecificParams = ["token"]
optionalSpecificParams = ["showcost",
                          "ID-BMP085-temp",
                          "ID-BMP085-pres",
                          "ID-DHT22-hum",
                          "ID-DHT22-temp",
                          "ID-LDR",
                          "ID-TGS2600",
                          "ID-MiCS-2710",
                          "ID-MiCS-5525",
                          "ID-Microphone",
                          "ID-Raingauge"
                          ]

def __init__(self, config):
    super(Ubidots, self).__init__(config)
    self.token = self.params["token"]
    if "showcost" in self.params:
        self.showcost = self.params["showcost"]
    else:
        self.showcost = False
    self.ubivariables = {}
    for key, value in self.params.iteritems():
        if key[:3] == "ID-":
            if value:
                self.ubivariables[key[3:]] = value

def output_data(self, datapoints, dummy):
    """Output data.

    Output data in the format stipulated by the plugin. Calibration
    is carried out first if required.
    Because this particular plugin (ubidots) does not show time, the
    third argument (normally called 'sampletime') is called 'dummy'
    to facilitate compliance with pylint.

    Args:
        self: self.
        datapoints: A dict containing the data to be output.
        dummy: datetime representing the time the sample was taken.

    Returns:
        boolean True if data successfully output to Ubidots; False if
        not
    """
    if self.params["calibration"]:
        datapoints = self.cal.calibrate(datapoints)
    payload = []
    for point in datapoints:
        for ubivariablename, ubivariableid in self.ubivariables.iteritems():
            if point["sensor"] == ubivariablename:
                if point["value"] is not None:
                    thisvalue = {}
                    thisvalue["variable"] = ubivariableid
                    thisvalue["value"] = point["value"]
                    payload.append(thisvalue)
                break
    headers = {'Accept': 'application/json; indent=4', 'Content-Type': 'application/json', 'X-Auth-Token': self.token}
    url = "http://things.ubidots.com/api/v1.6/collections/values"
    req = None
    cost = 0
    try:
        req = requests.post(url, data=json.dumps(payload), headers=headers)
    except Exception, e:
        print("ERROR: Failed to contact the Ubidots service.")
        print("ERROR: " + str(e))
        return False
    for response in req.json:
        if response["status_code"] is not 201:
            print("ERROR: Ubidots responded with an error for one of the values.")
            return False
        else:
            cost += 1
    if self.showcost:
        print("Ubidots upload cost " + str(cost) + " dots.")
    return True

for response in req.json:
According to the documentation, json is a method and must be called, so this should be:
for response in req.json():
In the future it is helpful to include just as much of your code as is necessary to reproduce the problem, and to include the complete error message with traceback.
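One more fragile spot in the same loop: response["status_code"] is not 201 compares by identity rather than equality, which is not reliable for integers. A minimal sketch of the loop with both fixes applied:

for response in req.json():
    if response["status_code"] != 201:
        print("ERROR: Ubidots responded with an error for one of the values.")
        return False
    else:
        cost += 1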


I'm trying to write a program that can scan IPs against OSINT databases, but I keep running into TypeError. I'm not sure where to go from here

So, someone started this project and it fell into my lap to fix.
This is my first attempt at coding outside of school, so I'm not very experienced. Apologies if the solution is something obvious.
Basically, this program is supposed to do a few things:
Accept one or more inputs of IP addresses
Scan the databases written into it
Return each database that flags the IP as malicious
Return any database that has an error related to the API key.
Return the location of the IP as reported by AbuseIPDB (but I haven't even started that yet).
Right now, this is the error I'm getting.
Traceback (most recent call last):
  File "script.py", line 119, in <module>
    is_malicious, flagged_databases = check_ip_reputation(ip)
TypeError: cannot unpack non-iterable bool object
I have no idea how to correct that. I've rewritten a few lines to fix other errors, but something new always comes up.
This is the code. Something to note is that two of the databases are missing API keys, but those should return an error as mentioned above.
import requests

def check_ip_reputation(ip_address):
    # Set up a list to store the names of the databases that flag the IP as malicious
    flagged_databases = []
    # Set up the parameters for the AbuseIPDB request
    params = {
        'key': 'db327100238564236c6e25fe412ed23d80cfecab28691b0e672bd2a0798156250de5473bc648d255',
        'ipAddress': ip_address
    }
    # Make the request to AbuseIPDB
    try:
        response = requests.get('https://api.abuseipdb.com/api/v2/check', params=params)
    except requests.exceptions.RequestException as e:
        print(f'Error making request to AbuseIPDB: {e}')
        return False
    # Extract the "abuseConfidenceScore" field from the response
    abuse_score = response.json()['data']['abuseConfidenceScore']
    # Set a threshold for the AbuseIPDB score
    abuse_threshold = 50
    # Check if the abuse score is above the threshold
    if abuse_score >= abuse_threshold:
        flagged_databases.append('AbuseIPDB')
    # Set up the parameters for the VirusTotal request
    params = {
        'apikey': '7f21d9a126b73adf22ea100f883e38496f44412933a27cf1740858f3568be5e4',
        'ip': ip_address
    }
    # Make the request to VirusTotal
    try:
        response = requests.get('https://www.virustotal.com/vtapi/v2/ip-address/report', params=params)
    except requests.exceptions.RequestException as e:
        print(f'Error making request to VirusTotal: {e}')
        return False
    # Extract the "response_code" field from the response
    response_code = response.json()['response_code']
    # Check if the response code indicates that the IP is listed
    if response_code == 1:
        flagged_databases.append('VirusTotal')
    # Set up the parameters for the MXtoolbox request
    params = {
        'key': 'API_KEY',
        'ip': ip_address
    }
    # Make the request to MXtoolbox
    try:
        response = requests.get('https://mxtoolbox.com/api/v1/lookup/blacklist/' + ip_address, params=params)
    except requests.exceptions.RequestException as e:
        print(f'Error making request to MXtoolbox: {e}')
        return False
    # Try to extract the "blacklist" field from the response
    try:
        blacklist = response.json()['blacklist']
    except TypeError:
        # If the response is a string, then the IP is not blacklisted
        return False
    # Check if the IP is listed in any of the blacklists
    is_blacklisted = len(blacklist) > 0
    # Return the result
    return is_blacklisted
    # Set up the parameters for the Talos request
    params = {
        'key': 'API_KEY',
        'ip': ip_address
    }
    # Make the request to Talos
    try:
        response = requests.get('https://talosintelligence.com/documents/ip-blacklist', params=params)
    except requests.exceptions.RequestException as e:
        print(f'Error making request to Talos: {e}')
        return False
    # Check if the response code indicates that the IP is listed
    if response.status_code == 200:
        flagged_databases.append('Talos Intelligence')
    ##############################################################################
    # Combine the results from all four databases
    if(len(flagged_databases) > 0):
        is_malicious = len(flagged_databases)
    else:
        is_malicious = 0
    # Return the result
    return is_malicious, flagged_databases;

##############################################################################
# Prompt the user for a list of IP addresses
ip_addresses_str = input("Enter a list of IP addresses separated by commas: ")
# Split the input string into a list of IP addresses
ip_addresses = ip_addresses_str.split(',')
# Strip any leading or trailing whitespace from the IP addresses
ip_addresses = [ip.strip() for ip in ip_addresses]
# Check the reputation of each IP address
for ip in ip_addresses:
    is_malicious, flagged_databases = check_ip_reputation(ip)
    if is_malicious:
        print(f'{ip} has been flagged as malicious by the following databases: {", ".join(flagged_databases)}')
    else:
        print(f'{ip} has not been flagged as malicious by any of the OSINT databases.')
Any help would be so, so appreciated.
It's listed above, but I did try changing it so it could read strings and dictionaries.
Here are some suggestions for making the code more robust.
I noticed that you were returning from the function in a few places, with different semantics for the result:
Errors were being returned as just a boolean
In one of the checks, you returned just a boolean rather than adding to the flagged databases
You had a return statement that matched the calling code, but you would never reach it because of the earlier return statements
The calling code expected a boolean and a list, but it would only ever get a boolean, which is why you got the error.
When you are querying multiple sources, and presuming you want to return all the information that is available rather than give up with one error, it may be good to put results from all of them into a data structure and then return what you have, including the errors. Let the calling code decide whether having errors is a problem since some of the results may be useful.
import requests

def check_ip_reputation(ip_address):
    # Collect per-database results so one failure does not hide the others
    databaseResults = {'Errors': [], 'ReportingMalicious': [], 'ReportingClean': []}
    # Set up the parameters for the AbuseIPDB request
    params = {
        'key': 'db327100238564236c6e25fe412ed23d80cfecab28691b0e672bd2a0798156250de5473bc648d255',
        'ipAddress': ip_address
    }
    # Make the request to AbuseIPDB
    try:
        response = requests.get('https://api.abuseipdb.com/api/v2/check', params=params)
    except requests.exceptions.RequestException:
        databaseResults['Errors'].append('AbuseIPDB')
    else:
        # Extract the "abuseConfidenceScore" field from the response
        abuse_score = response.json()['data']['abuseConfidenceScore']
        # Set a threshold for the AbuseIPDB score
        abuse_threshold = 50
        # Check if the abuse score is above the threshold
        if abuse_score >= abuse_threshold:
            databaseResults['ReportingMalicious'].append('AbuseIPDB')
        else:
            databaseResults['ReportingClean'].append('AbuseIPDB')
    # Set up the parameters for the VirusTotal request
    params = {
        'apikey': '7f21d9a126b73adf22ea100f883e38496f44412933a27cf1740858f3568be5e4',
        'ip': ip_address
    }
    # Make the request to VirusTotal
    try:
        response = requests.get('https://www.virustotal.com/vtapi/v2/ip-address/report', params=params)
    except requests.exceptions.RequestException:
        databaseResults['Errors'].append('VirusTotal')
    else:
        # Extract the "response_code" field from the response
        response_code = response.json()['response_code']
        # Check if the response code indicates that the IP is listed
        if response_code == 1:
            databaseResults['ReportingMalicious'].append('VirusTotal')
        else:
            databaseResults['ReportingClean'].append('VirusTotal')
    # Set up the parameters for the MXtoolbox request
    params = {
        'key': 'API_KEY',
        'ip': ip_address
    }
    # Make the request to MXtoolbox
    try:
        response = requests.get('https://mxtoolbox.com/api/v1/lookup/blacklist/' + ip_address, params=params)
    except requests.exceptions.RequestException:
        databaseResults['Errors'].append('MXtoolbox')
    else:
        # Try to extract the "blacklist" field from the response
        try:
            blacklist = response.json()['blacklist']
            is_blacklisted = len(blacklist) > 0
        except TypeError:
            # If the response is a string, the IP is not blacklisted
            is_blacklisted = False
        if is_blacklisted:
            databaseResults['ReportingMalicious'].append('MXtoolbox')
        else:
            databaseResults['ReportingClean'].append('MXtoolbox')
    # Set up the parameters for the Talos request
    params = {
        'key': 'API_KEY',
        'ip': ip_address
    }
    # Make the request to Talos
    try:
        response = requests.get('https://talosintelligence.com/documents/ip-blacklist', params=params)
    except requests.exceptions.RequestException:
        databaseResults['Errors'].append('TalosIntelligence')
    else:
        # Check if the response code indicates that the IP is listed
        if response.status_code == 200:
            databaseResults['ReportingMalicious'].append('TalosIntelligence')
        else:
            databaseResults['ReportingClean'].append('TalosIntelligence')
    ##############################################################################
    # Combine the results from all four databases
    is_malicious = len(databaseResults['ReportingMalicious']) > 0
    # Return the result
    return is_malicious, databaseResults

##############################################################################
# Prompt the user for a list of IP addresses
ip_addresses_str = input("Enter a list of IP addresses separated by commas: ")
# Split the input string into a list of IP addresses
ip_addresses = ip_addresses_str.split(',')
# Strip any leading or trailing whitespace from the IP addresses
ip_addresses = [ip.strip() for ip in ip_addresses]
# Check the reputation of each IP address
for ip in ip_addresses:
    is_malicious, flagged_databases = check_ip_reputation(ip)
    if is_malicious:
        print(f'{ip} has been flagged as malicious by the following databases: ' + ", ".join(flagged_databases['ReportingMalicious']))
    else:
        print(f'{ip} has not been flagged as malicious by any of the OSINT databases.')
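One advantage of this structure (an illustrative note; the IP below is a documentation address): the calling code can also surface the lookups that failed, for example:

is_malicious, results = check_ip_reputation('203.0.113.7')
if results['Errors']:
    print('Lookups failed for: ' + ', '.join(results['Errors']))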

Mocking requests.post [duplicate]

This question already has answers here: How can I mock requests and the response? (20 answers). Closed 2 years ago.
This is my first time writing unit tests; apologies for any annoyances present despite my best efforts. I am trying to mock requests.post, but my test function is not having the desired effect of inducing a 404 status code so that I can test error handling.
mymodule.py
import time
import requests
from datetime import datetime

# logger, URL, HEADERS and parse() are assumed to be defined elsewhere in the module

def scrape(data):
    logger.debug(f'\nBeginning scrape function')
    result = {}
    exceptions = {}
    for id, receipts in data.items():
        logger.debug(f'Looking up Id # {id} and receipts: \n{receipts}')
        dispositions = []
        for receipt in receipts:
            logger.debug(f'The length of receipts is: ' + str(len(receipts)))
            attempts = 1
            while attempts < 6:
                logger.debug(
                    f'This is attempt number {attempts} to search for {receipt}')
                payload = {'receipt': 'receipt',
                           'button': 'CHECK+STATUS', }
                try:
                    NOW = datetime.today().strftime('%c')
                    logger.debug(NOW)
                    logger.debug(f'Making post request for: {receipt}')
                    response = requests.post(URL, data=payload, headers=HEADERS, timeout=10)
                except Exception as e:
                    logger.debug(f'There was an exception: {e}')
                    exceptions[id] = receipt + f': {e}'
                    time.sleep(3)
                    attempts += 1
                else:
                    logger.debug(f'It worked {response.status_code}')
                    attempts = 6
                    disp = parse(response)
                    dispositions.append(f'{receipt}: {disp}')
        result[id] = dispositions
    logger.debug(f'Here is the result: {result}')
    return result
test_mymodule.py
def test_scrape(self):
    print(f'\ntest_scrape running')
    # mock a 404 in scrape() here
    with patch("mymodule.requests") as patched_post:
        # mock a request response
        patched_post.return_value.status_code = 404
        print('404 mocked')
        # verify the function returns nothing due to 404
        result = scrape(test_data)
        print(f'\n{result}')
        mock_requests.post.assert_called_once()
        self.assertEqual(result, {})
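For reference, a minimal sketch of one way to get the 404 through to the code under test, assuming mymodule imports requests at module level (the test name and assertion are illustrative). Patching "mymodule.requests" replaces the whole module, so requests.post(...) returns patched_post.post.return_value, and setting patched_post.return_value.status_code has no effect; patching requests.post directly avoids that mismatch:

from unittest.mock import patch

def test_scrape_404(self):
    with patch("mymodule.requests.post") as patched_post:
        # The mock returned by requests.post(...) now carries the 404
        patched_post.return_value.status_code = 404
        result = scrape(test_data)
        patched_post.assert_called()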

Google Classroom Python API: use a batch request and get the results back

I am new to the Google Classroom API; I just started a few days ago. I want to use batch requests to make the code faster. My problem is that I don't know how to get the data back from the batch. I tried printing it in the console, but it returned None.
def get_all_courses(service):
    nextpageToken = ""
    list_id = []
    while nextpageToken is not None:
        result = service.courses().list(
            pageSize=500,
            pageToken=nextpageToken,
            fields="nextPageToken,courses(id)"
        )
        result = result.execute()
        lista_curs = result.get("courses")
        for curs in lista_curs:
            list_id.append(curs.get('id'))
        nextpageToken = result.get("nextPageToken")
    print("There are: " + str(len(list_id)))
    return list_id
This is the normal, non-batched code. How can I pass the request to a batch and get the results back?
batch1 = service.new_batch_http_request()
result = service.courses().list(
    pageSize=500,
    pageToken=nextpageToken,
    fields="nextPageToken,courses(id)"
)
batch1.add(result)
batch1.execute()  # how do I get back the result? do I have to call result.execute() again?
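For what it's worth, a minimal sketch (not from the original thread) of how results come back from a batch in google-api-python-client: they are delivered to the callback you register, not returned by execute().

course_ids = []

def callback(request_id, response, exception):
    # Each batched request lands here once the batch has run
    if exception is not None:
        print(f'Request {request_id} failed: {exception}')
    else:
        for curs in response.get('courses', []):
            course_ids.append(curs.get('id'))

batch = service.new_batch_http_request(callback=callback)
batch.add(service.courses().list(pageSize=500, fields="nextPageToken,courses(id)"))
batch.execute()  # blocks until every request in the batch has been answered
print(course_ids)

As for execute(http=...), the http argument is an optional httplib2.Http instance to send the batch over; in most cases it can simply be omitted and batch.execute() is enough.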
This is the code from the docs: https://developers.google.com/classroom/guides/batch
course_id = '123456'
student_emails = ['alice@example.edu', 'bob@example.edu']

def callback(request_id, response, exception):
    if exception is not None:
        print('Error adding user "{0}" to the course: {1}'.format(
            request_id, exception))
    else:
        print('User "{0}" added as a student to the course.'.format(
            response.get('profile').get('name').get('fullName')))

batch = service.new_batch_http_request(callback=callback)
for student_email in student_emails:
    student = {
        'userId': student_email
    }
    request = service.courses().students().create(courseId=course_id,
                                                  body=student)
    batch.add(request, request_id=student_email)
batch.execute(http=http)  # what is http? what can I pass there? what kind of object is that?
Thank you in advance
Did you get a response from this? How have you handled the next page tokens and added them to the batch call?
def makeRequestWithExponentialBackoff(analytics):
    for n in range(0, 5):
        try:
            response = service_two.courses().courseWork().studentSubmissions().list(pageToken=None, courseId=v['id'],
                                                                                    courseWorkId='-', pageSize=100000)
            print(response.get("studentSubmissions", []))
            return assignments.extend(response.get("nextPageToken", []))
        except:
            time.sleep((2 ** n) + random.random())
            continue
    print("There has been an error, the request never succeeded.")

def callback(request_id, response, exception):
    if exception is not None:
        print('Error getting assignments "{0}" for course: "{1}"'.format(request_id, exception))
        makeRequestWithExponentialBackoff(request_id)
    else:
        assignments.extend(response.get("studentSubmissions", []))
        nextPageToken = response.get("nextPageToken", None)
        if nextPageToken:
            iftoken(nextPageToken, request_id)
        else:
            pass

for k, v in filtered.iterrows():
    if count % 1000 == 0:
        submit = batch.execute(http=http)
        batch_count += 1
        print(batch_count)
        time.sleep(30)
        batch_array.append({'batchSent {}'.format(v['id'])})
        batch = None
        batch = service_two.new_batch_http_request(callback=callback)
        response = service_two.courses().courseWork().studentSubmissions().list(pageToken=None, courseId=v['id'],
                                                                                courseWorkId='-', pageSize=100000)
        batch.add(response, request_id=v['id'])
        array.append({'email': v['primaryEmail'], 'count': count, 'classid': v['id']})
        count = 1
    elif count % 1000 != 0 and batch == None:
        batch = service_two.new_batch_http_request(callback=callback)
        response = service_two.courses().courseWork().studentSubmissions().list(pageToken=None, courseId=v['id'],
                                                                                courseWorkId='-', pageSize=100000)
        batch.add(response, request_id=v['id'])
        array.append({'email': v['primaryEmail'], 'count': count, 'classid': v['id']})
        count += 1
    else:
        response = service_two.courses().courseWork().studentSubmissions().list(pageToken=None, courseId=v['id'],
                                                                                courseWorkId='-', pageSize=100000)
        batch.add(response, request_id=v['id'])
        array.append({'email': v['primaryEmail'], 'count': count, 'classid': v['id']})
        count += 1
where filtered is a dataframe of classroom IDs. However, I have an issue with the nextPageTokens and how to add these to the dataframe.
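One common pattern for pagination inside batches (an illustrative sketch, not from the thread; the pending dict is made up here, and it assumes an initial batch has already populated it via the same callback): collect each nextPageToken in the callback, then issue follow-up batches for the pages that remain.

pending = {}  # request_id -> nextPageToken for courses with more pages

def callback(request_id, response, exception):
    if exception is not None:
        print('Error for "{0}": {1}'.format(request_id, exception))
        return
    assignments.extend(response.get('studentSubmissions', []))
    token = response.get('nextPageToken')
    if token:
        pending[request_id] = token

while pending:
    batch = service_two.new_batch_http_request(callback=callback)
    current, pending = pending, {}
    for course_id, token in current.items():
        request = service_two.courses().courseWork().studentSubmissions().list(
            pageToken=token, courseId=course_id, courseWorkId='-', pageSize=100)
        batch.add(request, request_id=course_id)
    batch.execute()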

How to get info about a few posts on Facebook?

I need to fetch information about likes, comments, etc., but currently only from one post object at a time, and here's the request code I send.
Example of my requests:
class StatsSN:
    def __init__(self, fb_post_id, fb_token):
        self.fb_post_id = fb_post_id
        self.fb_token = fb_token

    def req_stats(self, url_method):
        req = requests.get(url_method)
        if req.status_code != 200:
            # return req.json().get('error')
            # return 'error'
            log.info('FB_Statistics: %s' % req.json())
            return -1
        return req.json().get('summary').get('total_count')

    def fb_likes(self):
        url_method = fb_api_url + '%s/likes?summary=true&access_token=%s' % (self.fb_post_id, self.fb_token)
        return self.req_stats(url_method)

    def fb_reactions(self):
        url_method = fb_api_url + '%s/reactions?summary=total_count&access_token=%s' % (self.fb_post_id, self.fb_token)
        return self.req_stats(url_method)

    def fb_comments(self):
        url_method = fb_api_url + '%s/comments?summary=true&access_token=%s' % (self.fb_post_id, self.fb_token)
        return self.req_stats(url_method)

    def fb_sharedposts(self):
        url_method = fb_api_url + '%s/sharedposts?access_token=%s' % (self.fb_post_id, self.fb_token)
        req = requests.get(url_method)
        if req.status_code != 200:
            log.info('FB_Statistics: %s' % req.json())
            return -1
        return len(req.json().get('data'))

    def fb_stats(self):
        fb_likes, fb_reactions, fb_comments, fb_sharedposts = self.fb_likes(), self.fb_reactions(), self.fb_comments(), \
            self.fb_sharedposts()
        return int(fb_likes), int(fb_reactions), int(fb_comments), int(fb_sharedposts)
Is there a method in the Graph API to get info about a few posts in one request?
You can achieve it by sending a batch request: if you only need public data, a normal page token is good enough. However, if you need private information, you will need a specific page token for the page post you want to get the metrics of.
As the metrics you are referring to are public, you should be able to send a GET request with the following syntax:
https://graph.facebook.com/v2.12/?fields=id,comments.limit(0).summary(true),shares,reactions.limit(0).summary(true)&ids=STATUS_ID1,STATUS_ID2,STATUS_ID3,...,STATUS_ID50&access_token=PAGE_TOKEN
You can request up to 50 status IDs in one call.
The limit(0).summary(true) part needs to be added to comments and reactions, as it is the best practice for retrieving the total count of comments/reactions.
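As an illustration, here is the same call made with requests (a sketch under the assumptions above; PAGE_TOKEN and the status IDs are placeholders from the answer, and the response is keyed by post ID):

import requests

PAGE_TOKEN = 'PAGE_TOKEN'  # placeholder page token
post_ids = ['STATUS_ID1', 'STATUS_ID2']  # up to 50 IDs

params = {
    'fields': 'id,comments.limit(0).summary(true),shares,reactions.limit(0).summary(true)',
    'ids': ','.join(post_ids),
    'access_token': PAGE_TOKEN,
}
resp = requests.get('https://graph.facebook.com/v2.12/', params=params)
data = resp.json()  # one entry per requested status ID
for post_id, post in data.items():
    reactions = post.get('reactions', {}).get('summary', {}).get('total_count')
    comments = post.get('comments', {}).get('summary', {}).get('total_count')
    print(post_id, reactions, comments)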

Pickling error: cannot pickle Request object

I know that it is not possible to pickle a Pyramid request object, but I can't seem to find where I am sending the Request object.
Consider the following:
@task
def do_consignment_task(store, agent):
    print "GOTHERE IN TASK"
    s = sqlahelper.get_session()
    consign = store.gen_consignment()
    ca = Agents.by_id(store.consignment_agents_id)
    consign.consignment_agents_id = ca.id
    consign.consignment_teamleader_id = ca.ou[0].lead_agents_id
    consign.consignment_timestamp = func.now()
    consign.created_by_agent_id = agent.id
    consign.complete_stamp = func.now()
    consign.sims = store.sims
    consign.status = "SUCCESS"
    print "GOT BEFORE LOOP "
    for sim in store.sims:
        if sim in consign.sims:
            continue
        else:
            consign.sims.append(sim)
    s.add(consign)
    transaction.savepoint()
    print "GOT AFTER SAVEPOINT"
    for sim in consign.sims:
        is_reconsign = sim.consignment_agent or sim.consignment_teamlead
        if is_reconsign:
            if not sim.consignment_history:
                sim.consignment_history = []
            sim.consignment_history.append(dict(
                stamp=sim.consignment_timestamp,
                consignment_agent_id=sim.consignment_agents_id,
                consignment_teamleader_id=sim.consignment_teamleader_id,
                by_agent_id=agent.id
            ))
        s.query(
            Sims
        ).filter(
            Sims.iccid == sim.iccid
        ).update(
            {
                "consignment_agents_id": consign.consignment_agents_id,
                "consignment_history": sim.consignment_history,
                "consignment_teamleader_id": ca.ou[0].lead_agents_id,
                "consignment_timestamp": func.now(),
                "modify_stamp": func.now(),
                "consignments_id": consign.id
            },
            synchronize_session=False
        )
    print "GOT BEFORE COMMIT"
    transaction.savepoint()
    print "THIS IS THE ID ID ID ID ID ID : ", consign.id
I call this function like:
if self.store.finalise:
    try:
        store = self.store
        agent = self.agent
        do_consignment_task.delay(store, agent)
        transaction.commit()
        self.check_and_purge()
        return "Consignment is being processed"
    except Exception, exc:
        self.check_and_purge()
        self.log.exception(exc)
        exc_error = "CONSIGNERR:", exc.message
        raise USSDFailure(exc_error)
else:
    self.store.status = "CANCELLED"
    if "fullconfirm" in self.session:
        del self.session["fullconfirm"]
    self.check_and_purge()
    return "CONSIGNMENT Cancelled"
When I run this code I get the following error:
EncodeError: Can't pickle <class 'pyramid.util.Request'>: attribute lookup pyramid.util.Request failed
I am not sending self or request objects - at least not that I can see.
How can I solve this problem? Am I sending a request object? Because I cannot see one.
The traceback can be seen here
EDIT:
Okay, so I have tried to change the data I send to the function: I am not passing a SQLAlchemy object, and I am making a copy of the store object. That changes my code to:
@task
def do_consignment_task(agent_id, **store):
    print "GOTHERE IN TASK"
    s = sqlahelper.get_session()
    cObj = USSDConsignmentsObject()
    consign = cObj.gen_consignment()
    ca = Agents.by_id(store.consignment_agents_id)
    consign.consignment_agents_id = ca.id
    consign.consignment_teamleader_id = ca.ou[0].lead_agents_id
    consign.consignment_timestamp = func.now()
    consign.created_by_agent_id = agent_id
    # etc
and:
if self.store.finalise:
    try:
        # del self.service
        store = self.store.__dict__.copy()
        agent_id = self.agent.id
        print store
        print agent_id
        # print help(store)
        do_consignment_task.delay(agent_id, **store)
        transaction.commit()
        # etc
This, however, still gives me the same error :|
Try not to serialise a Pyramid request object. When you interact with a Celery task, you should think of it as an independent process.
Provide it all the information it needs to do its work, and be aware that you need to serialise that information.
So self.store possibly contains attribute references that may be unrealistic to serialise.
Perhaps create a method on the store object that returns a clean dictionary object.
def serialize(self):
    data = {}
    data["element1"] = self.element1
    data["element2"] = self.element2
    data["element3"] = self.element3
    return data
Then, when you want to call the delay method, make sure to use store.serialize() instead of store or the dict.
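For example (an illustrative sketch; the element names above are placeholders, and this matches the do_consignment_task(agent_id, **store) signature from the edit):

store_data = self.store.serialize()  # a plain dict, safe to pickle
do_consignment_task.delay(self.agent.id, **store_data)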
