I have two tests defined using the django-rest-framework test library:
def test_read_site_API(self):
    """
    Read a Site through the API
    """
    self.client.login(email='test@mail.org', password='testing')
    response = self.client.get('/xos/sites/', format='json')
    parsed = json.loads(response.content)
    self.assertEqual(response.status_code, status.HTTP_200_OK)
    self.assertEqual(len(parsed), 1)
    self.assertEqual(parsed[0]['login_base'], 'test_')

def test_create_site_API(self):
    """
    Create a Site through the API
    """
    data = {
        'name': "Another Test Site",
        'login_base': "another_test_",
        'location': [10, 20],
        'abbreviated_name': 'test'
    }
    self.client.login(email='test@mail.org', password='testing')
    response = self.client.post('/xos/sites/', data, format='json')
    print(response.content)
    self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    self.assertEqual(Site.objects.count(), 2)
    self.assertEqual(Site.objects.filter(name="Another Test Site").count(), 1)
If I run only the first test, it works.
If I run both tests, the result is:
======================================================================
ERROR: test_read_site_API (core.tests.SiteTestAPI)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/opt/xos/core/tests.py", line 75, in test_read_site_API
self.client.login(email='test@mail.org', password='testing')
File "/usr/local/lib/python2.7/site-packages/django/test/client.py", line 563, in login
login(request, user)
File "/usr/local/lib/python2.7/site- packages/django/contrib/auth/__init__.py", line 102, in login
user_logged_in.send(sender=user.__class__, request=request, user=user)
File "/usr/local/lib/python2.7/site- packages/django/dispatch/dispatcher.py", line 198, in send
response = receiver(signal=self, sender=sender, **named)
File "/opt/xos/core/admin.py", line 1908, in cache_credentials
auth = {'username': request.POST['username'],
KeyError: 'username'
Any idea what might be happening?
Thanks in advance
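For reference, the traceback points at a user_logged_in signal receiver in admin.py that reads request.POST['username'] directly. The test client's login() fires the same signal without that form data, so the lookup raises KeyError. The following is only a hedged sketch of a defensive receiver (the function name comes from the traceback; the body is assumed, since admin.py isn't shown here):

def cache_credentials(sender, user=None, request=None, **kwargs):
    # The test client's login() does not post a login form, so
    # 'username'/'password' may be missing from request.POST.
    username = request.POST.get('username')
    password = request.POST.get('password')
    if username and password:
        # Hypothetical caching step; the original receiver body isn't shown in the question.
        request.session['auth'] = {'username': username, 'password': password}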
I am trying to use Reddit's developer API to build a simple scraper that grabs posts and their replies in a target subreddit and produces JSON with the information.
I am getting a 404 error that I don't understand.
This is my code:
import praw
import json
def scrape(subreddit, limit):
    r = praw.Reddit(user_agent='Reddit data organizer 1.0 by /u/reallymemorable', client_id='none of your business', client_secret='none of your business')
    submissions = r.subreddit(subreddit).get_hot(limit=limit)
    for submission in submissions:
        data = {}
        data['title'] = submission.title
        data['score'] = submission.score
        data['url'] = submission.url
        data['author'] = str(submission.author)
        data['subreddit'] = str(submission.subreddit)
        data['num_comments'] = submission.num_comments
        data['over_18'] = submission.over_18
        data['selftext'] = submission.selftext
        data['is_self'] = submission.is_self
        data['name'] = submission.name
        data['created_utc'] = submission.created_utc
        data['permalink'] = submission.permalink
        data['domain'] = submission.domain
        data['id'] = submission.id
        data['kind'] = submission.kind
        json.dumps(data)

scrape('https://www.reddit.com/r/funny/', 25)
When I run it, I get this:
reallymemorable@Christians-MBP Desktop % python3 fetch-data-subreddit.py
Traceback (most recent call last):
File "/Users/reallymemorable/Desktop/fetch-data-subreddit.py", line 26, in <module>
scrape('https://www.reddit.com/r/augmentedreality/comments/yv7sn8/ar_maximum_distance/', 25)
File "/Users/reallymemorable/Desktop/fetch-data-subreddit.py", line 6, in scrape
submissions = r.subreddit(subreddit).get_hot(limit=limit)
File "/opt/homebrew/lib/python3.9/site-packages/praw/models/reddit/base.py", line 34, in __getattr__
self._fetch()
File "/opt/homebrew/lib/python3.9/site-packages/praw/models/reddit/subreddit.py", line 583, in _fetch
data = self._fetch_data()
File "/opt/homebrew/lib/python3.9/site-packages/praw/models/reddit/subreddit.py", line 580, in _fetch_data
return self._reddit.request(method="GET", params=params, path=path)
File "/opt/homebrew/lib/python3.9/site-packages/praw/util/deprecate_args.py", line 43, in wrapped
return func(**dict(zip(_old_args, args)), **kwargs)
File "/opt/homebrew/lib/python3.9/site-packages/praw/reddit.py", line 941, in request
return self._core.request(
File "/opt/homebrew/lib/python3.9/site-packages/prawcore/sessions.py", line 330, in request
return self._request_with_retries(
File "/opt/homebrew/lib/python3.9/site-packages/prawcore/sessions.py", line 266, in _request_with_retries
raise self.STATUS_EXCEPTIONS[response.status_code](response)
prawcore.exceptions.NotFound: received 404 HTTP response
In r.subreddit(subreddit), subreddit should just be the name of the subreddit, e.g. "funny", not the full URL.
See the docs here: https://praw.readthedocs.io/en/stable/getting_started/quick_start.html#obtain-a-subreddit
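As a minimal sketch of the corrected call (assuming PRAW 7.x, where the listing method is .hot() rather than the older get_hot()):

import json
import praw

def scrape(subreddit_name, limit):
    reddit = praw.Reddit(user_agent='Reddit data organizer 1.0 by /u/reallymemorable',
                         client_id='none of your business',
                         client_secret='none of your business')
    results = []
    # Pass only the subreddit name; PRAW builds the request URL itself.
    for submission in reddit.subreddit(subreddit_name).hot(limit=limit):
        results.append({
            'title': submission.title,
            'score': submission.score,
            'url': submission.url,
            'permalink': submission.permalink,
            'id': submission.id,
        })
    return json.dumps(results)

print(scrape('funny', 25))  # the name, not 'https://www.reddit.com/r/funny/'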
I've been trying to add unit tests to my AWS scripts. I've been using botocore.stub to stub the API calls.
I needed to add pagination to various calls, and I can't seem to find a way to write the tests to include pagination.
Here's an example of the non-paginated test; I'm wondering how I can refactor this test and the function to use pagination:
# -*- coding: utf-8 -*-
import unittest
import boto3
from botocore.stub import Stubber
from datetime import datetime
def describe_images(client, repository):
    return client.describe_images(repositoryName=repository)


class TestCase(unittest.TestCase):
    def setUp(self):
        self.client = boto3.client('ecr')

    def test_describe_images(self):
        describe_images_response = {
            'imageDetails': [
                {
                    'registryId': 'string',
                    'repositoryName': 'string',
                    'imageDigest': 'string',
                    'imageTags': [
                        'string',
                    ],
                    'imageSizeInBytes': 123,
                    'imagePushedAt': datetime(2015, 1, 1)
                },
            ],
            'nextToken': 'string'
        }
        stubber = Stubber(self.client)
        expected_params = {'repositoryName': 'repo_name'}
        stubber.add_response(
            'describe_images',
            describe_images_response,
            expected_params
        )
        with stubber:
            response = describe_images(self.client, 'repo_name')
            self.assertEqual(describe_images_response, response)


if __name__ == '__main__':
    unittest.main()
If I update the function to include pagination like this:
def describe_images(client, repository):
    paginator = client.get_paginator('describe_images')
    response_iterator = paginator.paginate(
        repositoryName=repository
    )
    return response_iterator
we seem to be getting somewhere. The test fails, as it should, because the function now returns a PageIterator instead of a plain dict:
F
======================================================================
FAIL: test_describe_images (__main__.TestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "desc_imgs_paginated.py", line 47, in test_describe_images
self.assertEqual(describe_images_response, response)
AssertionError: {'imageDetails': [{'registryId': 'string'[178 chars]ing'} != <botocore.paginate.PageIterator object at 0x1058649b0>
----------------------------------------------------------------------
Ran 1 test in 0.075s
FAILED (failures=1)
When I try to iterate over the page iterator:
def describe_images(client, repository):
    paginator = client.get_paginator('describe_images')
    response_iterator = paginator.paginate(
        repositoryName=repository
    )
    return [r for r in response_iterator]
I get the following error:
E
======================================================================
ERROR: test_describe_images (__main__.TestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "desc_imgs_paginated.py", line 45, in test_describe_images
response = describe_images(self.client, repo_name)
File "desc_imgs_paginated.py", line 14, in describe_images
return '.join([r for r in response_iterator])
File "desc_imgs_paginated.py", line 14, in <listcomp>
return '.join([r for r in response_iterator])
File "lib/python3.6/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "lib/python3.6/site-packages/botocore/paginate.py", line 332, in _make_request
return self._method(**current_kwargs)
File "lib/python3.6/site-packages/botocore/client.py", line 312, in _api_call
return self._make_api_call(operation_name, kwargs)
File "lib/python3.6/site-packages/botocore/client.py", line 579, in _make_api_call
api_params, operation_model, context=request_context)
File "lib/python3.6/site-packages/botocore/client.py", line 631, in _convert_to_request_dict
params=api_params, model=operation_model, context=context)
File "lib/python3.6/site-packages/botocore/hooks.py", line 227, in emit
return self._emit(event_name, kwargs)
File "lib/python3.6/site-packages/botocore/hooks.py", line 210, in _emit
response = handler(**kwargs)
File "lib/python3.6/site-packages/botocore/stub.py", line 337, in _assert_expected_params
self._assert_expected_call_order(model, params)
File "lib/python3.6/site-packages/botocore/stub.py", line 323, in _assert_expected_call_order
pformat(params)))
botocore.exceptions.StubResponseError: Error getting response stub for operation DescribeImages: Unexpected API Call: called with parameters:
{nextToken: string, repositoryName: repo_name}
----------------------------------------------------------------------
Ran 1 test in 0.051s
FAILED (errors=1)
Am I missing the correct approach to testing this, or is this a bug in boto3/botocore?
It's been a while since this question was asked, but since there isn't an answer yet...
In your setup you provide a response dictionary as below:
describe_images_response = {
    'imageDetails': [
        {
            'registryId': 'string',
            'repositoryName': 'string',
            'imageDigest': 'string',
            'imageTags': [
                'string',
            ],
            'imageSizeInBytes': 123,
            'imagePushedAt': datetime(2015, 1, 1)
        },
    ],
    'nextToken': 'string'
}
The key here is that the first response includes a nextToken value, which causes the paginator to make a second request. So you have to provide an additional response to the stub; ultimately you need to end with a response that does not include a nextToken.
Now, looking back at your setup, there is only a single add_response call on the stubber:
stubber.add_response(
    'describe_images',
    describe_images_response,
    expected_params
)
The net result is that when the paginator makes the second request, there is no response specified in the setup.
This results in the exception, whose message hopefully now makes more sense:
botocore.exceptions.StubResponseError: Error getting response stub for operation DescribeImages: Unexpected API Call: called with parameters:
{nextToken: string, repositoryName: repo_name}
Since the second response hasn't been set up, you get an exception describing the unexpected request; in it you can see the nextToken parameter being passed.
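As a rough sketch (reusing the names from the question, and the list-returning describe_images), the paginated test could stub both pages, with the last response omitting nextToken so the paginator stops:

image_detail = {
    'registryId': 'string',
    'repositoryName': 'string',
    'imageDigest': 'string',
    'imageTags': ['string'],
    'imageSizeInBytes': 123,
    'imagePushedAt': datetime(2015, 1, 1)
}
first_page = {'imageDetails': [image_detail], 'nextToken': 'string'}
second_page = {'imageDetails': [image_detail]}  # no nextToken -> last page

stubber = Stubber(self.client)
# First request: only the repository name.
stubber.add_response('describe_images', first_page,
                     {'repositoryName': 'repo_name'})
# Second request: the paginator passes along the nextToken from the first page
# (matching the parameters shown in the exception above).
stubber.add_response('describe_images', second_page,
                     {'repositoryName': 'repo_name', 'nextToken': 'string'})

with stubber:
    pages = describe_images(self.client, 'repo_name')
    self.assertEqual(2, len(pages))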
I have a validator in the model. It validates the data, and then the validated data is saved to the database.
Problem:
Validation and the commit to the database take a very long time.
Therefore I decided to use django-rq to handle the time-consuming task.
views.py
def save_serializer(serializer, request):
    serializer.save()
    if bool(serializer.errors):
        # If it has no errors it will be an empty dictionary, and bool({}) is False
        msg = serializer.errors
    else:
        msg = serializer.data
    email = EmailMessage(
        'Message From jobs.siamsbrand.com',
        msg,
        settings.C0D1UM_SENDER,
        [request.user.email]
    )
    email.send()


class PriceListExcelFileList(generics.ListCreateAPIView):
    permission_classes = (DRYPermissions,)
    queryset = PriceListExcelFile.objects.all()
    serializer_class = PriceListExcelFileSerializer

    def perform_create(self, serializer, request):
        """
        :param serializer:
        :return:
        """
        django_rq.enqueue(save_serializer, serializer, request)

    def create(self, request, *args, **kwargs):
        """
        I need to override the default behaviour because I am going to use rq and send the result as an email notification
        :param request:
        :param args:
        :param kwargs:
        :return:
        """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer, request)
        headers = self.get_success_headers(serializer.data)
        data = {
            "id": "Processing",
            "file": "The response will be emailed to " + str(request.user.email) + " shortly",
            "permission": "-"
        }
        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
Error Messages:
File "/Users/el/Code/siam-sbrand/portal/apps/price_list_excel_files/views.py", line 52, in create
self.perform_create(serializer, request)
File "/Users/el/Code/siam-sbrand/portal/apps/price_list_excel_files/views.py", line 40, in perform_create
django_rq.enqueue(save_serializer, serializer, request)
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/django_rq/queues.py", line 208, in enqueue
return get_queue().enqueue(func, *args, **kwargs)
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/rq/queue.py", line 274, in enqueue
job_id=job_id, at_front=at_front, meta=meta)
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/django_rq/queues.py", line 60, in enqueue_call
return self.original_enqueue_call(*args, **kwargs)
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/django_rq/queues.py", line 56, in original_enqueue_call
return super(DjangoRQ, self).enqueue_call(*args, **kwargs)
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/rq/queue.py", line 227, in enqueue_call
job = self.enqueue_job(job, at_front=at_front)
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/rq/queue.py", line 292, in enqueue_job
job.save(pipeline=pipe)
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/rq/job.py", line 465, in save
connection.hmset(key, self.to_dict())
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/rq/job.py", line 429, in to_dict
obj['data'] = self.data
File "/Users/el/.pyenv/versions/siam-sbrand/lib/python3.6/site-packages/rq/job.py", line 227, in data
self._data = dumps(job_tuple)
TypeError: cannot serialize '_io.BufferedReader' object
"POST /api/price-list-excel-files/ HTTP/1.1" 500 20430
It turns out rq can only handle plain functions with picklable arguments, not objects like a serializer or a request (which here holds an open file). So I had to use a thread to do the time-consuming task instead.
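A minimal sketch of that thread-based workaround (it keeps the work inside the web process, so nothing has to be pickled, but you lose rq's queueing and retry behaviour):

import threading

def perform_create(self, serializer, request):
    # Run the slow save/email step in a background thread instead of rq,
    # since the serializer and request (with an open upload) can't be pickled.
    thread = threading.Thread(target=save_serializer, args=(serializer, request))
    thread.start()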
I'm trying to write a script to programmatically log in to Google Finance, view my portfolio and then display the results on my desktop. I'm using the requests module and am currently stuck on the 'login' part.
I keep getting this error requests.cookies.CookieConflictError: There are multiple cookies with name, 'APISID'
Here is the entire script; the error is thrown on line 48. I'm guessing it has something to do with requests' keep-alive and the connection not being recycled properly?
#!/usr/bin/env python
import getpass
import re
import requests
email = raw_input("Enter your Google username: ")
password = getpass.getpass("Enter your password: ")
session = requests.Session()
# Define URLs
login_page_url = 'https://accounts.google.com/ServiceLogin?passive=true&service=finance'
authenticate_url = 'https://accounts.google.com/ServiceLoginAuth?service=finance'
gf_home_page_url = 'http://www.google.com/finance/portfolio'
login_page_contents = session.get(login_page_url).text
# Find GALX value
galx_match_obj = re.search(r'name="GALX"\s*value="([^"]+)"', login_page_contents, re.IGNORECASE)
galx_value = galx_match_obj.group(1) if galx_match_obj.group(1) is not None else ''
# Find DSH value
dsh_match_obj = re.search(r'id="dsh"\s*value="([^"]+)"', login_page_contents, re.IGNORECASE)
dsh_value = dsh_match_obj.group(1) if dsh_match_obj.group(1) is not None else ''
# Set up login credentials
login_params = {
    'Email': email,
    'Passwd': password,
    'continue': 'http://www.google.com/finance/portfolio',
    'followup': 'http://www.google.com/finance/portfolio',
    'service': 'finance',
    'GALX': galx_value,
    'pstMsg': 0,
    'dnConn': '',
    'checkConnection': '',
    'timeStmp': '',
    'secTok': '',
    'bgresponse': 'js_disabled',
    'PersistentCookie': 'yes'
}
print galx_value
print dsh_value
# Login
r = session.post(authenticate_url, params=login_params) # <- Error thrown here
print r.text
exit
Traceback:
Traceback (most recent call last):
File "crawl.py", line 48, in <module>
r = session.post(authenticate_url, params=login_params)
File "/Users/nathan/Development/Scripts/google-finance-crawler/requests/sessions.py", line 358, in post
return self.request('POST', url, data=data, **kwargs)
File "/Users/nathan/Development/Scripts/google-finance-crawler/requests/sessions.py", line 312, in request
resp = self.send(prep, **send_kwargs)
File "/Users/nathan/Development/Scripts/google-finance-crawler/requests/sessions.py", line 426, in send
history = [resp for resp in gen] if allow_redirects else []
File "/Users/nathan/Development/Scripts/google-finance-crawler/requests/sessions.py", line 163, in resolve_redirects
resp.cookies.update(cookiejar)
File "/usr/local/Cellar/python/2.7.3/Frameworks/Python.framework/Versions/2.7/lib/python2.7/_abcoll.py", line 494, in update
self[key] = other[key]
File "/Users/nathan/Development/Scripts/google-finance-crawler/requests/cookies.py", line 246, in __getitem__
return self._find_no_duplicates(name)
File "/Users/nathan/Development/Scripts/google-finance-crawler/requests/cookies.py", line 285, in _find_no_duplicates
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
requests.cookies.CookieConflictError: There are multiple cookies with name, 'APISID'
It's a bug in requests, see issue 1189.
The current proposed fix is to simply delete line 163 of requests/sessions.py:
resp.cookies.update(cookiejar)
I want to write a test case for sending POST data to the login page. It does not work. I'm posting my code here and hope you can help me. Thanks.
def setUp(self):
    """set up"""
    un = 'abc@gmail.com'
    pw = '123'
    self.user = User.objects.create_user(un, un)
    self.user.is_staff = True
    self.user.is_superuser = True
    self.user.firstname = "John"
    self.user.lastname = "Smith"
    self.user.password = '123'
    self.user.save()
    print '*** password: ', self.user.password

def testPost(self):
    """test POST requests"""
    post_data = {
        'email': 'abc@gmail.com',
        'password': '123',
    }
    response = self.client.post(reverse('myapp_home'), post_data)
    print response.status_code
The error output is below.
ERROR: testPost (submngr.tests.model_tests.model_tests.FormsTestCase)
test POST requests
----------------------------------------------------------------------
Traceback (most recent call last):
File "tests/model_tests/model_tests.py", line 117, in testPost
response = self.client.post('/', post_data)
File "/usr/local/lib/python2.7/dist-packages/django/test/client.py", line 449, in post
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
File "/usr/local/lib/python2.7/dist-packages/django/test/client.py", line 262, in post
return self.request(**r)
File "/usr/local/lib/python2.7/dist-packages/django/core/handlers/base.py", line 111, in get_response
response = callback(request, *callback_args, **callback_kwargs)
File "views.py", line 84, in homepage
print results[0].check_password(form.cleaned_data['password'])
File "/usr/local/lib/python2.7/dist-packages/django/contrib/auth/models.py", line 304, in check_password
return check_password(raw_password, self.password, setter)
File "/usr/local/lib/python2.7/dist-packages/django/contrib/auth/hashers.py", line 42, in check_password
hasher = get_hasher(algorithm)
File "/usr/local/lib/python2.7/dist-packages/django/contrib/auth/hashers.py", line 115, in get_hasher
"setting?" % algorithm)
ValueError: Unknown password hashing algorithm '123'. Did you specify it in the PASSWORD_HASHERS setting?
You have directly stored the user password as a plain string (self.user.password = '123'), but Django stores user passwords using a hashing algorithm, which is why you are receiving the error. You can set the user's password with the set_password method, which applies the hashing algorithm before saving:
user.set_password('123')
user.save()
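For example, a corrected setUp could look like this (a sketch assuming the default Django User model, whose name fields are first_name/last_name):

def setUp(self):
    """set up"""
    un = 'abc@gmail.com'
    self.user = User.objects.create_user(un, un)
    self.user.is_staff = True
    self.user.is_superuser = True
    self.user.first_name = "John"
    self.user.last_name = "Smith"
    self.user.set_password('123')  # hashes '123' instead of storing it as plain text
    self.user.save()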