I need to change the default AccountId 123456789012 to a different value.
I tried this fixture:
@pytest.fixture(autouse=True)
def sts(monkeypatch):
    """Attempt to patch moto's account id, then yield a mocked STS client.

    NOTE: this is the approach that does NOT work -- the assert below fails
    because moto copies ACCOUNT_ID into sub-modules at import time.
    """
    import moto.iam.models as models
    monkeypatch.setattr(models, 'ACCOUNT_ID', '111111111111')
    from moto import mock_sts
    with mock_sts():
        sts = boto3.client('sts', region_name='us-east-1')
        assert(sts.get_caller_identity().get('Account')=='111111111111')
        yield sts
But that assert fails, the AccountId is still the default...
The code included hardcoded account ids in a bunch of places. The code currently on git lets that be overwritten by setting the environment variable MOTO_ACCOUNT_ID, no need to monkey patch.
Based on @FábioDias's excellent hint about the environment variable 'MOTO_ACCOUNT_ID', we can combine this with moto's getting-started setup as follows:
@pytest.fixture(autouse=True)
def aws_credentials(monkeypatch):
    """Mocked AWS credentials to prevent production accidents"""
    monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'testing')
    monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'testing')
    monkeypatch.setenv('AWS_SECURITY_TOKEN', 'testing')
    monkeypatch.setenv('AWS_SESSION_TOKEN', 'testing')
    monkeypatch.setenv('AWS_DEFAULT_REGION', 'us-east-1')
    monkeypatch.setenv('MOTO_ACCOUNT_ID', '111111111111')
    # see note 2022-06-03
    # since moto deals with ACCOUNT_ID in a "set-once" manner, we need
    # to reload moto and all of its sub-modules
    importlib.reload(sys.modules['moto'])
    to_reload = [m for m in sys.modules if m.startswith('moto.')]
    for m in to_reload:
        importlib.reload(sys.modules[m])
@pytest.fixture(scope='function')
def s3(aws_credentials):
    # Yield an S3 client backed by moto; depends on the aws_credentials
    # fixture so the fake account id / credentials are already in place.
    with mock_s3():
        yield boto3.client('s3', region_name='us-east-1')
This seems to work in all cases I've tested so far.
Note 2022-06-03
As of now (moto.__version__ == '3.1.12dev'), moto still sets ACCOUNT_ID once and for all, and then copies it in various sub-modules. The only way I found to force-change the account ID everywhere is by reloading moto first (so that it assigns the new ACCOUNT_ID in moto.core.models) and then all sub-modules already loaded.
Related
I have a very simple Google Cloud Function written in Python and it makes a reference to Google's Secret manager via their Python library.
The code is very simple and it looks like this:
import os
from google.cloud import secretmanager
import logging

# Module-level setup: the secret is fetched once, at cold start.
client = secretmanager.SecretManagerServiceClient()
secret_name = "my-secret"
project_id = os.environ.get('GCP_PROJECT')
resource_name = "projects/{}/secrets/{}/versions/latest".format(project_id, secret_name)
response = client.access_secret_version(resource_name)
secret_string = response.payload.data.decode('UTF-8')

def new_measures_handler(data, context):
    # Cloud Function entry point: logs the secret and prints the file name.
    logging.info(secret_string)
    print('File: {}.'.format(data['name']))
and then I have my simple unit test which is trying to take advantage of monkey patching:
import main

def test_print(capsys, monkeypatch):
    # arrange
    monkeypatch.setenv("GCP_PROJECT", "TestingUser")
    monkeypatch.setattr(secretmanager, "SecretManagerServiceClient", lambda: 1)
    name = 'test'
    data = {'name': name}
    # act
    main.new_measures_handler(data, None)
    out, err = capsys.readouterr()
    # assert
    assert out == 'File: {}.\n'.format(name)
Everything goes well with the mock for the environment variable but I can not mock secretmanager. It keeps on trying to call the actual API. My ultimate goal is to mock secretmanager.SecretManagerServiceClient() and make it return an object which later on can be used by: client.access_secret_version(resource_name) (which I will need to mock as well, I think)
See my answer to this question for a working example of using unittest patching and mocking to mock Google API calls and return mock results:
How to Mock a Google API Library with Python 3.7 for Unit Testing
Say I want to mock the following:
session = boto3.Session(profile_name=profile)
resource = session.resource('iam')
iam_users = resource.users.all()
policies = resource.policies.filter(Scope='AWS', OnlyAttached=True, PolicyUsageFilter='PermissionsPolicy')
How do I go about starting to mock this with in pytest? I could create mocked objects by creating a dummy class and the necessary attributes, but I suspect that's the wrong approach.
Some additional details, here's what I'm trying to test out:
def test_check_aws_profile(self, mocker):
    # Patch the Session class where it is used, then stub the chained call.
    mocked_boto3 = mocker.patch('myapp.services.utils.boto3.Session')
    mocker.patch(mocked_boto3.client.get_caller_identity.get, return_value='foo-account-id')
    assert 'foo-account-id' == my_func('foo')
# in myapp.services.utils.py
def my_func(profile):
    """Return the AWS account number for the given named profile."""
    session = boto3.Session(profile_name=profile)
    client = session.client('sts')
    aws_account_number = client.get_caller_identity().get('Account')
    return aws_account_number
But I can't quite seem to be able to get this patched correctly. I'm trying to make it so that I can patch session and the function calls in that method
I tried using moto and got this:
@mock_sts
def test_check_aws_profile(self):
    # Fails with ProfileNotFound unless the 'foo' profile exists in a
    # (dummy) credentials file -- see the edit below.
    session = boto3.Session(profile_name='foo')
    client = session.client('sts')
    client.get_caller_identity().get('Account')
But I'm running into
> raise ProfileNotFound(profile=profile_name)
E botocore.exceptions.ProfileNotFound: The config profile (foo) could not be found
So it seems like it's not mocking anything :|
Edit:
Turns out you need to have the mocked credentials in a config and credentials file for this to work.
If you want to use moto, you can use the AWS_SHARED_CREDENTIALS_FILE environment variable, to point it to a dummy credentials file which can be kept in the tests folder.
You can define your profiles there. Example:
Files: test_stuff.py, dummy_aws_credentials
test_stuff.py:
import os
from pathlib import Path

import boto3
import pytest
from moto import mock_sts

@pytest.fixture(scope='module')
def aws_credentials():
    """Mocked AWS Credentials for moto."""
    # Point boto3 at a dummy credentials file kept next to this test module.
    moto_credentials_file_path = Path(__file__).parent.absolute() / 'dummy_aws_credentials'
    os.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(moto_credentials_file_path)

@mock_sts
def test_check_aws_profile(aws_credentials):
    session = boto3.Session(profile_name='foo')
    client = session.client('sts')
    client.get_caller_identity().get('Account')
dummy_aws_credentials:
[foo]
aws_access_key_id = mock
aws_secret_access_key = mock
I'm not sure what exactly you want, so I'll give you something to start.
You can let unittest.mock mock everything for you, for example. (Useful reading: https://docs.python.org/3/library/unittest.mock.html)
module.py:
import boto3

def function():
    """Return the caller's AWS account id via an STS client."""
    session = boto3.Session(profile_name="foobar")
    client = session.resource("sts")
    return client.get_caller_identity().get('Account')
test_module.py:
from unittest.mock import patch

import module

@patch("module.boto3")  # this creates mock which is passed to test below
def test_function(mocked_boto):
    # mocks below are magically created by unittest.mock when they are accessed
    mocked_session = mocked_boto.Session()
    mocked_client = mocked_session.resource()
    mocked_identity = mocked_client.get_caller_identity()
    # now mock the return value of .get()
    mocked_identity.get.return_value = "foo-bar-baz"
    result = module.function()
    assert result == "foo-bar-baz"
    # we can make sure mocks were called properly, for example
    mocked_identity.get.assert_called_once_with("Account")
Results of pytest run:
$ pytest
================================ test session starts ================================
platform darwin -- Python 3.7.6, pytest-5.3.2, py-1.8.1, pluggy-0.13.1
rootdir: /private/tmp/one
collected 1 item
test_module.py . [100%]
================================= 1 passed in 0.09s =================================
I would also recommend installing pytest-socket and running pytest --disable-socket to make sure your tests do not talk with the network by accident.
Although there is nothing wrong with manually patching boto using mock.patch, you could also consider using a higher level testing utility like moto.
I have a question about how to mock a nested method and test what it was called with. I'm having a hard time getting my head around: https://docs.python.org/3/library/unittest.mock-examples.html#mocking-chained-calls.
I'd like to test that the "put" method from the fabric library is called by the deploy_file method in this class, and maybe what values are given to it. This is the module that gathers some information from AWS and provides a method to take action on the data.
import json
import os

from aws.secrets_manager import get_secret
from fabric import Connection

class Deploy:
    """Fetch deployment secrets from AWS and push files to the target host."""

    def __init__(self):
        self.secrets = None
        self.set_secrets()

    def set_secrets(self):
        # get_secret() returns a JSON string; keep the parsed dict.
        secrets = get_secret()
        self.secrets = json.loads(secrets)

    def deploy_file(self, source_file):
        # Copy source_file to <app_path>/<basename> on the remote host.
        with Connection(host=os.environ.get('SSH_USERNAME'), user=os.environ.get("SSH_USERNAME")) as conn:
            destination_path = self.secrets["app_path"] + '/' + os.path.basename(source_file)
            conn.put(source_file, destination_path)
"get_secret" is a method in another module that uses the boto3 library to get the info from AWS.
These are the tests I'm working on:
from unittest.mock import patch

from fabric import Connection

from jobs.deploy import Deploy

def test_set_secrets_dict_from_expected_json_string():
    with patch('jobs.deploy.get_secret') as m_get_secret:
        m_get_secret.return_value = '{"app_path": "/var/www/html"}'
        deployment = Deploy()
        assert deployment.secrets['app_path'] == "/var/www/html"

def test_copy_app_file_calls_fabric_put():
    with patch('jobs.deploy.get_secret') as m_get_secret:
        m_get_secret.return_value = '{"app_path": "/var/www/html"}'
        deployment = Deploy()
        with patch('jobs.deploy.Connection', spec=Connection) as m_conn:
            local_file_path = "/tmp/foo"
            deployment.deploy_file(local_file_path)
            # NOTE(review): fails -- put is called on the context-manager
            # result (m_conn.return_value.__enter__.return_value), not on
            # m_conn itself; see the discussion below.
            m_conn.put.assert_called_once()
where the second test results in "AssertionError: Expected 'put' to have been called once. Called 0 times."
the first test mocks the "get_secret" function just fine to test that the constructor for "Deploy" sets "Deploy.secrets" from the fake AWS data.
In the second test, get_secrets is mocked just as before, and I mock "Connection" from the fabric library. If I don't mock Connection, I get an error related to the "host" parameter when the Connection object is created.
I think that when "conn.put" is called its creating a whole new Mock object and I'm not testing that object when the unittest runs. I'm just not sure how to define the test to actually test the call to put.
I'm also a novice at understanding what to test (and how) and what not to test as well as how to use mock and such. I'm fully bought in on the idea though. It's been very helpful to find bugs and regressions as I work on projects.
How to retrieve Test Results from VSTS (Azure DevOps) by using Python REST API?
Documentation is (as of today) very light, and even the examples in the dedicated repo of the API examples are light (https://github.com/Microsoft/azure-devops-python-samples).
For some reasons, the Test Results are not considered as WorkItems so a regular WIQL query would not work.
Additionally, it would be great to query the results for a given Area Path.
Thanks
First you need to get the proper connection client with the client string that matches the test results.
# Connect to Azure DevOps (VSTS) with a personal access token, then obtain
# the client that exposes the Test Results API.
from vsts.vss_connection import VssConnection
from msrest.authentication import BasicAuthentication
token = "hcykwckuhe6vbnigsjs7r3ai2jefsdlkfjslkfj5mxizbtfu6k53j4ia"
team_instance = "https://tfstest.toto.com:8443/tfs/Development/"
credentials = BasicAuthentication("", token)
connection = VssConnection(base_url=team_instance, creds=credentials)
# Client string selecting the test client implementation (API v4.1).
TEST_CLIENT = "vsts.test.v4_1.test_client.TestClient"
test_client = connection.get_client(TEST_CLIENT)
Then, you can have a look at all the functions available in: vsts/test/<api_version>/test_client.py"
The following functions look interesting:
def get_test_results(self, project, run_id, details_to_include=None, skip=None, top=None, outcomes=None) (Get Test Results for a run based on filters)
def get_test_runs(self, project, build_uri=None, owner=None, tmi_run_id=None, plan_id=None, include_run_details=None, automated=None, skip=None, top=None)
def query_test_runs(self, project, min_last_updated_date, max_last_updated_date, state=None, plan_ids=None, is_automated=None, publish_context=None, build_ids=None, build_def_ids=None, branch_name=None, release_ids=None, release_def_ids=None, release_env_ids=None, release_env_def_ids=None, run_title=None, top=None, continuation_token=None) (although this function has a limitation of a 7-day range between min_last_updated_date and max_last_updated_date)
To retrieve all the results from the Test Plans in a given Area Path, I have used the following code:
# WIQL query: all Test Plan work items under the given Area Path.
tp_query = Wiql(query="""
SELECT
    [System.Id]
FROM workitems
WHERE
    [System.WorkItemType] = 'Test Plan'
    AND [Area Path] UNDER 'Development\MySoftware'
ORDER BY [System.ChangedDate] DESC""")

# For each plan, walk its runs and print every individual test result.
for plan in wit_client.query_by_wiql(tp_query).work_items:
    print(f"Results for {plan.id}")
    for run in test_client.get_test_runs(my_project, plan_id = plan.id):
        for res in test_client.get_test_results(my_project, run.id):
            tc = res.test_case
            print(f"#{run.id}. {tc.name} ({tc.id}) => {res.outcome} by {res.run_by.display_name} in {res.duration_in_ms}")
Note that a test result includes the following attributes:
duration_in_ms
build
outcome (string)
associated_bugs
run_by (Identity)
test_case (TestCase)
test_case_title (string)
area (AreaPath)
Test_run, corresponding to the test run
test_suite
test_plan
completed_date (Python datetime object)
started_date (Python datetime object)
configuration
Hope it can help others save the number of hours I spent exploring this API.
Cheers
I am doing unit test with python mock. I've gone through blogs and python docs related to mocking but confuse about mocking the test case.
Here is the snippet for which I want to write test case.
The agenda is to test the method "set_contents_from_string()" using mock.
def write_to_customer_registry(customer):
    """Serialize the customer registry to JSON and upload it to S3."""
    # establish a connection with S3
    conn = _connect_to_s3()
    # build customer registry dict and convert it to json
    customer_registry_dict = json.dumps(build_customer_registry_dict(customer))
    # attempt to access requested bucket
    bucket = _get_customer_bucket(conn)
    s3_key = _get_customer_key(bucket, customer)
    s3_key.set_metadata('Content-Type', 'application/json')
    s3_key.set_contents_from_string(customer_registry_dict)
    return s3_key
As you are testing some private methods I have added them to a module which I called s3.py that contains your code:
import json

# Stub helpers: each raises so tests must patch them; only the structure
# of write_to_customer_registry matters here.
def _connect_to_s3():
    raise

def _get_customer_bucket(conn):
    raise

def _get_customer_key(bucket, customer):
    raise

def build_customer_registry_dict(cust):
    raise

def write_to_customer_registry(customer):
    # establish a connection with S3
    conn = _connect_to_s3()
    # build customer registry dict and convert it to json
    customer_registry_dict = json.dumps(build_customer_registry_dict(customer))
    # attempt to access requested bucket
    bucket = _get_customer_bucket(conn)
    s3_key = _get_customer_key(bucket, customer)
    s3_key.set_metadata('Content-Type', 'application/json')
    s3_key.set_contents_from_string(customer_registry_dict)
    return s3_key
Next, in another module test_s3.py, I tested your code taking into account that for Unit Tests all interactions with third parties, such as network calls to s3 should be patched:
from unittest.mock import MagicMock, Mock, patch

from s3 import write_to_customer_registry
import json

@patch('json.dumps', return_value={})
@patch('s3._get_customer_key')
@patch('s3.build_customer_registry_dict')
@patch('s3._get_customer_bucket')
@patch('s3._connect_to_s3')
def test_write_to_customer_registry(connect_mock, get_bucket_mock, build_customer_registry_dict_mock, get_customer_key_mock, json_mock):
    # Patches are applied bottom-up, so the argument order mirrors the
    # reversed decorator order above.
    customer = MagicMock()
    connect_mock.return_value = 'connection'
    get_bucket_mock.return_value = 'bucket'
    get_customer_key_mock.return_value = MagicMock()
    write_to_customer_registry(customer)
    assert connect_mock.call_count == 1
    assert get_bucket_mock.call_count == 1
    assert get_customer_key_mock.call_count == 1
    get_bucket_mock.assert_called_with('connection')
    get_customer_key_mock.assert_called_with('bucket', customer)
    get_customer_key_mock.return_value.set_metadata.assert_called_with('Content-Type', 'application/json')
    get_customer_key_mock.return_value.set_contents_from_string.assert_called_with({})
As you can see from the tests I am not testing that set_contents_from_string is doing what is supposed to do (since that should already be tested by the boto library) but that is being called with the proper arguments.
If you still doubt that the boto library is not properly testing such call you can always check it yourself in boto Github or boto3 Github
Something else you could test is that you are handling the different exceptions and edge cases in your code properly.
Finally, you can find more about patching and mocking in the docs. Usually the section about where to patch is really useful.
Some other resources are this blog post with python mock gotchas or this blog post I wrote myself (shameless self plug) after answering related pytest, patching and mocking questions in Stackoverflow.
I came up with a solution that worked for me. Posting it here, as it may be helpful for someone.
def setup(self):
    # Create and persist a throwaway customer for each test.
    self.customer = Customer.objects.create('tiertranstests')
    self.customer.save()

def test_build_customer_registry(self):
    mock_connection = Mock()
    mock_bucket = Mock()
    mock_s3_key = Mock()
    customer_registry_dict = json.dumps(build_customer_registry_dict(self.customer))
    # Patch S3 connection and Key class of registry method
    with patch('<path>.customer_registry.S3Connection', Mock(return_value=mock_connection)),\
            patch('<path>.customer_registry.Key', Mock(return_value=mock_s3_key)):
        mock_connection.get_bucket = Mock(return_value=mock_bucket)
        mock_s3_key.set_metadata.return_value = None
        mock_s3_key.set_contents_from_string = Mock(return_value=customer_registry_dict)
        write_to_customer_registry(self.customer)
        mock_s3_key.set_contents_from_string.assert_called_once_with(customer_registry_dict)