Need help creating a VM in GCP using Python

I'm working on a project to automate the deployment of VMs in GCP using Python. I recently figured out how to create a custom image using Python and I'm now at the step where I need to create the VM. I have the example template from the Google documentation but I'm stuck on a particular method and don't understand the argument that it wants.
I can successfully get the image from the family and create an attached disk from the image, but when I get to the create_instance function I'm unsure how it wants me to reference the disk: disks: List[compute_v1.AttachedDisk]. I keep getting google.cloud.compute.v1.Instance.disks is not iterable when I try to specify the name or path.
Any help or guidance is appreciated.
import re
import sys
from typing import Any, List
import warnings

from google.api_core.extended_operation import ExtendedOperation
from google.cloud import compute_v1


def get_image_from_family(project: str, family: str) -> compute_v1.Image:
    """
    Retrieve the newest image that is part of a given family in a project.

    Args:
        project: project ID or project number of the Cloud project you want to get image from.
        family: name of the image family you want to get image from.

    Returns:
        An Image object.
    """
    image_client = compute_v1.ImagesClient()
    # List of public operating system (OS) images: https://cloud.google.com/compute/docs/images/os-details
    newest_image = image_client.get_from_family(project=project, family=family)
    return newest_image
def disk_from_image(
    disk_type: str,
    disk_size_gb: int,
    boot: bool,
    source_image: str,
    auto_delete: bool = True,
) -> compute_v1.AttachedDisk:
    """
    Create an AttachedDisk object to be used in VM instance creation. Uses an image as the
    source for the new disk.

    Args:
        disk_type: the type of disk you want to create. This value uses the following format:
            "zones/{zone}/diskTypes/(pd-standard|pd-ssd|pd-balanced|pd-extreme)".
            For example: "zones/us-west3-b/diskTypes/pd-ssd"
        disk_size_gb: size of the new disk in gigabytes
        boot: boolean flag indicating whether this disk should be used as a boot disk of an instance
        source_image: source image to use when creating this disk. You must have read access to this disk. This can be one
            of the publicly available images or an image from one of your projects.
            This value uses the following format: "projects/{project_name}/global/images/{image_name}"
        auto_delete: boolean flag indicating whether this disk should be deleted with the VM that uses it

    Returns:
        AttachedDisk object configured to be created using the specified image.
    """
    boot_disk = compute_v1.AttachedDisk()
    initialize_params = compute_v1.AttachedDiskInitializeParams()
    initialize_params.source_image = source_image
    initialize_params.disk_size_gb = disk_size_gb
    initialize_params.disk_type = disk_type
    boot_disk.initialize_params = initialize_params
    # Remember to set auto_delete to True if you want the disk to be deleted when you delete
    # your VM instance.
    boot_disk.auto_delete = auto_delete
    boot_disk.boot = boot
    return boot_disk
def wait_for_extended_operation(
    operation: ExtendedOperation, verbose_name: str = "operation", timeout: int = 300
) -> Any:
    """
    This method will wait for the extended (long-running) operation to
    complete. If the operation is successful, it will return its result.
    If the operation ends with an error, an exception will be raised.
    If there were any warnings during the execution of the operation
    they will be printed to sys.stderr.

    Args:
        operation: a long-running operation you want to wait on.
        verbose_name: (optional) a more verbose name of the operation,
            used only during error and warning reporting.
        timeout: how long (in seconds) to wait for operation to finish.
            If None, wait indefinitely.

    Returns:
        Whatever the operation.result() returns.

    Raises:
        This method will raise the exception received from `operation.exception()`
        or RuntimeError if there is no exception set, but there is an `error_code`
        set for the `operation`.

        In case of an operation taking longer than `timeout` seconds to complete,
        a `concurrent.futures.TimeoutError` will be raised.
    """
    result = operation.result(timeout=timeout)

    if operation.error_code:
        print(
            f"Error during {verbose_name}: [Code: {operation.error_code}]: {operation.error_message}",
            file=sys.stderr,
            flush=True,
        )
        print(f"Operation ID: {operation.name}", file=sys.stderr, flush=True)
        raise operation.exception() or RuntimeError(operation.error_message)

    if operation.warnings:
        print(f"Warnings during {verbose_name}:\n", file=sys.stderr, flush=True)
        for warning in operation.warnings:
            print(f" - {warning.code}: {warning.message}", file=sys.stderr, flush=True)

    return result
def create_instance(
    project_id: str,
    zone: str,
    instance_name: str,
    disks: List[compute_v1.AttachedDisk],
    machine_type: str = "n1-standard-1",
    network_link: str = "global/networks/default",
    subnetwork_link: str = None,
    internal_ip: str = None,
    external_access: bool = False,
    external_ipv4: str = None,
    accelerators: List[compute_v1.AcceleratorConfig] = None,
    preemptible: bool = False,
    spot: bool = False,
    instance_termination_action: str = "STOP",
    custom_hostname: str = None,
    delete_protection: bool = False,
) -> compute_v1.Instance:
    """
    Send an instance creation request to the Compute Engine API and wait for it to complete.

    Args:
        project_id: project ID or project number of the Cloud project you want to use.
        zone: name of the zone to create the instance in. For example: "us-west3-b"
        instance_name: name of the new virtual machine (VM) instance.
        disks: a list of compute_v1.AttachedDisk objects describing the disks
            you want to attach to your new instance.
        machine_type: machine type of the VM being created. This value uses the
            following format: "zones/{zone}/machineTypes/{type_name}".
            For example: "zones/europe-west3-c/machineTypes/f1-micro"
        network_link: name of the network you want the new instance to use.
            For example: "global/networks/default" represents the network
            named "default", which is created automatically for each project.
        subnetwork_link: name of the subnetwork you want the new instance to use.
            This value uses the following format:
            "regions/{region}/subnetworks/{subnetwork_name}"
        internal_ip: internal IP address you want to assign to the new instance.
            By default, a free address from the pool of available internal IP addresses of
            used subnet will be used.
        external_access: boolean flag indicating if the instance should have an external IPv4
            address assigned.
        external_ipv4: external IPv4 address to be assigned to this instance. If you specify
            an external IP address, it must live in the same region as the zone of the instance.
            This setting requires `external_access` to be set to True to work.
        accelerators: a list of AcceleratorConfig objects describing the accelerators that will
            be attached to the new instance.
        preemptible: boolean value indicating if the new instance should be preemptible
            or not. Preemptible VMs have been deprecated and you should now use Spot VMs.
        spot: boolean value indicating if the new instance should be a Spot VM or not.
        instance_termination_action: What action should be taken once a Spot VM is terminated.
            Possible values: "STOP", "DELETE"
        custom_hostname: Custom hostname of the new VM instance.
            Custom hostnames must conform to RFC 1035 requirements for valid hostnames.
        delete_protection: boolean value indicating if the new virtual machine should be
            protected against deletion or not.

    Returns:
        Instance object.
    """
    instance_client = compute_v1.InstancesClient()

    # Use the network interface provided in the network_link argument.
    network_interface = compute_v1.NetworkInterface()
    network_interface.name = network_link
    if subnetwork_link:
        network_interface.subnetwork = subnetwork_link

    if internal_ip:
        network_interface.network_i_p = internal_ip

    if external_access:
        access = compute_v1.AccessConfig()
        access.type_ = compute_v1.AccessConfig.Type.ONE_TO_ONE_NAT.name
        access.name = "External NAT"
        access.network_tier = access.NetworkTier.PREMIUM.name
        if external_ipv4:
            access.nat_i_p = external_ipv4
        network_interface.access_configs = [access]

    # Collect information into the Instance object.
    instance = compute_v1.Instance()
    instance.network_interfaces = [network_interface]
    instance.name = instance_name
    instance.disks = disks
    if re.match(r"^zones/[a-z\d\-]+/machineTypes/[a-z\d\-]+$", machine_type):
        instance.machine_type = machine_type
    else:
        instance.machine_type = f"zones/{zone}/machineTypes/{machine_type}"

    if accelerators:
        instance.guest_accelerators = accelerators

    if preemptible:
        # Set the preemptible setting
        warnings.warn(
            "Preemptible VMs are being replaced by Spot VMs.", DeprecationWarning
        )
        instance.scheduling = compute_v1.Scheduling()
        instance.scheduling.preemptible = True

    if spot:
        # Set the Spot VM setting
        instance.scheduling = compute_v1.Scheduling()
        instance.scheduling.provisioning_model = (
            compute_v1.Scheduling.ProvisioningModel.SPOT.name
        )
        instance.scheduling.instance_termination_action = instance_termination_action

    if custom_hostname is not None:
        # Set the custom hostname for the instance
        instance.hostname = custom_hostname

    if delete_protection:
        # Set the delete protection bit
        instance.deletion_protection = True

    # Prepare the request to insert an instance.
    request = compute_v1.InsertInstanceRequest()
    request.zone = zone
    request.project = project_id
    request.instance_resource = instance

    # Wait for the create operation to complete.
    print(f"Creating the {instance_name} instance in {zone}...")

    operation = instance_client.insert(request=request)

    wait_for_extended_operation(operation, "instance creation")

    print(f"Instance {instance_name} created.")
    return instance_client.get(project=project_id, zone=zone, instance=instance_name)
def create_from_custom_image(
    project_id: str, zone: str, instance_name: str, custom_image_link: str
) -> compute_v1.Instance:
    """
    Create a new VM instance with custom image used as its boot disk.

    Args:
        project_id: project ID or project number of the Cloud project you want to use.
        zone: name of the zone to create the instance in. For example: "us-west3-b"
        instance_name: name of the new virtual machine (VM) instance.
        custom_image_link: link to the custom image you want to use in the form of:
            "projects/{project_name}/global/images/{image_name}"

    Returns:
        Instance object.
    """
    disk_type = f"zones/{zone}/diskTypes/pd-standard"
    disks = [disk_from_image(disk_type, 10, True, custom_image_link, True)]
    instance = create_instance(project_id, zone, instance_name, disks)
    return instance


create_instance('my_project', 'us-central1-a', 'testvm-01', '?')
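For reference, disks expects a plain Python list of compute_v1.AttachedDisk objects built by disk_from_image (which is exactly what create_from_custom_image above does), not a disk name or path; passing a string is what triggers the "is not iterable" error. A rough, untested sketch of the call, where the image family, zone, and disk type are placeholders:

# Sketch only: build an AttachedDisk from an image and pass it in a list.
newest_image = get_image_from_family(project="debian-cloud", family="debian-11")
disk_type = "zones/us-central1-a/diskTypes/pd-standard"
boot_disk = disk_from_image(disk_type, 10, True, newest_image.self_link, True)

create_instance('my_project', 'us-central1-a', 'testvm-01', disks=[boot_disk])

For a custom image, the "projects/{project_name}/global/images/{image_name}" link can be passed as source_image instead, as the disk_from_image docstring describes.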

Related

Amazon Aurora PostgreSQL instance creation using CDK in python

I want to create an Aurora PostgreSQL cluster and DB instance using the CDK in Python. I have gone through the documents but am unable to create it. Following is the code:
import json
from constructs import Construct
from aws_cdk import (
    Stack,
    aws_secretsmanager as asm,
    aws_ssm as ssm,
    aws_rds as rds,
)
from settings import settings


class DatabaseDeploymentStack(Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        stage_name = settings.stage
        region = Stack.of(self).region
        account = Stack.of(self).account
        db_username = 'customdbuser'  # settings.db_username
        db_name = f'netsol_{stage_name}_db'
        db_resource_prefix = f'netsol-{region}-{stage_name}'
        print(db_resource_prefix)
        is_staging: bool = stage_name == 'staging'

        generated_secret_string = asm.SecretStringGenerator(
            secret_string_template=json.dumps({"username": f"{db_username}"}),
            exclude_punctuation=True,
            include_space=False,
            generate_string_key='password'
        )

        db_credentials_secret = asm.Secret(
            self, 'db_credentials_secret',
            secret_name=f'{db_resource_prefix}-credentials',
            generate_secret_string=generated_secret_string
        )

        ssm.StringParameter(
            self, 'db_credentials_arn',
            parameter_name=f'{db_resource_prefix}-credentials-arn',
            string_value=db_credentials_secret.secret_arn
        )

        scaling_configuration = rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=True,
            max_capacity=4 if is_staging else 384,
            min_capacity=2,
            seconds_until_auto_pause=900 if is_staging else 10800
        )

        db_cluster = rds.CfnDBCluster(
            self, 'db_cluster',
            db_cluster_identifier=f'{db_resource_prefix}-clusterabz',
            engine_mode='serverless',
            engine='aurora-postgresql',
            engine_version='10.14',
            enable_http_endpoint=True,
            database_name=db_name,
            master_username='abz',
            master_user_password='Password123',
            backup_retention_period=1 if is_staging else 30,
            scaling_configuration=scaling_configuration,
            deletion_protection=False if is_staging else False
        )

        db_cluster_arn = f'arn:aws:rds:{region}:{account}:cluster:{db_cluster.ref}'

        ssm.StringParameter(
            self, 'db_resource_arn',
            parameter_name=f'{db_resource_prefix}-resource-arn',
            string_value=db_cluster_arn
        )

        cfn_dBInstance = rds.CfnDBInstance(
            self, "db_instance",
            db_instance_class="db.t3.medium",
        )
When I run this code I get the following error:
"db_instance (dbinstance) Property AllocatedStorage cannot be empty"
I have gone through the AWS documents, which say that this property is not required for Amazon Aurora. Moreover, I have also tried giving this property along with other properties but am still not able to create the instance.
Can anyone help me figure out the problem, please?
Note: when I run the code without the DB instance, the cluster is created successfully.
Required Output
The required output is as per the image below (image not included here).
After spending some time I figured out the issue. Following are the details:
The code was actually fine when I deployed the DB cluster and the DB instance separately, but when I deployed the whole stack, the system was trying to create the DB instance before the DB cluster existed, and that caused the error.
The problem was resolved by adding the dependency below.
cfn_dBInstance.add_depends_on(db_cluster)
The line above ensures that the DB instance is only created once the DB cluster has been successfully created.
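A minimal sketch of where that dependency sits in the stack from the question (names come from the code above; in recent CDK v2 releases the same method is also exposed as add_dependency()):

# Inside DatabaseDeploymentStack.__init__, right after the CfnDBInstance
# is declared (sketch only):
cfn_dBInstance = rds.CfnDBInstance(
    self, "db_instance",
    db_instance_class="db.t3.medium",
)
# Make CloudFormation create the cluster before the DB instance.
cfn_dBInstance.add_depends_on(db_cluster)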

How can I scale deployment replicas in Kubernetes cluster from python client?

I have a Kubernetes cluster set up and managed by AKS, and I have access to it with the Python client.
The thing is that when I try to send a patch scale request, I get an error.
I've found information about scaling namespaced deployments from the Python client in the GitHub docs, but it was not clear what body is needed in order to make the request work:
# Enter a context with an instance of the API kubernetes.client
with kubernetes.client.ApiClient(configuration) as api_client:
    # Create an instance of the API class
    api_instance = kubernetes.client.AppsV1Api(api_client)
    name = 'name_example'  # str | name of the Scale
    namespace = 'namespace_example'  # str | object name and auth scope, such as for teams and projects
    body = None  # object |
    pretty = 'pretty_example'  # str | If 'true', then the output is pretty printed. (optional)
    dry_run = 'dry_run_example'  # str | When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed (optional)
    field_manager = 'field_manager_example'  # str | fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). (optional)
    force = True  # bool | Force is going to "force" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. (optional)
    try:
        api_response = api_instance.patch_namespaced_deployment_scale(name, namespace, body, pretty=pretty, dry_run=dry_run, field_manager=field_manager, force=force)
        pprint(api_response)
    except ApiException as e:
        print("Exception when calling AppsV1Api->patch_namespaced_deployment_scale: %s\n" % e)
So when running the code I'm getting Reason: Unprocessable Entity.
Does anyone have any idea what format the body should be in? For example, if I want to scale the deployment to 2 replicas, how can it be done?
The body argument to patch_namespaced_deployment_scale can be a JSONPatch document, as @RakeshGupta shows in the comment, but it can also be a partial resource manifest. For example, this works:
>>> api_response = api_instance.patch_namespaced_deployment_scale(
...     name, namespace,
...     [{'op': 'replace', 'path': '/spec/replicas', 'value': 2}])
(Note that the value needs to be an integer, not a string as in the comment.)
But this also works:
>>> api_response = api_instance.patch_namespaced_deployment_scale(
...     name, namespace,
...     {'spec': {'replicas': 2}})
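For completeness, a minimal untested sketch of the whole call, assuming a working kubeconfig; the deployment name and namespace below are placeholders:

from kubernetes import client, config

config.load_kube_config()  # or config.load_incluster_config() inside a pod
apps = client.AppsV1Api()

# Scale the (hypothetical) deployment "my-deployment" in "default" to 2 replicas
# using a partial-manifest body, as shown above.
scale = apps.patch_namespaced_deployment_scale(
    name="my-deployment",
    namespace="default",
    body={"spec": {"replicas": 2}},
)
print(scale.spec.replicas)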

How to wait for a variable change when using Pyads library?

I am working on a project with TwinCAT and an AMR. I'm using Python as a communication medium between the two systems. I have an issue with waiting for a variable to change value. I have a variable of type BOOL and want to perform a certain action when the variable changes. Can someone help me with this?
P.S. I have set up a notification for the change in the variable as well.
import pyads

PLC = pyads.Connection('127.0.0.1.1.1', pyads.PORT_SPS1)
PLC.open()
StnF = PLC.read_by_name('GVL.AGVgotoStnF', pyads.PLCTYPE_BOOL)
print(StnF)
if StnF == 'TRUE':
    ArrStnF = PLC.write_by_name('GVL.iPosAGV', 3, pyads.PLCTYPE_INT)
    print(ArrStnF)
You're looking for notifications. The pyads documentation gives an example of how to do this:
import pyads
from ctypes import sizeof

# define the callback which extracts the value of the variable
def callback(notification, data):
    contents = notification.contents
    var = next(map(int, bytearray(contents.data)[0:contents.cbSampleSize]))

plc = pyads.Connection('127.0.0.1.1.1', pyads.PORT_SPS1)
plc.open()
attr = pyads.NotificationAttrib(sizeof(pyads.PLCTYPE_INT))

# add_device_notification returns a tuple of notification_handle and
# user_handle which we just store in handles
handles = plc.add_device_notification('GVL.integer_value', attr, callback)

# To remove the device notification just use the del_device_notification
# function.
plc.del_device_notification(*handles)
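Building on that example, one possible way to block until the BOOL from the question changes is to set a threading.Event from the callback. A rough, untested sketch; the variable names are taken from the question, everything else is an assumption:

import threading
from ctypes import sizeof

import pyads

changed = threading.Event()

def on_change(notification, data):
    # Pull the raw sample bytes out of the notification header and
    # interpret the first byte as the BOOL value.
    contents = notification.contents
    value = bool(bytearray(contents.data)[0])
    if value:
        changed.set()

plc = pyads.Connection('127.0.0.1.1.1', pyads.PORT_SPS1)
plc.open()

attr = pyads.NotificationAttrib(sizeof(pyads.PLCTYPE_BOOL))
handles = plc.add_device_notification('GVL.AGVgotoStnF', attr, on_change)

changed.wait()  # blocks here until the PLC sets the BOOL to TRUE
plc.write_by_name('GVL.iPosAGV', 3, pyads.PLCTYPE_INT)

plc.del_device_notification(*handles)
plc.close()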

'ec2.Instance' object is not iterable

I'm trying to copy the specified EC2 tags to their respective volumes. The function is invoked when the instance state changes to 'running'. However, I don't want the code to run on every instance: for the first version of the code, when it was invoked by a single instance, it ran on all instances, even those that were already tagged. I want it to run only for the specific instances that are booting up.
With some changes, I'm getting the error: 'ec2.Instance' object is not iterable.
I'm really new to Python and not sure how to proceed. Any input from you bright minds?
----HERE IS MY LAMBDA CODE----
import boto3
import json


def lambda_handler(event, context):
    # is_test = context.function_name == 'test'  # this value is injected by SAM local
    instance = boto3.resource('ec2').Instance(id=event["detail"]["instance-id"])
    tags_to_use = ['Stack-Name', 'StackID']
    for instance in instance:
        tags = instance.tags
        to_tag = [t for t in tags if t['Key'] in tags_to_use]
        for vol in instance.volumes.all():
            print(f"Tagging volume {vol.id} from instance {instance.id}")
            vol.create_tags(Tags=to_tag)
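Since boto3's Instance(...) returns a single resource rather than a collection, one untested sketch of the handler without the loop over instance (same tags and event shape as above) looks like this:

import boto3

def lambda_handler(event, context):
    # ec2.Instance(...) is a single Instance resource, not something iterable,
    # so operate on it directly instead of "for instance in instance".
    instance = boto3.resource('ec2').Instance(id=event["detail"]["instance-id"])
    tags_to_use = ['Stack-Name', 'StackID']
    to_tag = [t for t in (instance.tags or []) if t['Key'] in tags_to_use]
    for vol in instance.volumes.all():
        print(f"Tagging volume {vol.id} from instance {instance.id}")
        vol.create_tags(Tags=to_tag)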

Python AWS boto3 list of instances always updated

Using the AWS Boto3 resource service, I'm getting a list of all my EC2 instances:
ec2_instances = ec2.instances.all()
Every time I do something with my `ec2_instances` variable, it seems to reload the whole instance list.
Is there a way to just pull the list once and then work on it (filter and such)?
Example of what I do: getting the list and showing it as a menu after filtering some values.
In my Aws() class:
def load_ec2_instance(self, region):
    """
    Load the EC2 instances in a region

    :param region:
    :rtype: list
    :return: a list of the instances in a region or None if there are no instances
    """
    ec2 = self._get_resource("ec2", region)
    ec2_instances = ec2.instances.all()
    counter = collections.Counter(ec2_instances)
    ec2_size = sum(counter.itervalues())
    if ec2_size == 0:
        return None
    return ec2_instances
In my menu module:
instances = Aws.get_instance().load_ec2_instance(chosen_region)
show_environments_menu(instances)


def show_environments_menu(instances):
    subprocess.call("clear")
    print "Please choose the environment your instance is located in:"
    environments = Aws.get_instance().get_environments_from_instances(instances)
    for i, environment in enumerate(environments):
        print "%d. %s" % (i + 1, environment)
def get_environments_from_instances(self, instances):
    """
    Get all the environments available from instances lists

    :param list instances: the list of instance
    :rtype: list
    :return: a list of the environments
    """
    environments = []
    for instance in instances:
        tags = instance.tags
        for tag in tags:
            key = tag.get("Key")
            if key == "Environment":
                environment = tag.get("Value").strip()
                if environment not in environments:
                    environments.append(environment)
    return environments
It takes time depending on my internet connection, and I can see that when I disconnect my internet it can't filter. I only have 12 instances, so the loop to filter them shouldn't take any time at all.
Update:
I changed the Aws() class to a module and I'm using these two functions:
def load_ec2_instances(region):
    """
    Load the EC2 instances in a region

    :param region:
    :rtype: list
    :return: a list of the instances in a region or None if there are no instances
    """
    ec2 = _get_resource("ec2", region)
    ec2_instances = ec2.instances.all()
    counter = collections.Counter(ec2_instances)
    ec2_size = sum(counter.itervalues())
    if ec2_size == 0:
        return None
    return ec2_instances
def get_environments_from_instances(instances):
    """
    Get all the environments available from instances lists

    :param list instances: the list of instance
    :rtype: list
    :return: a list of the environments
    """
    environments = []
    for instance in instances:
        tags = instance.tags
        for tag in tags:
            key = tag.get("Key")
            if key == "Environment":
                environment = tag.get("Value").strip()
                if environment not in environments:
                    environments.append(environment)
    return environments
For anyone still facing this issue:
Try using the EC2 client rather than the resource.
Resources in boto3 are higher level and do more work for you, whereas the client just gives you what you ask of it.
When you use the client you get Python dictionary objects, which you can manipulate in memory to your heart's content. See the example below.
You could edit this to use list comprehensions and methods etc.
See EC2.Client.describe_instances for the documentation on the EC2 client method for describing EC2 instances.
import boto3

ec2 = boto3.client("ec2", region_name='us-west-2')
reservations = ec2.describe_instances()['Reservations']

instances = []
for reservation in reservations:
    for instance in reservation['Instances']:
        instances.append(instance)

environments = []
for instance in instances:
    for tag in instance['Tags']:
        if tag['Key'] == 'Environment':
            environments.append(tag['Value'])
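As the answer suggests, the same thing written with list comprehensions could look roughly like this (untested sketch, same assumed region):

import boto3

ec2 = boto3.client("ec2", region_name='us-west-2')
reservations = ec2.describe_instances()['Reservations']

# Flatten reservations into instances, then collect the Environment tag values.
instances = [i for r in reservations for i in r['Instances']]
environments = [
    tag['Value']
    for instance in instances
    for tag in instance.get('Tags', [])
    if tag['Key'] == 'Environment'
]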
