Google Cloud Platform (GCP): delete snapshots older than 10 days using Python

I want to delete snapshots that are older than 10 days in GCP using Python. I tried the program below, which uses a filter expression, but unfortunately I ran into errors:
from datetime import datetime
from googleapiclient import discovery
import google.oauth2.credentials
from oauth2client.service_account import ServiceAccountCredentials
import sys

def get_disks(project, zone):
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        r"D:\Users\ganeshb\Desktop\Json\auth.json",
        scopes='https://www.googleapis.com/auth/compute')
    service = discovery.build('compute', 'v1', credentials=credentials)
    request = service.snapshots().list(
        project='xxxx',
        filter="creationTimestamp<'2021-05-31'")
    response = request.execute()
    print(response)

output = get_disks("xxxxxxxx", "europe-west1-b")

Your problem is a known Google Cloud bug.
Please read these issue trackers: 132365111 and 132676194
Solution:
Remove the filter statement and process the returned results:
from datetime import datetime
from dateutil import parser

request = service.snapshots().list(project=project)
response = request.execute()

# Watch for timezone issues here!
filter_date = '2021-05-31'
d1 = parser.parse(filter_date)

for item in response['items']:
    d2 = datetime.fromisoformat(item['creationTimestamp'])
    if d2.timestamp() < d1.timestamp():
        # Process the result here. This is a print statement stub.
        print("{} {}".format(item['name'], item['creationTimestamp']))

Related

Spotify API Works Sometimes, Stalls Other Times(?)

I'm trying to follow along with a simple tutorial that uses the spotipy API. The code is below:
import sys
from pprint import pprint
import yaml
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials, SpotifyOAuth
from data_functions import offset_api_limit, get_artists_df, get_tracks_df, get_track_audio_df, \
    get_all_playlist_tracks_df, find_popular_album, get_recommendations_artists, \
    get_recommendations_tracks, get_most_popular_artist, get_recommendations_genre
import pandas as pd

urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu'
scope = "user-library-read user-follow-read user-top-read playlist-read-private"

with open("spotify/spotify_info.yaml", 'r') as stream:
    spotify_details = yaml.safe_load(stream)

sp = spotipy.Spotify(auth_manager=SpotifyOAuth(
    client_id=spotify_details['client_id'],
    client_secret=spotify_details['client_secret'],
    redirect_uri=spotify_details['redirect_uri'],
    scope=scope,
))

results = sp.search(q='album:' + 'From 2 to 3', type='album')
print(results)
print("\n\n")

items = results['albums']['items']
if len(items) > 0:
    print(items)
    album = items[0]
    print(album)
    print(album['name'], album['id'])
    alb = sp.album(album['id'])  # issues here
    pprint(alb)
The code works and pulls information from Spotify until the marked line. I have no idea what's different about this function, but when it hits that line it stalls indefinitely. Has anyone encountered this issue?
I've tried changing my authentication configuration, and other functions such as albums and related_artists also don't work.
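No accepted answer is shown here, but one thing that may be worth trying (an assumption, not a confirmed fix) is giving the client an explicit request timeout and retry budget so a stalled call raises an exception instead of hanging; spotipy's constructor accepts requests_timeout and retries parameters. A rough sketch, reusing the variables from the question:

import spotipy
from spotipy.oauth2 import SpotifyOAuth

# Hypothetical tweak: bound each HTTP call so a stall surfaces as an error.
# The timeout and retry values below are illustrative only.
sp = spotipy.Spotify(
    auth_manager=SpotifyOAuth(
        client_id=spotify_details['client_id'],
        client_secret=spotify_details['client_secret'],
        redirect_uri=spotify_details['redirect_uri'],
        scope=scope,
    ),
    requests_timeout=10,  # seconds per request
    retries=3,
)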

Deployment Error: Function deployment failed due to a health check failure on Google Cloud Function with Tweepy

I am trying to pull Twitter streaming data in a Cloud Function and essentially export the stream data into BigQuery.
Currently, I have this code. The entry point is set to stream_twitter.
main.py:
import os
import tweepy
import pandas as pd
import datalab.bigquery as bq
from google.cloud import bigquery

# access keys
api_key = os.environ['API_KEY']
secret_key = os.environ['SECRET_KEY']
bearer_token = os.environ['BEARER_TOKEN']

def stream_twitter(event, context):
    # authentication
    auth = tweepy.Client(bearer_token=bearer_token)
    api = tweepy.API(auth)

    # create Stream Listener
    class Listener(tweepy.StreamingClient):
        # save list to dataframe
        tweets = []

        def on_tweet(self, tweet):
            if tweet.referenced_tweets == None:  # original tweet, not a reply or retweet
                self.tweets.append(tweet)

        def on_error(self, status_code):
            if status_code == 420:
                # returning False in on_data disconnects the stream
                return False

    stream_tweet = Listener(bearer_token)

    # filtered stream using rules
    rule = tweepy.StreamRule("(covid OR covid19 OR coronavirus OR pandemic OR #covid19 OR #covid) lang:en")
    stream_tweet.add_rules(rule, dry_run=True)
    stream_tweet.filter(tweet_fields=["referenced_tweets"])

    # insert into dataframe
    columns = ["UserID", "Tweets"]
    data = []
    for tweet in stream_tweet.tweets:
        data.append([tweet.id, tweet.text])
    stream_df = pd.DataFrame(data, columns=columns)

    ## Insert time col - TimeStamp to give the time that data is pulled from API
    stream_df.insert(0, 'TimeStamp', pd.to_datetime('now').replace(microsecond=0))
    ## Converting UTC Time to SGT (UTC+8 hours)
    stream_df.insert(1, 'SGT_TimeStamp', '')
    stream_df['SGT_TimeStamp'] = stream_df['TimeStamp'] + pd.Timedelta(hours=8)

    ## Define BQ dataset & table names
    bigquery_dataset_name = 'streaming_dataset'
    bigquery_table_name = 'streaming-table'

    ## Define BigQuery dataset & table
    dataset = bq.Dataset(bigquery_dataset_name)
    table = bq.Table(bigquery_dataset_name + '.' + bigquery_table_name)

    if not table.exists():
        # Create or overwrite the existing table if it exists
        table_schema = bq.Schema.from_dataframe(stream_df)
        table.create(schema=table_schema, overwrite=False)

    # Write the DataFrame to a BigQuery table
    table.insert_data(stream_df)
requirements.txt:
tweepy
pandas
google-cloud-bigquery
However, I keep getting:
"Deployment failure: Function deployment failed due to a health check failure. This usually indicates that your code was built successfully but failed during a test execution. Examine the logs to determine the cause. Try deploying again in a few minutes if it appears to be transient."
I can't seem to figure out how to solve this error. Is there something wrong with my code? Or is there something I should have done? I tested the streaming code in PyCharm and was able to pull the data.
I would appreciate any help I can get. Thank you.
The logs for the function are below. (I am unfamiliar with logs, hence the screenshot.) Essentially, those are the two info and error entries I've been getting.
I managed to replicate your error message. All I did was add datalab==1.2.0 inside requirements.txt. Since you are importing the datalab library, you need to include its supporting package, which is the latest version of datalab.
Here's the reference that I used: Migrating from the datalab Python package.
See the requirements.txt file to view the versions of the libraries used for these code snippets.
Here's the screenshot of the logs:
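If you would rather drop the datalab dependency altogether, which is the direction the migration guide above points in, a rough sketch of the same insert using only google-cloud-bigquery might look like this (the dataset and table names are taken from the question; load_table_from_dataframe also needs pyarrow in requirements.txt):

from google.cloud import bigquery

def write_df_to_bigquery(stream_df, project_id):
    client = bigquery.Client(project=project_id)
    table_id = "{}.streaming_dataset.streaming-table".format(project_id)

    # Let BigQuery infer the schema from the DataFrame and append the rows.
    job = client.load_table_from_dataframe(stream_df, table_id)
    job.result()  # wait for the load job to finish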

Binance API Issue - Python

APIError(code=-2015): Invalid API-key, IP, or permissions for action
I keep getting the above issue.
I am not sure what the issue is.
I am able to access the client.get_all_tickers() command with no problem, but when I try to place an order or access user_data (both of which require a signature), I get the error
APIError(code=-2015): Invalid API-key, IP, or permissions for action
I think the issue has something to do with the signature. I checked that I have the relevant permissions enabled, and I do. Furthermore, I tried creating a new API key and I still got the same issue.
NOTE: I am using binance.us, not binance.com, because I am located in the US and cannot make an account on binance.com.
Therefore, another idea I had was to use a VPN that places me in England so I can make an account on binance.com, and maybe that will work.
import time
import datetime
import json
from time import sleep
from binance.client import Client
from binance.enums import *
import sys
import requests, json, time, hashlib
import urllib3
import logging
from urllib3 import PoolManager
from binance.exceptions import BinanceAPIException, BinanceWithdrawException

r = requests.get('https://www.binance.us/en/home')
client = Client(API_key, Secret_key, tld="us")
prices = client.get_all_tickers()

# Def to get location
def crypto_location(sym):
    count = 0
    for i in prices:
        count += 1
        ticker = i.get('symbol')
        if ticker == sym:
            val = i.get('price')
            count = count - 1
            return count

bitcoin_location = crypto_location('BTCUSDT')
ethereum_location = crypto_location('ETHUSDT')
stable_coin_location = crypto_location('BUSDUSDT')
bitcoin_as_BUSD_location = crypto_location('BTCBUSD')

#%% Where to quickly get bitcoin price
t_min = time.localtime().tm_min
prices = client.get_all_tickers()
bitcoin_price = prices[bitcoin_location].get('price')
print(bitcoin_price)
ethereum_price = prices[ethereum_location].get('price')
print(ethereum_price)
stable_coin_price = prices[stable_coin_location].get('price')
print(stable_coin_price)
bitcoin_as_BUSD = prices[bitcoin_as_BUSD_location].get('price')
print(bitcoin_as_BUSD)

client.session.headers.update({'X-MBX-APIKEY': API_key})
client.get_account()
error occurs at client.get_account()
I had the same problem: Binance API keys without any IP restriction expire every 90 days. I restricted the API key to my IP and it works!
Still, you're sure to find it all here:
https://python-binance.readthedocs.io/en/latest/index.html
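As a quick way to check whether the key/permission problem is resolved, a small probe against a signed endpoint could look like the sketch below (it uses python-binance's client and exception class; API_key and Secret_key are placeholders for your own credentials):

from binance.client import Client
from binance.exceptions import BinanceAPIException

client = Client(API_key, Secret_key, tld="us")  # placeholders for your keys

try:
    # A signed endpoint: succeeds only if the key, secret and any IP restriction are all valid.
    account = client.get_account()
    print("Signed request OK, can trade:", account.get("canTrade"))
except BinanceAPIException as e:
    print("Signed request rejected:", e.code, e.message)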

Python : AttributeError: 'ApiClient' object has no attribute 'configure_jwt_authorization_flow'

I am getting an attribute error while running the code given below:
import base64
import subprocess
from __future__ import absolute_import, print_function
from pprint import pprint
import unittest
import webbrowser
import docusign_esign as docusign
from docusign_esign import AuthenticationApi, TemplatesApi,EnvelopesApi,ApiClient
from PyPDF2 import PdfFileReader
import pandas as pd
from datetime import datetime
from os import path
import requests
integrator_key = "XYZ"
base_url = "https://www.docusign.net/restapi"
oauth_base_url = "account.docusign.com" #use account-d.docusign.com for sandbox
redirect_uri = "https://www.docusign.com/api"
user_id = 'MNO'
private_key_filename = "docusign_private_key.txt"
client_secret = 'ABC' #production
account_id = 'QRS'
api_client = docusign.ApiClient(base_url)
api_client.configure_jwt_authorization_flow(integrator_key, client_secret, redirect_uri)
ERROR:
AttributeError
Traceback (most recent call last)
<ipython-input-2-1abfece08e05> in <module>()
55 api_client = docusign.ApiClient(base_url)
56 # make sure to pass the redirect uri
---> 57 api_client.configure_jwt_authorization_flow(integrator_key, client_secret, redirect_uri)
AttributeError: 'ApiClient' object has no attribute 'configure_jwt_authorization_flow'
Have you configured your Integration Key in DocuSign? Did you get a private RSA key? This is required to use JWT.
Also note that your redirect_uri doesn't seem correct. That URI should point to your code, not to DocuSign.
The issue is that the Python DocuSign SDK interface has changed in recent versions, but the official code examples still use the old one.
So, for obtaining JWT bearer tokens, instead of configure_jwt_authorization_flow you need to use the request_jwt_user_token function of the ApiClient object.
Code Sample:
def update_token(api_client):
    key_data = None
    with open(<rsa private key file>, mode='rb') as file:
        key_data = file.read()
    if key_data is None:
        return None

    jwt_token = api_client.request_jwt_user_token(<INTEGRATION_KEY>,
                                                  <USER_ID>,
                                                  <AUTH_HOST>,
                                                  key_data,
                                                  <token expiration interval>)
    if jwt_token is not None:
        print(jwt_token.access_token)
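A possible follow-up, sketched here using the variables from the question (integrator_key, user_id, oauth_base_url, private_key_filename; the one-hour expiry is illustrative), is to attach the returned access token to the client so subsequent API calls are authorized. The ApiClient exposes set_default_header for this:

with open(private_key_filename, mode='rb') as f:
    key_data = f.read()

api_client = docusign.ApiClient(base_url)
jwt_token = api_client.request_jwt_user_token(integrator_key, user_id,
                                              oauth_base_url, key_data, 3600)

# Attach the bearer token so subsequent calls (e.g. EnvelopesApi) are authorized.
api_client.set_default_header("Authorization", "Bearer " + jwt_token.access_token)
envelopes_api = EnvelopesApi(api_client)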

deadline = None after using urlfetch.set_default_fetch_deadline(n)

I'm working on a web application with Python and Google App Engine.
I tried to set the default URLFetch deadline globally as suggested in a previous thread:
https://stackoverflow.com/a/14698687/2653179
urlfetch.set_default_fetch_deadline(45)
However, it doesn't work: when I print its value in one of the request handlers, urlfetch.get_default_fetch_deadline() is None.
Here is main.py:
from google.appengine.api import users
import webapp2
import jinja2
import random
import string
import hashlib
import CQutils
import time
import os
import httpRequests
import logging
from google.appengine.api import urlfetch

urlfetch.set_default_fetch_deadline(45)
...

class Del(webapp2.RequestHandler):
    def get(self):
        id = self.request.get('id')
        ext = self.request.get('ext')
        user_id = httpRequests.advance(id, ext)
        d2 = urlfetch.get_default_fetch_deadline()
        logging.debug("value of deadline = %s", d2)
Prints in the Log console:
DEBUG 2013-09-05 07:38:21,654 main.py:427] value of deadline = None
The function which is being called in httpRequests.py:
def advance(id, ext=None):
    url = "http://localhost:8080/api/" + id + "/advance"
    if ext is None:
        ext = ""
    params = urllib.urlencode({'ext': ext})
    result = urlfetch.fetch(url=url,
                            payload=params,
                            method=urlfetch.POST,
                            headers={'Content-Type': 'application/x-www-form-urlencoded'})
    if (result.status_code == 200):
        return result.content
I know this is an old question, but I recently ran into this issue.
The setting is placed into a thread-local, meaning that if your application is set to thread-safe and you handle a request in a different thread than the one you set the default deadline for, it can be lost. For me, the solution was to set the deadline before every request as part of the middleware chain.
This is not documented, and required looking through the source to figure it out.
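A minimal sketch of that middleware approach (an illustration, not the answerer's exact code; routes is a placeholder for the application's handler list) could wrap the webapp2 application so the deadline is set on whichever thread serves the request:

import webapp2
from google.appengine.api import urlfetch

class UrlfetchDeadlineMiddleware(object):
    """Sets the default urlfetch deadline at the start of every request."""

    def __init__(self, app, deadline=45):
        self.app = app
        self.deadline = deadline

    def __call__(self, environ, start_response):
        # The default deadline lives in a thread-local, so set it on the
        # thread that is actually handling this request.
        urlfetch.set_default_fetch_deadline(self.deadline)
        return self.app(environ, start_response)

# routes is a placeholder, e.g. [('/del', Del)]
app = UrlfetchDeadlineMiddleware(webapp2.WSGIApplication(routes), deadline=45)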
