Hi, I am trying to run a query with pandas_gbq, but I can't figure out how to reference a variable inside the query string. How do I do that?
def dataBQ(maxDate):
    # load data
    dat = pd.read_csv("data/source/rawdat.csv", delimiter=";")
    # convert to datetime format
    dat['date_key'] = pd.to_datetime(dat['date_key'], format='%d/%m/%Y').dt.date
    # get latest date
    maxDate = dat['date_key'].max()

    dataTraffic = """
    SELECT *
    FROM `fileData`
    WHERE date_key > {maxDate}
    """
    dataBQ = pandas_gbq.read_gbq(dataTraffic, project_id=projectId)
How do I reference maxDate inside the dataTraffic query?
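One straightforward way (a minimal sketch, not the only option) is to substitute the value with an f-string or str.format and quote it so BigQuery treats it as a date literal. Here projectId is assumed to be defined elsewhere, as in the question, and date_key is assumed to be a DATE column on the BigQuery side:

import pandas as pd
import pandas_gbq

def dataBQ():
    # load the file and find the most recent date, as in the original function
    dat = pd.read_csv("data/source/rawdat.csv", delimiter=";")
    dat['date_key'] = pd.to_datetime(dat['date_key'], format='%d/%m/%Y').dt.date
    maxDate = dat['date_key'].max()

    # the f-string substitutes the Python value into the SQL text; the DATE '...'
    # quoting assumes date_key is a DATE column in BigQuery
    dataTraffic = f"""
    SELECT *
    FROM `fileData`
    WHERE date_key > DATE '{maxDate}'
    """

    # projectId is assumed to be defined elsewhere in the script
    return pandas_gbq.read_gbq(dataTraffic, project_id=projectId)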
I'm developing a Kubeflow pipeline that takes in a dataset, splits it into two different datasets based on a filter inside the code, and outputs both. The function looks like the following:
def merge_promo_sales(input_data: Input[Dataset],
                      output_data_hd: OutputPath("Dataset"),
                      output_data_shop: OutputPath("Dataset")):

    import pandas as pd
    pd.set_option('display.max_rows', 100)
    pd.set_option('display.max_columns', 500)
    import numpy as np
    from google.cloud import bigquery
    from utils import google_bucket

    client = bigquery.Client("gcp-sc-demand-plan-analytics")
    print("Client creating using default project: {}".format(client.project), "Pulling Data")

    query = """
    SELECT * FROM `gcp-sc-demand-plan-analytics.Modeling_Input.monthly_delivery_type_sales` a
    LEFT JOIN `gcp-sc-demand-plan-analytics.Modeling_Input.monthly_promotion` b
        ON a.ship_base7 = b.item_no
        AND a.oper_cntry_id = b.corp_cd
        AND a.dmand_mo_yr = b.dates
    """
    query_job = client.query(
        query,
        # Location must match that of the dataset(s) referenced in the query.
        location="US",
    )  # API request - starts the query

    df = query_job.to_dataframe()
    df.drop(['corp_cd', 'item_no', 'dates'], axis=1, inplace=True)
    df.loc[:, 'promo_objective_increase_margin':] = df.loc[:, 'promo_objective_increase_margin':].fillna(0)

    items = df_['ship_base7'].unique()
    df = df[df['ship_base7'].isin(items)]

    df_hd = df[df['location_type'] == 'home_delivery']
    df_shop = df[df['location_type'] != 'home_delivery']

    df_hd.to_pickle(output_data_hd)
    df_shop.to_pickle(output_data_shop)
That part works fine. When I try to feed those two data sets into the next function with the compiler, I hit errors.
I tried the following:
@kfp.v2.dsl.pipeline(name=PIPELINE_NAME)
def my_pipeline():
    merge_promo_sales_nl = merge_promo_sales(input_data=new_launch.output)
    rule_3_hd = rule_3(input_data=merge_promo_sales_nl.output_data_hd)
    rule_3_shop = rule_3(input_data=merge_promo_sales_nl.output_data_shop)
The error I get is the following:
AttributeError: 'ContainerOp' object has no attribute 'output_data_hd'
output_data_hd is the parameter I write that dataset out to, but apparently it's not the attribute name Kubeflow is looking for.
I just figured this out.
When a component produces multiple outputs, you use the following in the pipeline definition that gets compiled:
rule_3_hd = rule_3(input_data = merge_promo_sales_nl.outputs['output_data_hd'])
rule_3_shop = rule_3(input_data = merge_promo_sales_nl.outputs['output_data_shop'])
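For completeness, this is what the pipeline function from the question looks like with that change (same component names as above; only the attribute access differs):

@kfp.v2.dsl.pipeline(name=PIPELINE_NAME)
def my_pipeline():
    merge_promo_sales_nl = merge_promo_sales(input_data=new_launch.output)
    # multiple named outputs are exposed through the .outputs dict, not as attributes
    rule_3_hd = rule_3(input_data=merge_promo_sales_nl.outputs['output_data_hd'])
    rule_3_shop = rule_3(input_data=merge_promo_sales_nl.outputs['output_data_shop'])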
My DynamoDB table has timestamp (in YYYY-MM-DD HH:MM:SS format) as the primary key column and temperature as the sort key, and an item's data looks like {"humidity": 42, "location": "room", "temperature":, "thermostat":}.
In boto3 (Python) I need to scan based on timestamp (between now and 15 minutes ago), with this condition: if the difference (temperature - thermostat) > 5 more than 10 times, return thermostat - 5, and if (temperature - thermostat) < 5 more than 10 times, return thermostat + 5. The following is the code:
import boto3
import math
import json
import time
import dateutil.tz
from datetime import datetime, timedelta
from dateutil import tz
from dateutil.tz import tzlocal
from boto3.dynamodb.conditions import Key, Attr

client = boto3.client('dynamodb')
dynamodb = boto3.resource('dynamodb')

def lambda_handler(event, context):
    #table_name = "thermostat_dynamo"
    table_name = "TableDynamo"
    Primary_Column_Name = 'timestamp'
    table = dynamodb.Table(table_name)

    #key_param = "thermostat"
    #thermostatVal = table.get_item(Key={key_param: event[key_param]})  ## get record from dynamodb for this sensor
    thermostatVal = 77

    south = dateutil.tz.gettz('Asia/Kolkata')
    now = datetime.now(tz=south)
    fifteen_min_ago = now - timedelta(seconds=900)
    now = now.strftime('%F %T')
    fifteen_min_ago = fifteen_min_ago.strftime('%F %T')

    fe = Key('timeStamp').between(fifteen_min_ago, now)
    response = table.scan(FilterExpression=fe & Attr('temperature').lt(thermostatVal))

    if response['Count'] == 10:
        #return thermostatVal + 5
        thermonew = thermostatVal + 5
        tosensor = '{"thermostat":' + str(thermonew) + '}'
        print(tosensor)
        #response = client.publish(topic="updatehomesensor", qos=1, payload=tosensor)
        return
    elif response['Count'] < 10:
        print('{"thermostat":' + str(thermostatVal) + '}')
        return
If timestamp were a sort key, you could have used a Query request to read just the items with timestamp > now-15min.
However, unfortunately, timestamp is your hash key. The only way you can find the items with timestamp > now-15min is to Scan through all your items. This will cost you a lot of money: you pay Amazon for each item scanned, not for each item returned after the filtering.
Another problem is that the DynamoDB filtering syntax (look at the FilterExpression documentation) doesn't actually allow addition and subtraction as part of the test. If you always want "temperature - thermostat", you can store that difference as one of the attributes (so you can use it in a FilterExpression), keep "thermostat" as a second attribute, and later add the two up to get the "temperature".
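A minimal sketch of that second suggestion, assuming a re-designed key schema with a fixed partition key (the existing "location" attribute is used here) and timestamp as the sort key, plus a hypothetical precomputed "temp_minus_thermostat" attribute written with each item:

import boto3
import dateutil.tz
from datetime import datetime, timedelta
from boto3.dynamodb.conditions import Key, Attr

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('TableDynamo')  # assumes the re-designed key schema described above

south = dateutil.tz.gettz('Asia/Kolkata')
now = datetime.now(tz=south)
fifteen_min_ago = now - timedelta(minutes=15)

# Query (not Scan) touches only the last 15 minutes of one sensor's items;
# the arithmetic is done at write time and stored in 'temp_minus_thermostat',
# because a FilterExpression cannot compute temperature - thermostat itself.
response = table.query(
    KeyConditionExpression=Key('location').eq('room') &
                           Key('timestamp').between(fifteen_min_ago.strftime('%F %T'),
                                                    now.strftime('%F %T')),
    FilterExpression=Attr('temp_minus_thermostat').gt(5),
)

if response['Count'] > 10:
    # more than 10 readings in the window were over 5 degrees above the thermostat setting
    ...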
I have SQL output in a pandas DataFrame that I would like to first convert to a .hyper Tableau extract and then publish to Tableau Server via the Extract API. When I run my code (below), I get the error 'module' object is not callable for tdefile = tableausdk.HyperExtract(outfilename). I believe my code is correct, but maybe the modules were installed incorrectly? Has anyone seen this error?
print("Importing modules...")
import pandas as pd
import pyodbc
import re
import numpy as np
import cx_Oracle
import smtplib
import schedule
import time
import win32com.client as win32
import tableauserverclient as TSC
import os
import tableausdk
from pandleau import *
from tableausdk import *
from tableausdk.HyperExtract import *
print("Done importing modules.")
server = x
db = y
conn_sql = pyodbc.connect(...)  # fill in your connection data
### sql query - change from getdate() - 4 to TD# ##
sql_1 = """
select
* from test
"""
df = pd.read_sql_query(sql_1, conn_sql)
df.head()
def job(df, outfilename):
    if os.path.isfile(outfilename):
        os.remove(outfilename)
        os.remove('DataExtract.log')

    try:
        tdefile = tableausdk.HyperExtract(outfilename)
    except:
        #os.remove(outfilename)
        os.system('del ' + outfilename)
        os.system('del DataExtract.log')
        tdefile = tableausdk.HyperExtract(outfilename)

    # define the table definition
    tableDef = tableausdk.TableDefinition()
    # create a list of column names
    colnames = df.columns
    # create a list of column types
    coltypes = df.dtypes

    # create a dict for the field maps
    # Define type maps
    # Caveat: I am not including all of the possibilities here
    fieldMap = {
        'float64': tde.Types.Type.DOUBLE,
        'float32': tde.Types.Type.DOUBLE,
        'int64': tde.Types.Type.DOUBLE,
        'int32': tde.Types.Type.DOUBLE,
        'object': tde.Types.Type.UNICODE_STRING,
        'bool': tde.Types.Type.BOOLEAN,
        'datetime64[ns]': tde.Types.Type.DATE,
    }

    # for each column, add the appropriate info to the Table Definition
    for i in range(0, len(colnames)):
        cname = colnames[i]                 # header of column
        coltype = coltypes[i]               # pandas data type of column
        ctype = fieldMap.get(str(coltype))  # get integer field type in Tableau Speak
        tableDef.addColumn(cname, ctype)

    # add the data to the table
    with tdefile as extract:
        table = extract.addTable("Extract", tableDef)
        for r in range(0, df.shape[0]):
            row = tde.Row(tableDef)
            for c in range(0, len(coltypes)):
                if df.iloc[r, c] is None:
                    row.setNull(c)
                elif str(coltypes[c]) in ('float64', 'float32', 'int64', 'int32'):
                    try:
                        row.setDouble(c, df.iloc[r, c])
                    except:
                        row.setNull(c)
                elif str(coltypes[c]) == 'object':
                    try:
                        row.setString(c, df.iloc[r, c])
                    except:
                        row.setNull(c)
                elif str(coltypes[c]) == 'bool':
                    row.setBoolean(c, df.iloc[r, c])
                elif str(coltypes[c]) == 'datetime64[ns]':
                    try:
                        row.setDate(c, df.iloc[r, c].year, df.iloc[r, c].month, df.iloc[r, c].day)
                    except:
                        row.setNull(c)
                else:
                    row.setNull(c)
            # insert the row
            table.insert(row)

    tdefile.close()

#df_tableau = pandleau(df_1)
#df_tableau.set_spatial('SpatialDest', indicator=True)
#df_tableau.to_tableau('test.hyper', add_index=False)
job(df, 'test_1.hyper')
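For what it's worth, the error message is consistent with tableausdk.HyperExtract being a module rather than a class: in the Tableau SDK samples the extract object is created from the Extract class inside that module. A minimal sketch of that call, assuming the standard tableausdk package layout rather than anything specific to this install:

import tableausdk.HyperExtract

# 'tableausdk.HyperExtract' is a module, so calling it raises
# "'module' object is not callable"; the class that creates the .hyper
# file is Extract inside that module:
tdefile = tableausdk.HyperExtract.Extract('test_1.hyper')

# equivalently, since the script already does `from tableausdk.HyperExtract import *`:
# tdefile = Extract('test_1.hyper')

Note also that the tde prefix used in the type map is not defined anywhere in the snippet, so that alias would need to point at the tableausdk package (or the type names would need to be imported directly) for the rest of the function to run.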
Really basic pyspark/hive question:
How do I append to an existing table? My attempt is below
from pyspark import SparkContext, SparkConf
from pyspark.sql import HiveContext
conf_init = SparkConf().setAppName('pyspark2')
sc = SparkContext(conf = conf_init)
hive_cxt = HiveContext(sc)
import pandas as pd
df = pd.DataFrame({'a':[0,0], 'b':[0,0]})
sdf = hive_cxt.createDataFrame(df)
sdf.write.mode('overwrite').saveAsTable('database.table') #this line works
df = pd.DataFrame({'a':[1,1,1], 'b':[2,2,2]})
sdf = hive_cxt.createDataFrame(df)
sdf.write.mode('append').saveAsTable('database.table') #this line does not work
#sdf.write.insertInto('database.table',overwrite = False) #this line does not work
Thanks!
Sam
It seems using mode('overwrite') was causing the problem; it drops the table and then recreates a new one. If I do the following, everything works fine:
from pyspark import SparkContext, SparkConf
from pyspark.sql import HiveContext
conf_init = SparkConf().setAppName('pyspark2')
sc = SparkContext(conf = conf_init)
print(sc.version)
hive_cxt = HiveContext(sc)
hive_cxt.sql('USE database')
query = """
CREATE TABLE IF NOT EXISTS table (a int, b int)
STORED AS parquet
"""
hive_cxt.sql(query)
import pandas as pd
df = pd.DataFrame({'a':[0,0], 'b':[0,0]})
sdf = hive_cxt.createDataFrame(df)
sdf.write.mode('append').format('hive').saveAsTable('table')
query = """
SELECT *
FROM table
"""
df = hive_cxt.sql(query)
df = df.toPandas()
print(df) # successfully pull the data in table
df = pd.DataFrame({'a':[1,1,1], 'b':[2,2,2]})
sdf = hive_cxt.createDataFrame(df)
sdf.write.mode('append').format('hive').saveAsTable('table')
I think you previously forgot to use the format option, which caused the issue when you were trying to append rather than overwrite, as you mentioned above.
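As a side note on the insertInto attempt from the original question: once the table exists with a matching schema (as it does after the CREATE TABLE above), insertInto should also append. A small sketch under that assumption:

# assumes the Hive table `database.table` already exists with columns (a int, b int)
sdf = hive_cxt.createDataFrame(pd.DataFrame({'a': [3, 3], 'b': [4, 4]}))
sdf.write.insertInto('database.table', overwrite=False)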
import pandas as pd
import urllib
import time
import sys
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_bs_query = 'select * from yahoo.finance.historicaldata where symbol = "YHOO" and startDate = "2009-09-11" and endDate = "2010-03-10"'
yql_bs_url = baseurl + urllib.parse.urlencode({'q':yql_bs_query}) + "&format=json&diagnostics=true&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback="
bs_json = pd.io.json.read_json(yql_bs_url)
bs_json.values
YHOO = bs_json.values.tolist()
I am not able to convert this list into a DataFrame.
It is converting to a DataFrame, but the frame has only 1 column and 5 rows, since the JSON has the form:
{u'query': {u'count': 124,
u'created': u'2017-01-26T05:44:52Z',
u'diagnostics': {u'build-version': u'2.0.84',
...
You just need to download the JSON separately, index in to get the quote data, and then convert that to a DataFrame:
# same code as above here:
import pandas as pd
import urllib
import time
import sys
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_bs_query = 'select * from yahoo.finance.historicaldata where symbol = "YHOO" and startDate = "2009-09-11" and endDate = "2010-03-10"'
yql_bs_url = baseurl + urllib.parse.urlencode({'q':yql_bs_query}) + "&format=json&diagnostics=true&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback="
# now that you have the URL:
import requests
# download json data and convert to dict
data = requests.get(yql_bs_url).json()
# get quote data
quote = data["query"]["results"]["quote"]
# convert to dataframe
quote = pd.DataFrame.from_dict(quote)