How to execute a stored procedure in Azure Databricks PySpark? - python

I am able to execute a simple SQL statement using PySpark in Azure Databricks but I want to execute a stored procedure instead. Below is the PySpark code I tried.
#initialize pyspark
import findspark
findspark.init('C:\Spark\spark-2.4.5-bin-hadoop2.7')
#import required modules
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import *
import pandas as pd
#Create spark configuration object
conf = SparkConf()
conf.setMaster("local").setAppName("My app")
#Create spark context and sparksession
sc = SparkContext.getOrCreate(conf=conf)
spark = SparkSession(sc)
table = "dbo.test"
#read table data into a spark dataframe
jdbcDF = spark.read.format("jdbc") \
    .option("url", f"jdbc:sqlserver://localhost:1433;databaseName=Demo;integratedSecurity=true;") \
    .option("dbtable", table) \
    .option("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver") \
    .load()
#show the data loaded into dataframe
#jdbcDF.show()
sqlQueries="execute testJoin"
resultDF=spark.sql(sqlQueries)
resultDF.show(resultDF.count(),False)
This doesn't work. How do I execute a stored procedure instead?

In case someone is still looking for a way to do this, it's possible to use the built-in JDBC connector of your Spark session. The following code sample will do the trick:
import msal
# Set url & credentials
jdbc_url = ...
tenant_id = ...
sp_client_id = ...
sp_client_secret = ...
# Write your SQL statement as a string
name = "Some passed value"
statement = f"""
EXEC Staging.SPR_InsertDummy
@Name = '{name}'
"""
# Generate an OAuth2 access token for service principal
authority = f"https://login.windows.net/{tenant_id}"
app = msal.ConfidentialClientApplication(sp_client_id, sp_client_secret, authority)
token = app.acquire_token_for_client(scopes=["https://database.windows.net/.default"])["access_token"]
# Create a spark properties object and pass the access token
properties = spark._sc._gateway.jvm.java.util.Properties()
properties.setProperty("accessToken", token)
# Fetch the driver manager from your spark context
driver_manager = spark._sc._gateway.jvm.java.sql.DriverManager
# Create a connection object and pass the properties object
con = driver_manager.getConnection(jdbc_url, properties)
# Create callable statement and execute it
exec_statement = con.prepareCall(statement)
exec_statement.execute()
# Close connections
exec_statement.close()
con.close()
For more information, including a similar method that uses SQL-user credentials to connect over JDBC and how to retrieve return parameters, take a look at this blog post:
https://medium.com/delaware-pro/executing-ddl-statements-stored-procedures-on-sql-server-using-pyspark-in-databricks-2b31d9276811
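As a rough, untested sketch of that SQL-user variant: the server, database, user, and password below are placeholders (not values from the original post), and the procedure name is reused from the snippet above.
# Minimal sketch of the same DriverManager approach with SQL-user credentials (placeholder values)
jdbc_url = "jdbc:sqlserver://<your-server>.database.windows.net:1433;databaseName=<your-db>"
sql_user = "<sql-username>"
sql_password = "<sql-password>"
# java.sql.DriverManager also accepts (url, user, password) directly
driver_manager = spark._sc._gateway.jvm.java.sql.DriverManager
con = driver_manager.getConnection(jdbc_url, sql_user, sql_password)
# Use the JDBC call escape syntax and bind the parameter instead of string formatting
exec_statement = con.prepareCall("{call Staging.SPR_InsertDummy(?)}")
exec_statement.setString(1, "Some passed value")
exec_statement.execute()
exec_statement.close()
con.close()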

Running a stored procedure through a JDBC connection from Azure Databricks is not supported as of now. But your options are:
Use the pyodbc library to connect and execute your procedure. Keep in mind that with this library you will be running your code on the driver node while all your workers are idle; a minimal sketch follows this list. See this article for details.
https://datathirst.net/blog/2018/10/12/executing-sql-server-stored-procedures-on-databricks-pyspark
Use a SQL table function rather than procedures. In a sense, you can use anything that you can use in the FROM clause of a SQL query.
Since you are in an Azure environment, using a combination of Azure Data Factory (to execute your procedure) and Azure Databricks can help you build pretty powerful pipelines.
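For the first option, a minimal pyodbc sketch might look like the following. It assumes the ODBC Driver 17 for SQL Server is installed on the cluster; the server, database, and credentials are placeholders, and the procedure name is taken from the question.
import pyodbc
# Placeholder connection details; this runs on the driver node only
conn = pyodbc.connect(
    "DRIVER={ODBC Driver 17 for SQL Server};"
    "SERVER=<your-server>.database.windows.net,1433;"
    "DATABASE=<your-db>;"
    "UID=<sql-username>;"
    "PWD=<sql-password>",
    autocommit=True,
)
cursor = conn.cursor()
# Execute the stored procedure from the question
cursor.execute("EXEC dbo.testJoin")
conn.close()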

Related

How to pass dataset id to bigquery client for python

I just started playing around with bigquery, and I am trying to pass the dataset id to the python client. It should be a pretty basic operation, but I can't find it on other threads.
In practice I would like to take the following example
# import packages
import os
from google.cloud import bigquery
# set current work directory to the one with this script.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# initialize client object using the bigquery key I generated from Google clouds
google_credentials_path = 'bigquery-stackoverflow-DC-fdb49371cf87.json'
client = bigquery.Client.from_service_account_json(google_credentials_path)
# create simple query
query_job = client.query(
    """
    SELECT
      CONCAT(
        'https://stackoverflow.com/questions/',
        CAST(id as STRING)) as url,
      view_count
    FROM `bigquery-public-data.stackoverflow.posts_questions`
    WHERE tags like '%google-bigquery%'
    ORDER BY view_count DESC
    LIMIT 10"""
)
# store results in dataframe
dataframe_query = query_job.result().to_dataframe()
and make it look something like
# import packages
import os
from google.cloud import bigquery
# set current work directory to the one with this script.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# initialize client object using the bigquery key I generated from Google clouds
google_credentials_path = 'bigquery-stackoverflow-DC-fdb49371cf87.json'
client = bigquery.Client.from_service_account_json(google_credentials_path)\
    .A_function_to_specify_id(bigquery-public-data.stackoverflow)
# create simple query
query_job = client.query(
    """
    SELECT
      CONCAT(
        'https://stackoverflow.com/questions/',
        CAST(id as STRING)) as url,
      view_count
    FROM `posts_questions` -- No dataset ID here anymore
    WHERE tags like 'google-bigquery'
    ORDER BY view_count DESC
    LIMIT 10"""
)
# store results in dataframe
dataframe_query = query_job.result().to_dataframe()
The documentation eludes me, so any help would be appreciated.
The closest thing to what you're asking for is the default_dataset property of the query job config. The query job config is an optional object that can be passed into the query() method of the instantiated BigQuery client.
You don't set a default dataset as part of instantiating a client, as not all resources are dataset scoped. You're implicitly working with a query job in your example, which is a project-scoped resource.
So, to adapt your sample a bit, it might look something like this:
# skip the irrelevant bits like imports and client construction
job_config = bigquery.QueryJobConfig(default_dataset="bigquery-public-data.stackoverflow")
sql = "SELECT COUNT(1) FROM posts_questions WHERE tags like 'google-bigquery'"
dataframe = client.query(sql, job_config=job_config).to_dataframe()
If you're issuing multiple queries against this same dataset you could certainly reuse the same job config object with multiple query invocations.
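For example, a small sketch of that reuse (the second query is only illustrative and assumes the public posts_answers table):
job_config = bigquery.QueryJobConfig(default_dataset="bigquery-public-data.stackoverflow")
# Both queries resolve unqualified table names against the default dataset
questions_df = client.query(
    "SELECT COUNT(1) AS n FROM posts_questions WHERE tags LIKE '%google-bigquery%'",
    job_config=job_config,
).to_dataframe()
answers_df = client.query(
    "SELECT COUNT(1) AS n FROM posts_answers",
    job_config=job_config,
).to_dataframe()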

Running python script to get SQL Statement for Google BigQuery

Trying to run a script that contains a SQL query:
import example_script
example_script.df.describe()
example_script.df.info()
q1 = '''
example_script.df['specific_column'])
'''
job_config = bigquery.QueryJobConfig()
query_job = client.query(q1, job_config= job_config)
q = query_job.to_dataframe()
The issue I'm having: when I import it, how do I get that specific column name used as text so it will run as a query against GBQ? Instead, it's stuck in pandas formatting that Google doesn't want to read. Are there other options?

Querying Athena tables in AWS Glue Python Shell

Python Shell jobs were introduced in AWS Glue. They mentioned:
You can now use Python shell jobs, for example, to submit SQL queries to services such as ... Amazon Athena ...
OK. We have an example of reading data from Athena tables here:
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
glueContext = GlueContext(SparkContext.getOrCreate())
persons = glueContext.create_dynamic_frame.from_catalog(
database="legislators",
table_name="persons_json")
print("Count: ", persons.count())
persons.printSchema()
# TODO query all persons
However, it uses Spark instead of Python Shell. The libraries that are normally available with the Spark job type are not present, and I get an error:
ModuleNotFoundError: No module named 'awsglue.transforms'
How can I rewrite the code above to make it executable in the Python Shell job type?
The thing is, the Python Shell job type has its own limited set of built-in libraries.
I only managed to achieve my goal using Boto 3 to query data and Pandas to read it into a dataframe.
Here is the code snippet:
import boto3
import pandas as pd

s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
athena_client = boto3.client(service_name='athena', region_name='us-east-1')
bucket_name = 'bucket-with-csv'
print('Working bucket: {}'.format(bucket_name))

def run_query(client, query):
    response = client.start_query_execution(
        QueryString=query,
        QueryExecutionContext={ 'Database': 'sample-db' },
        ResultConfiguration={ 'OutputLocation': 's3://{}/fromglue/'.format(bucket_name) },
    )
    return response

def validate_query(client, query_id):
    resp = ["FAILED", "SUCCEEDED", "CANCELLED"]
    response = client.get_query_execution(QueryExecutionId=query_id)
    # wait until query finishes
    while response["QueryExecution"]["Status"]["State"] not in resp:
        response = client.get_query_execution(QueryExecutionId=query_id)
    return response["QueryExecution"]["Status"]["State"]

def read(query):
    print('start query: {}\n'.format(query))
    qe = run_query(athena_client, query)
    qstate = validate_query(athena_client, qe["QueryExecutionId"])
    print('query state: {}\n'.format(qstate))
    file_name = "fromglue/{}.csv".format(qe["QueryExecutionId"])
    obj = s3_client.get_object(Bucket=bucket_name, Key=file_name)
    return pd.read_csv(obj['Body'])

time_entries_df = read('SELECT * FROM sample-table')
SparkContext won't be available in Glue Python Shell, so you need to depend on Boto3 and Pandas to handle the data retrieval. But it adds a lot of overhead to query Athena using boto3 and poll the QueryExecutionId to check whether the query execution has finished.
Recently awslabs released a new package called AWS Data Wrangler. It extends the power of the Pandas library to AWS, making it easy to interact with Athena and many other AWS services.
Reference links:
https://github.com/awslabs/aws-data-wrangler
https://github.com/awslabs/aws-data-wrangler/blob/master/tutorials/006%20-%20Amazon%20Athena.ipynb
Note: the AWS Data Wrangler library won't be available by default inside the Glue Python Shell. To include it in Python Shell, follow the instructions at the following link:
https://aws-data-wrangler.readthedocs.io/en/latest/install.html#aws-glue-python-shell-jobs
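As a rough sketch of what that looks like once awswrangler is installed (the database and table names below are placeholders):
import awswrangler as wr
# Run the query in Athena and read the result straight into a Pandas DataFrame
df = wr.athena.read_sql_query(
    "SELECT * FROM sample_table",
    database="sample_db",
)
print(df.head())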
I have been using Glue for a few months; I use:
from pyspark.context import SparkContext
from awsglue.context import GlueContext
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
data_frame = spark.read.format("com.databricks.spark.csv")\
    .option("header","true")\
    .load(<CSVs THAT IS USING FOR ATHENA - STRING>)

How to fix AWS Glue code in displaying count and schema of partitioned table from AWS S3

I'm trying to count the records and print the schema of my partitioned table (stored as Parquet). I'm doing it just in the AWS Glue console (since I don't have access to connect to a developer endpoint). However, I don't think my query is producing any result. See my code below. Any suggestions?
%pyspark
from awsglue.context import GlueContext
from awsglue.transforms import *
from pyspark.context import SparkContext
glueContext = GlueContext(SparkContext.getOrCreate())
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "s3", table_name = "subscriber", push_down_predicate = "(date=='2018-12-06')", transformation_ctx = "datasource0")
df = datasource0.toDF()
print(df.count())
df.printSchema()
I'm not sure about using print in Glue... I would recommend using logging to output results. You can get the logger object and use it like this:
spark = glueContext.spark_session
log4jLogger = spark.sparkContext._jvm.org.apache.log4j
logger = log4jLogger.LogManager.getLogger(__name__)
logger.info(df.count())
From the job console you can then access the logs of the specific job execution. There you should be able to see your DataFrame count, for example.

How to build a sparkSession in Spark 2.0 using pyspark?

I just got access to Spark 2.0; I have been using Spark 1.6.1 up until this point. Can someone please help me set up a SparkSession using PySpark (Python)? I know that the Scala examples available online are similar (here), but I was hoping for a direct walkthrough in Python.
My specific case: I am loading in avro files from S3 in a zeppelin spark notebook. Then building df's and running various pyspark & sql queries off of them. All of my old queries use sqlContext. I know this is poor practice, but I started my notebook with
sqlContext = SparkSession.builder.enableHiveSupport().getOrCreate().
I can read in the avros with
mydata = sqlContext.read.format("com.databricks.spark.avro").load("s3:...
and build dataframes with no issues. But once I start querying the dataframes/temp tables, I keep getting the "java.lang.NullPointerException" error. I think that is indicative of a translational error (e.g. old queries worked in 1.6.1 but need to be tweaked for 2.0). The error occurs regardless of query type. So I am assuming
1.) the sqlContext alias is a bad idea
and
2.) I need to properly set up a sparkSession.
So if someone could show me how this is done, or perhaps explain the discrepancies they know of between the different versions of spark, I would greatly appreciate it. Please let me know if I need to elaborate on this question. I apologize if it is convoluted.
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('abc').getOrCreate()
Now, to import a .csv file you can use
df=spark.read.csv('filename.csv',header=True)
As you can see in the Scala example, SparkSession is part of the sql module. It is similar in Python; see the pyspark.sql module documentation:
class pyspark.sql.SparkSession(sparkContext, jsparkSession=None)
The entry point to programming Spark with the Dataset and DataFrame API. A SparkSession can be used create DataFrame, register DataFrame as tables, execute SQL over tables, cache tables, and read parquet files. To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \
... .master("local") \
... .appName("Word Count") \
... .config("spark.some.config.option", "some-value") \
... .getOrCreate()
From here http://spark.apache.org/docs/2.0.0/api/python/pyspark.sql.html
You can create a spark session using this:
>>> from pyspark.sql import SparkSession
>>> from pyspark.conf import SparkConf
>>> c = SparkConf()
>>> spark = SparkSession.builder.config(conf=c).getOrCreate()
spark = SparkSession.builder\
    .master("local")\
    .enableHiveSupport()\
    .getOrCreate()
spark.conf.set("spark.executor.memory", '8g')
spark.conf.set('spark.executor.cores', '3')
spark.conf.set('spark.cores.max', '3')
spark.conf.set("spark.driver.memory",'8g')
sc = spark.sparkContext
Here's a useful Python SparkSession class I developed:
#!/bin/python
# -*- coding: utf-8 -*-

######################
# SparkSession class #
######################

class SparkSession:
    # - Notes:
    #   The main object is the Spark Context ('sc' object).
    #   All new Spark sessions ('spark' objects) share the same underlying Spark context ('sc' object) in the same JVM,
    #   but for each Spark context the temporary tables and registered functions are isolated.
    #   You can't create a new Spark Context in another JVM by using 'sc = SparkContext(conf)',
    #   but it's possible to create several Spark Contexts in the same JVM by setting 'spark.driver.allowMultipleContexts' to true (not recommended).
    # - See:
    #   https://medium.com/@achilleus/spark-session-10d0d66d1d24
    #   https://stackoverflow.com/questions/47723761/how-many-sparksessions-can-a-single-application-have
    #   https://stackoverflow.com/questions/34879414/multiple-sparkcontext-detected-in-the-same-jvm
    #   https://stackoverflow.com/questions/39780792/how-to-build-a-sparksession-in-spark-2-0-using-pyspark
    #   https://stackoverflow.com/questions/47813646/sparkcontext-getorcreate-purpose?noredirect=1&lq=1

    from pyspark.sql import SparkSession

    spark = None   # The Spark Session
    sc = None      # The Spark Context
    scConf = None  # The Spark Context conf

    def _init(self):
        self.sc = self.spark.sparkContext
        self.scConf = self.sc.getConf()  # or self.scConf = self.spark.sparkContext._conf

    # Return the current Spark Session (singleton), otherwise create a new one
    def getOrCreateSparkSession(self, master=None, appName=None, config=None, enableHiveSupport=False):
        cmd = "self.SparkSession.builder"
        if (master != None): cmd += ".master(" + master + ")"
        if (appName != None): cmd += ".appName(" + appName + ")"
        if (config != None): cmd += ".config(" + config + ")"
        if (enableHiveSupport == True): cmd += ".enableHiveSupport()"
        cmd += ".getOrCreate()"
        self.spark = eval(cmd)
        self._init()
        return self.spark

    # Return the current Spark Context (singleton), otherwise create a new one via getOrCreateSparkSession()
    def getOrCreateSparkContext(self, master=None, appName=None, config=None, enableHiveSupport=False):
        self.getOrCreateSparkSession(master, appName, config, enableHiveSupport)
        return self.sc

    # Create a new Spark session from the current Spark session (with isolated SQL configurations).
    # The new Spark session shares the underlying SparkContext and cached data,
    # but the temporary tables and registered functions are isolated.
    def createNewSparkSession(self, currentSparkSession):
        self.spark = currentSparkSession.newSession()
        self._init()
        return self.spark

    def getSparkSession(self):
        return self.spark

    def getSparkSessionConf(self):
        return self.spark.conf

    def getSparkContext(self):
        return self.sc

    def getSparkContextConf(self):
        return self.scConf

    def getSparkContextConfAll(self):
        return self.scConf.getAll()

    def setSparkContextConfAll(self, properties):
        # Properties example: { 'spark.executor.memory' : '4g', 'spark.app.name' : 'Spark Updated Conf', 'spark.executor.cores': '4', 'spark.cores.max': '4'}
        self.scConf = self.scConf.setAll(properties)  # or self.scConf = self.spark.sparkContext._conf.setAll()

    # Stop (clear) the active SparkSession for the current thread.
    #def stopSparkSession(self):
    #    return self.spark.clearActiveSession()

    # Stop the underlying SparkContext.
    def stopSparkContext(self):
        self.spark.stop()  # Or self.sc.stop()

    # Returns the active SparkSession for the current thread, returned by the builder.
    #def getActiveSparkSession(self):
    #    return self.spark.getActiveSession()

    # Returns the default SparkSession that is returned by the builder.
    #def getDefaultSession(self):
    #    return self.spark.getDefaultSession()
