Skipping a step in an Apache Beam pipeline (Python)

So I'm constructing an Apache Beam pipeline and having some trouble skipping the rest of the steps in the Python SDK. Here is a simplified example I'm having trouble with:
import apache_beam as beam
import os

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = API_KEY

def foo(message):
    pass

options = {
    'streaming': True
}

runner = 'DirectRunner'
opts = beam.pipeline.PipelineOptions(flags=[], **options)

with beam.Pipeline(runner, options=opts) as p:
    sub_message = (p | 'sub' >> beam.io.ReadFromPubSub(subscription=my_sub))
    result = (sub_message | 'foo' >> beam.Map(foo))
    result | 'print' >> beam.Map(print)

job = p.run()
if runner == 'DirectRunner':
    job.wait_until_finish()
According to this question (Apache Beam - skip pipeline step), which is in Java, if my function doesn't return anything then Beam should skip the rest of the steps. Correct me if I'm wrong, but in Python that is the same as returning None, so my pass could be replaced with return None and be exactly the same. But when I run this code with the pass or return None, the result still goes to the next step. That is, it keeps printing None when it should not be printing anything, since it should skip all of the following steps. Any help appreciated.

Funnily enough, as soon as I posted this I found the answer in the docs. It looks like the equivalent of what the linked question does is a ParDo, NOT a Map as I used. So really it should look like this:
import apache_beam as beam
import os

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials

class TestFn(beam.DoFn):
    def process(self, element):
        print('hi')
        pass

options = {
    'streaming': True
}

runner = 'DirectRunner'
opts = beam.pipeline.PipelineOptions(flags=[], **options)

with beam.Pipeline(runner, options=opts) as p:
    sub_message = (p | 'sub' >> beam.io.ReadFromPubSub(subscription=mysub))
    result = (sub_message | 'foo' >> beam.ParDo(TestFn()))
    result | 'print' >> beam.Map(print)

job = p.run()
if runner == 'DirectRunner':
    job.wait_until_finish()
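Worth noting for anyone who lands here: a ParDo drops an element simply by not yielding anything for it, so the same mechanism gives you conditional skipping. A minimal sketch (the keep-condition is illustrative, not from the original post):
import apache_beam as beam

class MaybeSkipFn(beam.DoFn):
    def process(self, element):
        # Yield only the elements we want to keep; anything that is not
        # yielded is dropped and never reaches the downstream 'print' step.
        if element:  # illustrative condition
            yield element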

Related

Dataflow Python pickling issue

I have written a simple Dataflow program that takes input from a Pub/Sub topic and calculates the Fibonacci number for that integer. However, my DoFn is not able to pickle the custom function fibonacci, which gives me errors when running on DataflowRunner. Can someone help me figure out what I am doing wrong?
Below is my pipeline code.
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions, StandardOptions, SetupOptions

class Fibonacci(beam.DoFn):
    def fibonacci(self, n):
        if n < 2:
            return n
        else:
            return self.fibonacci(n-1) + self.fibonacci(n-2)

    def process(self, element, fib):
        import json
        # do some processing
        n = int(json.loads(element.data))
        # call fibonacci
        return [fib(n)]

def Print(n):
    print(n)

if __name__ == "__main__":
    input_subscription = 'projects/consumerresearch/subscriptions/test-user-sub'

    options = PipelineOptions()
    options.view_as(StandardOptions).streaming = True
    options.view_as(SetupOptions).save_main_session = True

    p = beam.Pipeline(options=options)

    raw_pubsub_data = (
        p | 'Read from topic' >> beam.io.ReadFromPubSub(subscription=input_subscription, with_attributes=True)
    )

    output = raw_pubsub_data | beam.ParDo(Fibonacci()) | beam.Map(Print)

    result = p.run()
    result.wait_until_finish()
The signature of process should be this:
process(self, element):
Your implementation has a third parameter, fib; Beam would not know what to pass for it. Change your implementation to reference self.fibonacci instead.
https://beam.apache.org/documentation/programming-guide/#pardo
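For completeness, a minimal sketch of the fix suggested above: drop the extra parameter and call the method through self, so process matches the expected signature.
import json

import apache_beam as beam

class Fibonacci(beam.DoFn):
    def fibonacci(self, n):
        if n < 2:
            return n
        return self.fibonacci(n - 1) + self.fibonacci(n - 2)

    def process(self, element):
        # element.data is available because the pipeline reads with with_attributes=True
        n = int(json.loads(element.data))
        yield self.fibonacci(n)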

Is it possible to join batch data with streaming data in Apache Beam?

I wonder whether it is possible to join batch data with streaming data in apache-beam, something like below:
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.io.external.kafka import ReadFromKafka

def run():
    with beam.Pipeline(options=PipelineOptions(["--runner=DirectRunner"])) as p:
        batch_data = (
            p
            | 'ReadMyFile' >> beam.io.ReadFromText("s3://my_batch_data.txt")
            | beam.Map(batch_processing_func)
        )
        streaming_data = (
            p
            | 'Read data' >> ReadFromKafka(
                consumer_config={"bootstrap.servers": "localhost:9092"},
                topics=["my-first-topic2"],
            )
            | beam.Map(streaming_processing_func)
        )
        joined_data = ({'batch_data': batch_data, 'streaming_data': streaming_data} | beam.CoGroupByKey())

if __name__ == "__main__":
    run()
The reason I'm curious is that it looks like Google Dataflow supports only one of them at a time.
This is a good question. The answer is: yes, you can join batch data with streaming.
For your particular pipeline, the likely easiest way is to define a side input for your batch data, and use that to enrich your stream:
def run():
    with beam.Pipeline(options=PipelineOptions(["--runner=DirectRunner"])) as p:
        batch_data_si = beam.pvalue.AsList(
            p
            | 'ReadMyFile' >> beam.io.ReadFromText("s3://my_batch_data.txt")
            | beam.Map(batch_processing_func)
        )
        streaming_data = (
            p
            | 'Read data' >> ReadFromKafka(
                consumer_config={"bootstrap.servers": "localhost:9092"},
                topics=["my-first-topic2"],
            )
            | beam.Map(streaming_processing_func)
        )
        joined_data = (streaming_data
                       | beam.Map(enrich_stream, batch_data_si))

if __name__ == "__main__":
    run()
Where your enrich_stream function looks something like this:
def enrich_stream(element, batch_side_input):
    element = dict(element)  # make a copy of the element
    element['special_element'] = batch_side_input[element['index']]  # or something like that : )
    return element
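If the batch records are keyed, a variation on the same idea is to pass them as a dict-shaped side input so the stream can look values up by key instead of scanning a list. A minimal sketch, assuming batch_processing_func emits (key, value) tuples (the 'index' key is illustrative, as above):
# Assumption: each batch record is a (key, value) tuple, so the side input
# can be built with AsDict instead of AsList.
batch_data_si = beam.pvalue.AsDict(
    p
    | 'ReadMyFile' >> beam.io.ReadFromText("s3://my_batch_data.txt")
    | beam.Map(batch_processing_func)
)

def enrich_stream(element, batch_dict):
    element = dict(element)  # make a copy of the element
    # 'index' is an illustrative key name, not from the original answer.
    element['special_element'] = batch_dict.get(element.get('index'))
    return element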

Python Dataflow: setup is called but process is not

I have a streaming Python Dataflow job. Because of some expensive setup, I implement the setup method for my DoFn class. When I run it with DirectRunner, everything works as expected. However, when I deploy this to Dataflow (GCP), process is never called. I can confirm from the logs that setup finished successfully, but no log from process ever appears. What could be the reason?
Simplified code of my job:
class PredictionFn(beam.DoFn):
    def setup(self):
        # download data from remote server etc
        # ...
        logging.info('setup successful!')

    def process(self, element):
        (user_id, device_id) = element
        logging.info('process ' + user_id)
        # more logic here...

def run(argv=None, save_main_session=True):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--subscription',
        required=True,
        help=(
            'Input PubSub subscription of the form '
            '"projects/<PROJECT>/subscriptions/<SUBSCRIPTION>"'))
    parser.add_argument(
        '--bigtable_instance',
        help='The Bigtable instance ID',
        default='devices-metadata')
    parser.add_argument(
        '--bigtable_table',
        help='The Bigtable table ID in the instance.',
        default='device-profiles')
    known_args, pipeline_args = parser.parse_known_args(argv)

    pipeline_options = PipelineOptions(pipeline_args)
    # We use the save_main_session option because DoFn's in this
    # workflow rely on global context (e.g., a module imported at module level).
    pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
    pipeline_options.view_as(StandardOptions).streaming = True
    project_id = pipeline_options.view_as(GoogleCloudOptions).project

    logging.info('initializing pipeline: %s', known_args.subscription)

    with beam.Pipeline(options=pipeline_options) as p:
        users = (p
                 | "ReadEvents" >> beam.io.ReadFromPubSub(subscription=known_args.subscription, with_attributes=False)
                 | "ExtractUser" >> beam.ParDo(ExtractUserId())
                 | beam.ParDo(AddTimestampFn())
                 | beam.WindowInto(beam.window.FixedWindows(5, 0))
                 | beam.Distinct())

        predictions = (users
                       | 'Predict' >> (beam.ParDo(PredictionFn())))

        _ = (predictions
             | 'PredictionToRowUpdate' >> beam.ParDo(CreateRowFn())
             | WriteToBigTable(
                 project_id=project_id,
                 instance_id=known_args.bigtable_instance,
                 table_id=known_args.bigtable_table))

if __name__ == '__main__':
    logging.getLogger('elasticsearch').setLevel(logging.WARN)
    run()

Apache Beam Python GroupByKey with Kafka IO streaming data

I'm trying to create fixed windows of 10 seconds using Apache Beam 2.23 with Kafka as the data source.
It seems to trigger for every record even if I set an AfterProcessingTime trigger of 15, and it throws the following error if I try to use GroupByKey.
Error: KeyError: 0 [while running '[17]: FixedWindow']
Data simulation:
from kafka import KafkaProducer
import time

producer = KafkaProducer()
id_val = 1001
while(1):
    message = {}
    message['id_val'] = str(id_val)
    message['sensor_1'] = 10
    if (id_val < 1003):
        id_val = id_val + 1
    else:
        id_val = 1001
    time.sleep(2)
    print(time.time())
    producer.send('test', str(message).encode())
Beam snippet:
class AddTimestampFn(beam.DoFn):
    def process(self, element):
        timestamp = int(time.time())
        yield beam.window.TimestampedValue(element, timestamp)

pipeline_options = PipelineOptions()
pipeline_options.view_as(StandardOptions).streaming = True
p = beam.Pipeline(options=pipeline_options)

with beam.Pipeline() as p:
    lines = p | "Reading messages from Kafka" >> kafkaio.KafkaConsume(kafka_config)
    groups = (
        lines
        | 'ParseEventFn' >> beam.Map(lambda x: (ast.literal_eval(x[1])))
        | 'Add timestamp' >> beam.ParDo(AddTimestampFn())
        | 'After timestamp add ' >> beam.ParDo(PrintFn("timestamp add"))
        | 'FixedWindow' >> beam.WindowInto(
            beam.window.FixedWindows(10*1), allowed_lateness=30)
        | 'Group ' >> beam.GroupByKey()
        | 'After group' >> beam.ParDo(PrintFn("after group")))
What am I doing wrong here? I have just started using Beam, so it could be something really silly.
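One thing worth checking: GroupByKey in the Python SDK operates on a PCollection of (key, value) tuples, and the dicts coming out of ast.literal_eval are not keyed, which is consistent with the KeyError: 0. A minimal sketch of keying the records before windowing and grouping (using id_val as the key is an assumption, not from the original post):
keyed = (
    lines
    | 'ParseEventFn' >> beam.Map(lambda x: ast.literal_eval(x[1]))
    | 'Add timestamp' >> beam.ParDo(AddTimestampFn())
    # GroupByKey needs (key, value) pairs; key on id_val (illustrative choice).
    | 'KeyById' >> beam.Map(lambda d: (d['id_val'], d))
    | 'FixedWindow' >> beam.WindowInto(beam.window.FixedWindows(10))
    | 'Group' >> beam.GroupByKey())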

Apache Beam ETL dimension table loading, any example?

I am thinking of loading a file into one dimension table. My solution is:
Beam.Read the file
Create a side input from the DB of the existing data.
In a ParDo: filter the records which are already in the side input
BigQuerySink into the DB.
I want to ask whether someone has implemented this, and can you give me some example of it?
Thanks
Can you give me some example of CoGroupByKey? I understand that it may look like the below. Sorry, I am a newbie to Dataflow, and reading code is the best way for me to learn.
step 1: sourcedata = beam.ReadFromText(...)
step 2: existing_table = beam.pvalue.AsDict(p
                         | beam.Read(beam.BigQuerySource(my_query))
                         | beam.Map(format_rows))
I assume the structure of sourcedata and existing_table is the same: <k, v>
step 3: source_existing_data = ({sourcedata, existing_table}
                                | 'coGroupBy' >> beam.CoGroupByKey())
step 4: new_data = source_existing_data | beam.Filter(lambda (name, (existing, source)): source is None)
step 5: bigQuerySink(new_data)
Side inputs are a good option for this, but consider that if your DB table is pretty large, you may find later that CoGroupByKey is a better option. To implement this in side inputs, you'd do the following:
p = beam.Pipeline(..)

existing_table = beam.pvalue.AsDict(
    p
    | beam.Read(beam.io.BigQuerySource(my_query))
    | beam.Map(format_rows))

class FilterRowsDoFn(beam.DoFn):
    def process(self, elem, table_dict):
        k = elem[0]
        if k not in table_dict:
            yield elem

result = (p
          | beam.ReadFromText(...)
          | beam.ParDo(FilterRowsDoFn(), table_dict=existing_table))
And then you can write the result to BQ. But, again, if your table already contains many elements, you may want to consider using CoGroupByKey.
The code to accomplish this using CoGroupByKey should look something like this:
sourcedata = (p
              | beam.ReadFromText(...)
              | beam.Map(format_text))

existing_table = (p
                  | beam.Read(beam.io.BigQuerySource(my_query))
                  | beam.Map(format_rows))

source_existing_data = ((sourcedata, existing_table)
                        | 'coGroupBy' >> beam.CoGroupByKey())

new_data = (source_existing_data
            | beam.Filter(lambda (name, (source, existing)): not list(existing))
            | beam.FlatMap(lambda (name, (source, existing)): [(name, s) for s in source]))

result = new_data | bigQuerySink(new_data)
Let me know if you have any trouble using either of the code snippets so I'll fix them up.
The rows coming from the text file and the rows coming from BigQuery needed to be converted with these functions:
from GCPUtil import BuildTupleRowFn as BuildTupleRowFn
from GCPUtil import BuildDictTupleRowFn as BuildDictTupleRowFn
The new data after CoGroupByKey and Filter also needs to be converted, since what you get from CoGroupByKey is a tuple, so it needs to be converted into a dict or list.
Below is the detailed code:
#####################################################################
# Develop by Emma 2017/08/19
#####################################################################
import argparse
import logging
from random import randrange

import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.pvalue import AsList
from apache_beam.pvalue import AsSingleton
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import StandardOptions

import sys
sys.path.append("..")
from GCPUtil import BuildTupleRowFn as BuildTupleRowFn
from GCPUtil import BuildDictTupleRowFn as BuildDictTupleRowFn


def configure_bigquery_write():
    return [
        ('CAND_ID', 'STRING'),
        ('CAND_NAME', 'STRING'),
    ]


class BuildRowFn(beam.DoFn):
    def process(self, element):
        row = {}
        for entry in element:
            print('start')
            print(entry)
            # print(entry[0])
            # print(entry[1])
            print('end')
            row['CAND_ID'] = entry[0]
            row['CAND_NAME'] = entry[1]
            yield row


def run(argv=None):
    """Run the workflow."""
    # schema = 'CAND_ID:STRING,CAND_NAME:STRING'
    schema = 'CAND_ID:STRING,CAND_NAME:STRING'
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', default=r'd:/resource/test*')
    parser.add_argument('--output', default=r'd:/output/test/new_emma')
    # parser.add_argument('--project', default='chinarose_project')
    known_args, pipeline_args = parser.parse_known_args(argv)

    pipeline_options = PipelineOptions(pipeline_args)
    pipeline_options.view_as(StandardOptions).runner = 'DirectRunner'
    pipeline_options.view_as(GoogleCloudOptions).project = 'chinarose_project'

    # query = 'select store FROM [chinarose_project:emma_test.sales]'
    query = 'select CAND_ID, CAND_NAME from emma_test.campaign'

    p = beam.Pipeline(options=pipeline_options)

    # get the length of the word and write them in the text file, noticed the UDF
    source_data = (p | beam.io.ReadFromText(known_args.input)
                     | beam.Map(lambda a: a.split(","))
                     | beam.ParDo(BuildTupleRowFn())
                   )
    # source_data | 'write' >> WriteToText(known_args.output)
    # source_data | WriteToText(known_args.output)

    print("connect to BQ")
    existing_data = (p | beam.io.Read(beam.io.BigQuerySource(query=query, project='chinarose_project'))
                       | beam.ParDo(BuildDictTupleRowFn())
                     )
    # existing_data | WriteToText(known_args.output)

    source_existing_data = ((source_data, existing_data)
                            | 'GoGroupBy' >> beam.CoGroupByKey())
    # source_existing_data | 'write to text' >> WriteToText(known_args.output)

    new_data = (source_existing_data
                | beam.Filter(lambda (name, (source, existing)): len(existing) == 0)
                | beam.Map(lambda (name, (source, existing)): [(name, s) for s in source])
                | beam.ParDo(BuildRowFn())
                | beam.io.Write(beam.io.BigQuerySink(table='campaign_emma_v2', dataset='emma_test',
                                                     project='chinarose_project', schema=schema))
                )
    # new_data | 'write to text' >> WriteToText(known_args.output)

    p.run().wait_until_finish()


if __name__ == '__main__':
    # logging.getLogger().setLevel(logging.INFO)
    print('begin')
    run()
    print('end')
