Getting duplicate data from csv file - python

def general_number(request):
    csvfilename = 'algo/generalnumber.csv'
    csvfile = open(csvfilename, 'r')
    reader = csv.DictReader(csvfile)
    fieldnames = ("Ticker","Company","Industry","PreviousClose","Open","Bid","Ask","DaysRange","ftWeekRange","Volume","AvgVolume","MarketCap","Beta","PERatioTTM","EPSTTM","EarningsDate","ForwardDividendYield","ExDividendDate","OneyTargetEst","ticker_company")
    output = []
    for each in reader:
        row = {}
        for field in fieldnames:
            row[field] = each[field]
            output.append(row)
    return JsonResponse(output[20:30], safe=False)
Here I am getting duplicate data in the JSON built from the CSV: each datum shows up 10 times. What is wrong with this?

It's just an indentation problem: output.append(row) sits inside the inner for field loop, so each row is appended once per field instead of once per CSV record. Fixed it, try now!
def general_number(request):
    csvfilename = 'algo/generalnumber.csv'
    csvfile = open(csvfilename, 'r')
    reader = csv.DictReader(csvfile)
    fieldnames = ("Ticker","Company","Industry","PreviousClose","Open","Bid","Ask","DaysRange","ftWeekRange","Volume","AvgVolume","MarketCap","Beta","PERatioTTM","EPSTTM","EarningsDate","ForwardDividendYield","ExDividendDate","OneyTargetEst","ticker_company")
    output = []
    for each in reader:
        row = {}
        for field in fieldnames:
            row[field] = each[field]
        output.append(row)
    return JsonResponse(output[20:30], safe=False)
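As an aside, the same view can be written more compactly. This is a sketch only: it reuses the fieldnames tuple from above and assumes the same algo/generalnumber.csv layout, with a with block so the file handle is closed:
def general_number(request):
    with open('algo/generalnumber.csv', 'r') as csvfile:
        # build one dict per CSV record, keeping only the listed fields
        output = [{field: each[field] for field in fieldnames}
                  for each in csv.DictReader(csvfile)]
    return JsonResponse(output[20:30], safe=False)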


Compare 2 different csv files and output all the changes into a new csv

I have 2 CSVs which are New.csv and Old.csv shown below:
Old.csv
longName,shortName,eventType,number,severity
ACTAGENT201,ACAT201,RES,1,INFO
ACTAGENT202,ACAT202,RES,2,ALERT
ACODE801,AC801,ADMIN,1,MINOR
ACODE802,AC802,ADMIN,2,MINOR
ACODE102,AC102,COMM,2,CRITICAL
ACODE103,AC103,COMM,3,CRITICAL
ACODE104,AC104,COMM,4,CRITICAL
ACODE105,AC105,COMM,5,CRITICAL
ACODE106,AC106,COMM,6,CRITICAL
New.csv
longName,shortName,eventType,number,severity
ACTAGENT201,ACAT201,RES,1,INFO
ACTAGENT202,ACAT202,RES,2,ALERT
ACODE801,AC801,ADMIN,1,MINOR
ACODE802,AC802,ThisHasBeenChanged,2,MINOR
ACODE102,AC102,COMM,2,CRITICAL
ACODE103,AC103,COMM,3,CRITICAL
ACODE104,AC104,COMM,4,THISHASBEENCHANGED
ACODE105,AC105,COMM,5,CRITICAL
ACODE106,AC106,COMM,6,CRITICAL
If any column in a row has changed between old.csv and new.csv, then that whole row should be appended to changes.csv, with each column from old.csv and new.csv beside each other.
I know how to find new and deleted items in the csv, but could not figure out how to get the modified items. Code below:
import csv

def DeletedItems(old_csv, new_csv, changes_csv):
    with open(new_csv, newline="", encoding="utf8") as new_fp:
        csv_reader = csv.reader(new_fp)
        csv_headings = next(csv_reader)
        new_long_names = {row[0] for row in csv.reader(new_fp)}
    with open(old_csv, newline="", encoding="utf8") as old_fp:
        with open(changes_csv, "a", newline="", encoding="utf8") as changes_fp:
            writer = csv.writer(changes_fp)
            writer.writerow("")
            for row in csv.reader(old_fp):
                if row[0] not in new_long_names:
                    writer.writerow(row)

def NewItems(old_csv, new_csv, changes_csv):
    with open(old_csv, newline="", encoding="utf8") as old_fp:
        csv_reader = csv.reader(old_fp)
        csv_headings = next(csv_reader)
        old_long_names = {row[0] for row in csv.reader(old_fp)}
    with open(new_csv, newline="", encoding="utf8") as new_fp:
        with open(changes_csv, "w", newline="", encoding="utf8") as changes_fp:
            writer = csv.writer(changes_fp)
            for row in csv.reader(new_fp):
                if row[0] not in old_long_names:
                    writer.writerow(row)

NewItems("old.csv", "new.csv", "changes.csv")
DeletedItems("old.csv", "new.csv", "changes.csv")
First, read both CSV files into a dictionary, using the longName values as keys.
import csv

with open(old_csv_file, "r") as fh:
    reader = csv.reader(fh)
    old_csv = {row[0]: row for row in reader}

with open(new_csv_file, "r") as fh:
    reader = csv.reader(fh)
    new_csv = {row[0]: row for row in reader}
Then, it's easy to find newly added and deleted keys using set operations.
old_longNames = set(old_csv.keys())
new_longNames = set(new_csv.keys())
# common: set intersection
common_longNames = old_longNames.intersection(new_longNames)
# removed: whatever's in old but not in new
removed_longNames = old_longNames - new_longNames
# added: whatever's in new but not in old
added_longNames = new_longNames - old_longNames
Finally, iterate over the common set to find where there are changes:
changed_longNames = []
for key in common_longNames:
    old_row = old_csv[key]
    new_row = new_csv[key]
    # if any(o != n for o, n in zip(old_row, new_row)):
    if old_row != new_row:
        # this row has at least one column changed. Do whatever
        print(f"LongName {key} has changes")
        changed_longNames.append(key)
Or, as a list comprehension:
changed_longNames = [key for key in common_longNames if old_csv[key] != new_csv[key]]
Writing everything to a new csv file is also fairly trivial. Note that sets don't preserve order, so the rows may not come out in the original order. (With the sample files above, changed_longNames would contain ACODE802 and ACODE104, in some order.)
with open("deleted.csv", "w") as fh:
writer = csv.writer(fh)
for key in removed_longNames:
writer.writerow(old_csv[key])
with open("inserted.csv", "w") as fh:
writer = csv.writer(fh)
for key in added_longNames:
writer.writerow(new_csv[key])
with open("changed.csv", "w") as fh:
writer = csv.writer(fh)
for key in changed_longNames:
old_row = old_csv[key]
new_row = new_csv[key]
merged_row = []
for oi, ni in zip(old_row, new_row):
merged_row.append(oi)
merged_row.append(ni)
writer.writerow(merged_row)
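One caveat if you run this on Python 3 under Windows: the csv documentation recommends opening files handed to csv.writer with newline='', otherwise an extra blank line can appear after every row, e.g.:
with open("changed.csv", "w", newline="") as fh:
    writer = csv.writer(fh)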

Error: UNC paths are not supported. Default to Windows Directory

I did not write this code, nor do I really work with Python, so please bear with me. The code below is supposed to run an update on one of our databases and create a few new files. When I try to run it, however, I get the error above. I think it works up to a certain point:
import arcpy
import os
import csv
import pandas as pd
import xlwt
import glob

##set workspaces
os.chdir("//dotatufs04/crm/Public/temp/DDahms/Python/Update_MRS")
In_shp = "DraftHistoricProjectFootprints.gdb/DraftHistoricProjectFootprints"

##delete columns on Production_dot_project
os.system(r"\\dotatufs04\crm\Public\temp\DDahms\Python\Update_MRS\Scripts\run_macro_delete_columns.vbs")

##make joined shapefile and update status
try:
    # Set environment settings
    arcpy.env.workspace = "//dotatufs04/crm/Public/temp/DDahms/Python/Update_MRS"
    arcpy.env.qualifiedFieldNames = False
    # Set local variables
    layerName = "shapefile2"
    joinTable = "PRODUCTION_DOTPF_PROJECT.xls/Sheet3$"
    layerName2 = "tableview"
    joinField1 = "AKSAS_Num_Txt"
    joinField2 = "AKSAS_NUMBER"
    join_tview = "test_tview"
    output_shp = "DraftHistoricProjectFootprints.gdb/joined"
    # Create a feature layer from the vegtype featureclass
    arcpy.MakeFeatureLayer_management(In_shp, layerName)
    # Make Table view from .xls
    arcpy.MakeTableView_management(joinTable, join_tview)
    # Join the feature layer to a table
    arcpy.AddJoin_management(layerName, joinField1, join_tview, joinField2)
    arcpy.CopyFeatures_management(layerName, output_shp)
except Exception as err:
    print(err.args[0])

#add "update" field
lstFields = arcpy.ListFields(output_shp)
x = False
for field in lstFields:
    if field.name == "update":
        x = True
if x <> "True":
    arcpy.AddField_management(output_shp, "updated", "STRING")

#calculate field
infield = "updated"
expression = 'myfunct(!AKSAS_Num_Txt!,!AKSAS_NUMBER!)'
codeblock = '''def myfunct(val1,val2):
    if (val1 <> val2 or val1 == 'N/A' or val1 == '' or val2 == '' or val2 == 'N/A' or val1 == None or val2 == None):
        return "Not Updated"
    else:
        return "Updated"'''
arcpy.CalculateField_management(output_shp, infield, expression, "PYTHON_9.3", codeblock)

#calculate field
infield2 = "AKSAS_Num_Txt"
infield3 = "AKSAS_NUMBER"
infield4 = "STATUS_CATEGORY"
infield5 = "Project_Status"
expression3 = 'myfunct3(!AKSAS_Num_Txt!,!AKSAS_NUMBER!,!Project_Status!,!STATUS_CATEGORY!)'
codeblock3 = '''def myfunct3(inpt,inpt2,inpt3,inpt4):
    if (inpt <> inpt2 or inpt == 'N/A' or inpt == '' or inpt2 == '' or inpt2 == 'N/A' or inpt == None or inpt2 == None):
        result = inpt3
    else:
        result = inpt4
    return result'''
arcpy.CalculateField_management(output_shp, infield5, expression3, "PYTHON_9.3", codeblock3)

#delete fields
arcpy.DeleteField_management(output_shp, ["STATUS_CATEGORY","AKSAS_NUMBER"])

##export drafthistoricprojectfootprints to excel
Input_Table = "DraftHistoricProjectFootprints.gdb/joined"
Output_Excel_File = "DraftHistoricProjectFootprints.xls"
arcpy.TableToExcel_conversion(Input_Table, Output_Excel_File)
The reason I say it works up to this point is that DraftHistoricProjectFootprints.xls appears in the correct folder. Everything after this section of code does not run:
##make csv sheets
os.system(r"\\dotatufs04\crm\Public\temp\DDahms\Python\Update_MRS\Scripts\run_macro_sort_construction.vbs")
wb = xlwt.Workbook(encoding="latin-1")
for filename in glob.glob(r"csv\*.csv"):
    (f_path, f_name) = os.path.split(filename)
    (f_short_name, f_extension) = os.path.splitext(f_name)
    ws = wb.add_sheet(f_short_name)
    spamReader = csv.reader(open(filename, 'rb'))
    for rowx, row in enumerate(spamReader):
        for colx, value in enumerate(row):
            ws.write(rowx, colx, value)
svstr = 'Projects_In_MRS.xls'
wb.save(svstr)
os.system(r"\\dotatufs04\crm\Public\temp\DDahms\Python\Update_MRS\Scripts\run_macro_sort_drafthistoricprojectfootprints.vbs")

##delete sheet3 and historic projects sheet
os.remove("csv/historicprojectsDraftHistoricProjectFootprints.csv")
os.remove("csv/Sheet3.csv")

##read csv and make lists from data
with open(r"csv\completed.csv", 'rb') as f:
    reader = csv.reader(f)
    completed_list = list(reader)
    completed_list = [el[2] for el in completed_list]
with open(r"csv\construction.csv", 'rb') as f:
    reader = csv.reader(f)
    construction_list = list(reader)
    construction_list = [el[2] for el in construction_list]
with open(r"csv\design.csv", 'rb') as f:
    reader = csv.reader(f)
    design_list = list(reader)
    design_list = [el[2] for el in design_list]
with open(r"csv\proposed.csv", 'rb') as f:
    reader = csv.reader(f)
    proposed_list = list(reader)
    proposed_list = [el[2] for el in proposed_list]
with open(r"csv\suspended.csv", 'rb') as f:
    reader = csv.reader(f)
    suspended_list = list(reader)
    suspended_list = [el[2] for el in suspended_list]
with open(r"csv\historicprojectscompleted.csv", 'rb') as f:
    reader = csv.reader(f)
    historic_completed_list = list(reader)
    historic_completed_list = [el[2] for el in historic_completed_list]
with open(r"csv\historicprojectsconstruction.csv", 'rb') as f:
    reader = csv.reader(f)
    historic_construction_list = list(reader)
    historic_construction_list = [el[2] for el in historic_construction_list]
with open(r"csv\historicprojectsdesign.csv", 'rb') as f:
    reader = csv.reader(f)
    historic_design_list = list(reader)
    historic_design_list = [el[2] for el in historic_design_list]
with open(r"csv\historicprojectsproposed.csv", 'rb') as f:
    reader = csv.reader(f)
    historic_proposed_list = list(reader)
    historic_proposed_list = [el[2] for el in historic_proposed_list]
with open(r"csv\historicprojectssuspended.csv", 'rb') as f:
    reader = csv.reader(f)
    historic_suspended_list = list(reader)
    historic_suspended_list = [el[2] for el in historic_suspended_list]

##compare lists and write to csv
compare_completed = list(set(completed_list) - set(historic_completed_list))
if not compare_completed:
    compare_completed.append("All Projects are in Shapefile!")
with open(r"compared_lists\compare_completed.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for val in compare_completed:
        writer.writerow([val])
compare_construction = list(set(construction_list) - set(historic_construction_list))
if not compare_construction:
    compare_construction.append("All Projects are in Shapefile!")
with open(r"compared_lists\compare_construction.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for val in compare_construction:
        writer.writerow([val])
compare_design = list(set(design_list) - set(historic_design_list))
if not compare_design:
    compare_design.append("All Projects are in Shapefile!")
with open(r"compared_lists\compare_design.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for val in compare_design:
        writer.writerow([val])
compare_proposed = list(set(proposed_list) - set(historic_proposed_list))
if not compare_proposed:
    compare_proposed.append("All Projects are in Shapefile!")
with open(r"compared_lists\compare_proposed.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for val in compare_proposed:
        writer.writerow([val])
compare_suspended = list(set(suspended_list) - set(historic_suspended_list))
if not compare_suspended:
    compare_suspended.append("All Projects are in Shapefile!")
with open(r"compared_lists\compare_suspended.csv", "w") as output:
    writer = csv.writer(output, lineterminator='\n')
    for val in compare_suspended:
        writer.writerow([val])

##combine csv to single workbook
wb = xlwt.Workbook()
for filename in glob.glob(r"compared_lists\*.csv"):
    (f_path, f_name) = os.path.split(filename)
    (f_short_name, f_extension) = os.path.splitext(f_name)
    ws = wb.add_sheet(f_short_name)
    spamReader = csv.reader(open(filename, 'rb'))
    for rowx, row in enumerate(spamReader):
        for colx, value in enumerate(row):
            ws.write(rowx, colx, value)
wb.save(r"Projects_Missing_from_DraftHistoricProjectFootprints.xls")

##put proj names on worksheet
os.system(r"\\dotatufs04\crm\Public\temp\DDahms\Python\Update_MRS\Scripts\run_macro_get_proj_names_to_missing_proj.vbs")

##change project status to proper (sentence) case
expression99 = '!Project_Status!.title()'
arcpy.CalculateField_management(output_shp, "Project_Status", expression99, "PYTHON_9.3")
Once this script runs, it should create/populate a few different documents as shown above, but instead, the error pops up and kills it. As mentioned above, I do not code in Python, so this is all new to me. Please explain your solutions like I'm five.
Thank you!
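A hedged diagnosis: "UNC paths are not supported. Defaulting to Windows directory." is the warning cmd.exe prints when it starts with a UNC share as its current directory, and os.chdir("//dotatufs04/...") sets up exactly that before each os.system(...) call. The shell then falls back to C:\Windows, so the VBS macros run against the wrong folder and the later files never appear. One possible workaround (a sketch, not a verified fix; the local cwd below is an assumption) is to launch the script host directly instead of going through cmd.exe, or to map the share to a drive letter with net use first:
import subprocess

# Sketch: run one of the macros via cscript with an explicit local working
# directory, so cmd.exe never starts on the UNC share.
script = r"\\dotatufs04\crm\Public\temp\DDahms\Python\Update_MRS\Scripts\run_macro_delete_columns.vbs"
subprocess.call(["cscript", "//nologo", script], cwd=r"C:\Windows\Temp")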

Python csv - replace any columns with specified value

I have the following input file with a header row:
test_in.csv
LON,LAT,NUMBER,STREET,UNIT,CITY,DISTRICT,REGION,POSTCODE,ID,HASH
-72.5708234,41.4155142,39,HICKORY LA,,,,,,,8a0df668e0d49b02
-72.5647745,41.4160301,1213,KILLINGWORTH RD,,,,,,,b3ecaab86e476f46
I need to replace any given column's data with a specified string; for example, the CITY column's data should be changed from "" to "MyCity". My code only outputs the header and the first row:
python test_forcefld.py test_in.csv MyCity CITY out_test.csv
import csv
import sys

in_file_name = sys.argv[1]
force_data = sys.argv[2]
force_fld = sys.argv[3]
out_file_name = sys.argv[4]

# First read top row/header from input file
fieldnames = []
for filename in [in_file_name]:
    with open(filename, "rb") as f_in:
        reader = csv.reader(f_in)
        headers = next(reader)
        for h in headers:
            fieldnames.append(h)

#print headers to output file
with open(out_file_name, 'w') as fou:
    dw = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
    dw.writeheader()

f_in2 = open(in_file_name, "rb")
reader2 = csv.DictReader(f_in2)  # Uses the field names in this file
datarow = next(reader2)
datarow[force_fld] = force_data
with open(out_file_name, 'wa') as fou:
    dw2 = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
    dw2.writeheader()
    dw2.writerow(datarow)
Output shows
LON,LAT,NUMBER,STREET,UNIT,CITY,DISTRICT,REGION,POSTCODE,ID,HASH
-72.5708234,41.4155142,39,HICKORY LA,,MyCity,,,,,8a0df668e0d49b02
Your code is a little difficult to read, but assuming datarow is a dictionary containing your records: in your last row, change
dw2.writerow(datarow)
into
dw2.writerows(datarow)
While you're at it, you should also consider using datarow.keys() for your fieldnames, for conciseness.
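For what it's worth, on Python 3 the whole task can be collapsed into a single read-modify-write pass. This is a sketch only, assuming the same sys.argv convention as the question:
import csv
import sys

in_file_name, force_data, force_fld, out_file_name = sys.argv[1:5]

with open(in_file_name, newline='') as f_in, \
        open(out_file_name, 'w', newline='') as f_out:
    reader = csv.DictReader(f_in)                    # header row supplies the fieldnames
    writer = csv.DictWriter(f_out, fieldnames=reader.fieldnames)
    writer.writeheader()
    for row in reader:
        row[force_fld] = force_data                  # overwrite the chosen column in every row
        writer.writerow(row)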
This should do it, you just need pandas:
import pandas as pd
df = pd.read_csv(in_file_name, sep=',')
df['CITY'].fillna('MyCity', inplace=True)
And to save it:
df.to_csv(out_file_name)
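(One caveat: DataFrame.to_csv writes the row index as an extra first column by default; passing index=False, i.e. df.to_csv(out_file_name, index=False), keeps the output shaped like the input.)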
You can try something like this in order to produce your desired file.
I'm assuming your input file is called f_input.txt and your output file is called f_output.txt:
data = list(k.rstrip().split(',') for k in open("f_input.txt", 'r'))
with open("f_output.txt", 'a+') as f:
    f.write(",".join(data[0]) + '\n')
    for k in data[1:]:
        # CITY is column index 5; splice the replacement in between
        # the surrounding columns (adjust the indices for another column)
        f.write(",".join(k[:5] + ["MyCity"] + k[6:]) + "\n")
This worked in the end:
import csv
import sys

in_file_name = sys.argv[1]
force_data = sys.argv[2]
force_fld = sys.argv[3]
out_file_name = sys.argv[4]

# First read top row/header from input file
fieldnames = []
for filename in [in_file_name]:
    with open(filename, "rb") as f_in:
        reader = csv.reader(f_in)
        headers = next(reader)
        for h in headers:
            fieldnames.append(h)

f_in2 = open(in_file_name, "r")

#print headers to output file
fou = open(out_file_name, 'w')  # 'w', not the invalid mode 'wa'
dw = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
dw.writeheader()

reader2 = csv.DictReader(f_in2)  # Uses the field names in this file
dw2 = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
for row in reader2:
    row[force_fld] = force_data
    dw2.writerow(row)

Python file matching and appending

This is one file result.csv:
M11251TH1230
M11543TH4292
M11435TDS144
This is another file sample.csv:
M11435TDS144,STB#1,Router#1
M11543TH4292,STB#2,Router#1
M11509TD9937,STB#3,Router#1
M11543TH4258,STB#4,Router#1
Can I write a Python program to compare both files, so that if a line in result.csv matches the first field of a line in sample.csv, a 1 is appended to that line of sample.csv, and otherwise a 0?
import pandas as pd

d1 = pd.read_csv("1.csv", names=["Type"])
d2 = pd.read_csv("2.csv", names=["Type", "Col2", "Col3"])
d2["Index"] = 0
for x in d1["Type"]:
    d2["Index"][d2["Type"] == x] = 1
d2.to_csv("3.csv", header=False)
Here "1.csv" and "2.csv" are your CSV input files and "3.csv" is the result you need.
The solution using csv.reader and csv.writer (csv module):
import csv

newLines = []
# change the file path to the actual one
with open('./data/result.csv', newline='\n') as csvfile:
    data = csv.reader(csvfile)
    items = [''.join(line) for line in data]
with open('./data/sample.csv', newline='\n') as csvfile:
    data = list(csv.reader(csvfile))
    for line in data:
        line.append(1 if line[0] in items else 0)
        newLines.append(line)
with open('./data/sample.csv', 'w', newline='\n') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(newLines)
The sample.csv contents:
M11435TDS144,STB#1,Router#1,1
M11543TH4292,STB#2,Router#1,1
M11509TD9937,STB#3,Router#1,0
M11543TH4258,STB#4,Router#1,0
Since result.csv has only one column, I wonder why you made it a CSV at all; if it is never going to have more columns, a simple file read operation would suffice. Converting the data from result.csv to a dictionary also makes the membership lookups fast.
result_file = "result.csv"
sample_file = "sample.csv"
with open(result_file) as fp:
result_data = fp.read()
result_dict = dict.fromkeys(result_data.split("\n"))
"""
You can change the above logic, in case you have very few fields on csv like this:
result_data = fp.readlines()
result_dict = {}
for result in result_data:
key, other_field = result.split(",", 1)
result_dict[key] = other_field.strip()
"""
#Since sample.csv is a real csv, using csv reader and writer
with open(sample_file, "rb") as fp:
sample_data = csv.reader(fp)
output_data = []
for data in sample_data:
output_data.append("%s,%d" % (data, data[0] in result_dict))
with open(sample_file, "wb") as fp:
data_writer = csv.writer(fp)
data_writer.writerows(output_data)
The following snippet of code should work for you:
import csv

with open('result.csv', 'rb') as f:
    reader = csv.reader(f)
    result_list = []
    for row in reader:
        result_list.extend(row)

with open('sample.csv', 'rb') as f:
    reader = csv.reader(f)
    sample_list = []
    for row in reader:
        if row[0] in result_list:
            sample_list.append(row + [1])
        else:
            sample_list.append(row + [0])

with open('sample.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerows(sample_list)

Attempting to merge three columns in CSV, updating original CSV

Some example data:
title1|title2|title3|title4|merge
test|data|here|and
test|data|343|AND
",3|data|343|and
My attempt at coding this:
import csv
import StringIO

storedoutput = StringIO.StringIO()
fields = ('title1', 'title2', 'title3', 'title4', 'merge')
with open('file.csv', 'rb') as input_csv:
    reader = csv.DictReader(input_csv, fields, delimiter='|')
    for counter, row in enumerate(reader):
        counter += 1
        #print row
        if counter != 1:
            for field in fields:
                if field == "merge":
                    row['merge'] = ("%s%s%s" % (row["title1"], row["title3"], row["title4"]))
            print row
            storedoutput.writelines(','.join(map(str, row)) + '\n')
contents = storedoutput.getvalue()
storedoutput.close()
print "".join(contents)

with open('file.csv', 'rb') as input_csv:
    input_csv = input_csv.read().strip()
output_csv = []
output_csv.append(contents.strip())
if "".join(output_csv) != input_csv:
    with open('file.csv', 'wb') as new_csv:
        new_csv.write("".join(output_csv))
Output should be
title1|title2|title3|title4|merge
test|data|here|and|testhereand
test|data|343|AND|test343AND
",3|data|343|and|",3343and
For reference: when this code runs, the first print prints the rows as I would hope them to appear in the output CSV. The second print, however, prints the title row x times, where x is the number of rows.
Any input, corrections, or working code would be appreciated.
I think we can make this a lot simpler. Dealing with the rogue " was a bit of a nuisance, I admit, because you have to work hard to tell Python you don't want to worry about it.
import csv

with open('file.csv', 'rb') as input_csv, open("new_file.csv", "wb") as output_csv:
    reader = csv.DictReader(input_csv, delimiter='|', quoting=csv.QUOTE_NONE)
    writer = csv.DictWriter(output_csv, reader.fieldnames, delimiter="|",
                            quoting=csv.QUOTE_NONE, quotechar=None)
    merge_cols = "title1", "title3", "title4"
    writer.writeheader()
    for row in reader:
        row["merge"] = ''.join(row[col] for col in merge_cols)
        writer.writerow(row)
produces
$ cat new_file.csv
title1|title2|title3|title4|merge
test|data|here|and|testhereand
test|data|343|AND|test343AND
",3|data|343|and|",3343and
Note that even though you wanted the original file updated, I refused. Why? It's a bad idea, because then you can destroy your data while working on it.
How can I be so sure? Because that's exactly what I did when I first ran your code, and I know better. ;^)
That double quote in the last line is definitely messing up the csv.DictReader().
This works:
new_lines = []
with open('file.csv', 'rb') as f:
    # skip the first line
    new_lines.append(f.next().strip())
    for line in f:
        # strip the newline and split the fields
        line = line.strip().split('|')
        # extract the field data you want
        title1, title3, title4 = line[0], line[2], line[3]
        # turn the field data into a string and append it to the rest
        line.append(''.join([title1, title3, title4]))
        # save the new line for later
        new_lines.append('|'.join(line))

with open('file.csv', 'w') as f:
    # make one long string and write it to the new file
    f.write('\n'.join(new_lines))
import csv
import StringIO

stored_output = StringIO.StringIO()
with open('file.csv', 'rb') as input_csv:
    reader = csv.DictReader(input_csv, delimiter='|', quoting=csv.QUOTE_NONE)
    writer = csv.DictWriter(stored_output, reader.fieldnames, delimiter="|",
                            quoting=csv.QUOTE_NONE, quotechar=None)
    merge_cols = "title1", "title3", "title4"
    writer.writeheader()
    for row in reader:
        row["merge"] = ''.join(row[col] for col in merge_cols)
        writer.writerow(row)

contents = stored_output.getvalue()
stored_output.close()
print contents

with open('file.csv', 'rb') as input_csv:
    input_csv = input_csv.read().strip()
if input_csv != contents.strip():
    with open('file.csv', 'wb') as new_csv:
        new_csv.write("".join(contents))
