Abaqus Python: why are the extracted displacement values different?

I am trying to use Abaqus Python scripting to extract the nodal coordinates.
To do so, I first extract the original nodal positions and then add the displacement values.
But for one of my Abaqus models I notice that the displacement values I extract are different from the ones I find in Abaqus (see attached pictures).
I have no idea how or why this is happening.
Can someone help me?
You can find my code below.
for ODBname in os.listdir("D:/r0333338/Documents/MP/nodal_files_genereren/OBD"): # directory containing all the .odb files
    print 'Current File: ' + ODBname # check which file is being used
    ODBnamefull = 'D:/r0333338/Documents/MP/nodal_files_genereren/OBD/' + ODBname # full path to the .odb file; otherwise the file is looked for in the default work directory
    odb = openOdb(path=ODBnamefull) # open the ODB file
    ODBalleenNaam = ODBname.rstrip('.odb') # to cut off the .odb extension
    NodalName = ODBalleenNaam + '-nodal.txt' # name of the file to write
    for name, instance in odb.rootAssembly.instances.items(): # 'name' is the name of each part in the assembly; this is how the nodal coordinates of the undeformed test object (part) are retrieved
        print name
        type(name)
        name2 = 'DISK-1'
        if name == name2:
            numNodesTotal = len(instance.nodes) # number of nodes
            frame = odb.steps['Step-1'].frames[-1] # use the information of the last frame of Step-1
            dispField = frame.fieldOutputs['U'] # displacement field of the last frame of Step-1
            print 'total number of nodes: ' + str(numNodesTotal) # check how many nodes there are
            for i in range(numNodesTotal): # for every node:
                curNode = instance.nodes[i] # information of the current node
                #print curNode.label # number of the current node
                # convert the 2D displacement to a 3D displacement
                U1 = dispField.values[i].data[0] # assign the X-displacement to U1
                U2 = dispField.values[i].data[1] # assign the Y-displacement to U2
                array = [] # create an empty list to fill with the coordinates
                array.append(U1) # add the X-displacement
                array.append(U2) # add the Y-displacement
                array.append(0) # add the Z-displacement
                print 'node: '
                print curNode.label
                print 'displacement: '
                print array # check the 3D displacement
                print 'coordinates: '
                print curNode.coordinates
            odb.close()
        else:
            print 'name is not DISK-1 but: ' + str(name)
[Image: displacement values as shown in Abaqus]
[Image: displacement values extracted with Python]

You should pull the node label directly from the field data:
curNodeLabel = dispField.values[i].nodeLabel
You then need to use that to get the node:
curNode = instance.getNodeFromLabel(curNodeLabel)
Don't assume the node indexing is the same as the field-data indexing.
For consistency, I'd also make the for loop:
for i in range( len(dispField.values) ):
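Putting those pieces together, the inner loop from the question could look like the following sketch, reusing the question's instance and dispField variables:
# Sketch: iterate over the field data and look each node up by its label,
# instead of assuming values[i] corresponds to instance.nodes[i].
for i in range(len(dispField.values)):
    val = dispField.values[i]
    curNode = instance.getNodeFromLabel(val.nodeLabel)
    U1 = val.data[0] # X-displacement
    U2 = val.data[1] # Y-displacement
    print curNode.label, [U1, U2, 0], curNode.coordinates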

Despite the title, I am assuming that you want the final coordinates and not the displacement. As Daniel F mentioned, you should add COORD as a field output. In that case the code below should be helpful.
def findCoordSet(OdbName, StepName, InstanceName, SetName):
    """
    This ODB reading script does the following:
    -Retrieves coordinates at SetName
    """
    Coordinates = {'x': [], 'y': []}
    # Open the output database.
    odbName = OdbName + '.odb'
    odb = visualization.openOdb(odbName)
    lastFrame = odb.steps[StepName].frames[-1]
    coordset = odb.rootAssembly.instances[InstanceName.upper()].nodeSets[SetName.upper()]
    # Retrieve the nodal coordinates at the node set.
    dispField = lastFrame.fieldOutputs['COORD']
    dFieldpTip = dispField.getSubset(region=coordset)
    for i in range(len(dFieldpTip.values)):
        Coordinates['x'].append(dFieldpTip.values[i].data[0])
        Coordinates['y'].append(dFieldpTip.values[i].data[1])
    odb.close()
    return Coordinates
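A hypothetical call would look like this (the job, step, instance, and set names below are placeholders, and the function assumes the Abaqus visualization module has been imported):
coords = findCoordSet('Job-1', 'Step-1', 'PART-1-1', 'NODESET-1')
print coords['x'][:5], coords['y'][:5]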

Related

Non-homogeneous second-order differential equation with the second member as a list

Hello. I'm working on a school project and I'm still a beginner in Python. I need to solve this differential equation, but I don't know how since the second member (the right-hand side) is a list.
This is my code:
import numpy as np
import scipy.integrate as integr
import matplotlib.pyplot as plt
t = np.linspace(0, 799, 96)
Q=np.array([60.50,113.93,199.59,292.04,376.85,459.97,526.13,570.23,604.16,623.66,626.21,627.06,628.75,622.81,600.76,585.50,582.10,567.69,536.30,501.53,489.66,480.33,451.49,411.63,372.61,332.75,304.76,276.77,242.85,211.47,178.39,107.99,24.03,-36.18,-13.28,19.79,39.3,51.17,41.84,28.27,18.94,19.79,27.42,38.45,40.99,31.66,21.48,12.16,4.52,-2.25,-10.73,-17.52,-20.06,-17.52,-14.98,-14.98,-16.67,-25.15,-34.48,-36.18,-31.94,-27.02,-24.30,-20.06,-16.67,-14.13,-15.82,-20.06,-26.85,-33.63,-37.03,-35.33,-31.09,-32.79,-37.03,-39.57,-40.42,-37.03,-31.09,-25.15,-26.85,-32.79,-40.42,-44.66,-46.36,-47.20,-48.05,-51.45,-53.99,-51.45,-44.66,-36.18,-29.39,-24.30,-20.06,-17.52]
)
Pmes=np.array([70.10,77.17,85.26,87.62,90.33,92.02,93.41,94.89,96.07,96.79,97.56,98.63,99.45,99.96,99.71,99.45,99.5,99.09,98.58,97.91,97.25,96.58,96.02,95.56,95.15,94.43,94.07,94.33,94.69,94.53,93.61,91.97,90.49,85.67,81.88,83.11,89.87,89.77,89.31,89.05,89.1,89.31,89.62,89.31,88.9,88.64,88.33,87.82,87.41,87.26,87.21,86.85,86.23,85.82,85.41,84.95,84.13,83.47,83.21,83.11,82.85,82.44,81.88,81.47,81.01,80.6,80.34,80.09,79.93,79.68,79.37,78.91,78.45,78.14,77.78,77.73,77.52,77.32,77.12,76.76,76.24,75.83,75.63,75.37,74.91,74.14,73.48,73.02,72.71,72.25,71.74,71.38,71.12,70.86,70.61,70.81])
# Initial conditions
P0 = 70
dP0 = 70
# Value of the peripheral resistance
Rp = 0.63
# Values of Rp4, Rc4, C4 and L for the four-element Windkessel model
Rc4 = 0.045
C4 = 2.53
L = 0.0054
n = 96
c = 800 / n
h = 10**(-2)
A = np.array([[0, 1], [-Rc4/(C4*L*Rp), -Rc4/L - 1/(C4*Rp)]])
a = np.matrix(A)
# First derivative of Q (finite differences)
def D(F, i):
    if i != 0 and i != n - 1:
        return (F[i+1] - F[i-1]) / (2*h)  # central difference
    elif i == 0:
        return (F[i+1] - F[i]) / h  # forward difference
    else:
        return (F[i] - F[i-1]) / h  # backward difference
# Second derivative of Q
def DD(F, i):
    if i != 0 and i != n - 1:
        return (F[i+1] + F[i-1] - 2*F[i]) / (h*h)  # central difference
    elif i == 0:
        return (F[i+2] - 2*F[i+1] + F[i]) / (h*h)
    else:
        return (F[i] - 2*F[i-1] + F[i-2]) / (h*h)
# Second member (right-hand side) of the differential equation
QQ = [DD(Q, i)*Rc4 + D(Q, i)*(1/C4 + Rc4/(C4*Rp)) + Q[i]*Rc4/(C4*Rp) for i in range(n)]
# Forward-Euler integration of the state vector [P, dP/dt]
Pp = np.empty((n, 2))
Pp[0] = [P0, dP0]
for i in range(n-1):
    Pp[i+1] = Pp[i] + c * (A.dot(Pp[i]) + np.array([0.0, QQ[i]]))
P4WK = [Pp[i][0] for i in range(n)]
plt.plot(t, P4WK, 'b', label='4WK')
plt.plot(t, Pmes, 'g', label='Measured')
plt.title("Ascending aortic pressure for the four-element Windkessel model")
plt.xlabel('time (ms)')
plt.ylabel('Aortic pressure (mmHg)')
plt.legend()
plt.grid()
plt.show()
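Since the second member is only known at the sample times, one option is to interpolate it and hand the result to a standard solver. Below is a minimal sketch reusing t, A, QQ, P0 and dP0 from the code above; the choice of linear interpolation is an assumption:
from scipy.integrate import odeint

def rhs(y, ti):
    # y = [P, dP/dt]; interpolate the tabulated second member at time ti
    q = np.interp(ti, t, QQ)
    return A.dot(y) + np.array([0.0, q])

sol = odeint(rhs, [P0, dP0], t)
P4WK = sol[:, 0]  # the first state component is the pressure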

How can I modify an "if condition" in order to apply it to different lists at the same time?

I wrote a script to extract the sentences in a huge set which contain a particular pattern. The problem lies in the fact that, for some patterns, I check the value of an attribute at the beginning or end of the pattern to see if the word is present in a particular list. I have 4 dictionaries with 2 lists of positive and negative words each. So far I have written the script and I am able to use the function with one dictionary. I am wondering how I can improve my function so that I can use it with the 4 dictionaries at the same time without duplicating the block which loops over the dictionary.
I give an example with two dictionaries (since the script is quite long, I made a small example with all the necessary elements):
import os
import pandas as pd
import spacy
import spacy.attrs
from spacy.attrs import POS
from spacy import displacy
from spacy.lang.fr import French
from spacy.tokenizer import Tokenizer
from spacy.util import compile_prefix_regex, compile_infix_regex, compile_suffix_regex
from spacy.lemmatizer import Lemmatizer
from spacy.matcher import Matcher
nlp = spacy.load("fr_core_news_md")
##################### Lexicon lists
# Diko lexicon
lexicon = open(os.path.join('/h/Ressources/Diko.txt'), 'r', encoding='utf-8')
data = pd.read_csv(lexicon, sep=";", header=None)
data.columns = ["id", "terme", "pol"]
pol_diko_pos = data.loc[data.pol == 'positive', 'terme']
liste_pos_D = list(pol_diko_pos)
print(liste_pos_D[1])
pol_diko_neg = data.loc[data.pol == 'negative', 'terme']
liste_neg_D = list(pol_diko_neg)
#print(type(liste_neg_D))
# Polarimots lexicon
lexicon_p = open(os.path.join('/h/Ressources/polarimots.txt'), 'r', encoding='utf-8')
data_p = pd.read_csv(lexicon_p, sep="\t", header=None)
#data.columns = ["terme", "pol", "pos", "degre"]
data_p.columns = ["ind", "terme", "cat", "pol", "fiabilité"]
pol_polarimot_pos = data_p.loc[data_p.pol =='POS', 'terme']
liste_pos_P = list(pol_polarimot_pos)
print(liste_pos_P[1])
pol_polarimot_neg = data_p.loc[data_p.pol =='NEG', 'terme']
liste_neg_P = list(pol_polarimot_neg)
#print(type(liste_neg))
# ############################# Result lists
sentence_not_extract_lexique_1 = [] # sentences in which the pattern token was not found in the first lexicon
sentence_extract_lexique_1 = []     # sentences in which the pattern token is present in the first lexicon
sentence_not_extract_lexique_2 = [] # sentences in which the pattern token was not found in the second lexicon
sentence_extract_lexique_2 = []     # sentences in which the pattern token is present in the second lexicon
list_token_pos = []       # tokens found in the positive list of the lexicon
list_token_neg = []       # tokens found in the negative list of the lexicon
list_token_not_found = [] # tokens not found in the lexicon
# PATTERNS
pattern1 = [{"POS": {"IN": ["VERB", "AUX", "ADV", "NOUN", "ADJ"]}},
            {"IS_PUNCT": True, "OP": "*"},
            {"LOWER": "mais"}]
pattern1_tup = (pattern1, 1, True)
pattern3 = [{"LOWER": {"IN": ["très", "trop"]}},
            {"POS": {"IN": ["ADV", "ADJ"]}}]
pattern3_tup = (pattern3, 0, True)
pattern4 = [{"POS": "ADV"}, # negation adverb
            {"POS": "PRON", "OP": "*"},
            {"POS": {"IN": ["VERB", "AUX"]}},
            {"TEXT": {"IN": ["pas", "plus", "aucun", "aucunement", "point", "jamais", "nullement", "rien"]}}]
pattern4_tup = (pattern4, None, False)
# Tuple of patterns
pattern_list_tup = [pattern1_tup, pattern3_tup, pattern4_tup]
pattern_name = ['first', 'second', 'third', 'fourth']
length_of_list = len(pattern_list_tup)
print('length', length_of_list)
# Index of the attribute value to check in the lexicon
value_of_attribute = [0, -1, -1]
# Lexicons to use, as [negative list, positive list]
lexique_1 = [liste_neg_D, liste_pos_D]
lexique_2 = [liste_neg_P, liste_pos_P]
# Text (example sentences)
file =b= ["Le film est superbe mais cette édition DVD est nulle !",
"J'allais dire déplorable, mais je serais peut-être un peu trop extrême.",
"Hélas, l'impression de violence, bien que très bien rendue, ne sauve pas cette histoire gothique moderne de la sécheresse scénaristique, le tout couvert d'un adultère dont le propos semble être gratuit, classique mais intéressant...",
"Tout ça ne me donne pas envie d'utiliser un pieu mais plutôt d'aller au pieu (suis-je drôle).",
"Oui biensur, il y a la superbe introduction des parapluies au debut, et puis lorsqu il sent des culs tout neufs et qu il s extase, j ai envie de faire la meme chose apres sur celui de ma voisine de palier (ma voisine de palier elle a un gros cul, mais j admets que je voudrais bien lui foute mon tarin), mais c est tout, apres c est un film tres noir, lent et qui te plonge dans le depression.",
"Et bien hélas ce DVD ne m'a pas appris grand chose par rapport à la doc des agences de voyages et la petite dame qui fait ses dessins est bien gentille mais tout tourne un peu trop autour d'elle.",
"Au final on passe de l'un a l'autre sans subtilité, et on n'arrive qu'à une caricature de plus : si Kechiche avait comme but initial déclaré de fustiger les préjugés, c'est le contraire qui ressort de ce ''film'' truffé de clichés très préjudiciables pour les quelques habitants de banlieue qui ne se reconnaîtront pas dans cette lourde farce.",
"-ci écorche les mots, les notes... mais surtout nos oreilles !"]
# Loop to check each sentence and extract the ones containing the patterns specified above
for pat in range(0, length_of_list):
    matcher = Matcher(nlp.vocab)
    matcher.add("matching_2", None, pattern_list_tup[pat][0])
    # print(pat)
    # print(pattern_list_tup[pat][0])
    for sent in file:
        doc = nlp(sent)
        matches = matcher(doc)
        for match_id, start, end in matches:
            span = doc[start:end].lemma_.split()
            #print(f"{pattern_name[pat]} pattern found: {span}")
This is the part I want to modify to use it with another dictionary. The goal is to be able to retrieve the sentences extracted by 4 different dictionaries, make a comparison, and then check which sentences are present in more than two lists.
# Condition to use the lexicon and extract the sentence
if pattern_list_tup[pat][2]:
    if span[value_of_attribute[pat]] in lexique_1[pattern_list_tup[pat][1]]:
        if sent not in sentence_extract_lexique_1:
            sentence_extract_lexique_1.append(sent)
        if pattern_list_tup[pat][1] == 1:
            list_token_pos.append(span[value_of_attribute[pat]])
        if pattern_list_tup[pat][1] == 0:
            list_token_neg.append(span[value_of_attribute[pat]])
    else:
        # the text form is not present in the lexicon; the lemma form is needed
        list_token_not_found.append(span[value_of_attribute[pat]])
        sentence_not_extract_lexique_1.append(sent)
else:
    if sent not in sentence_extract_lexique_1:
        sentence_extract_lexique_1.append(sent)
print(len(sentence_extract_lexique_1))
print(sentence_extract_lexique_1)
One solution I found is to duplicate the code above and change the name of the lists where the sentences are stored, but since I have 2 dictionaries (actually 4 in the original), duplicating will make the code longer. Is there a way to combine looping over the 2 dictionaries and append the results to the right lists? So, for example, when I use lexique_1, all the extracted sentences are sent to sentence_extract_lexique_1, and so on for the others.
In my opinion, try using an if-elif-else chain, or just the if-elif block, since the elif statement catches the specific condition of interest; here, you are trying to catch one specific condition to compare and check against the sentences. Keep in mind that the if-elif-else chain is a good method, but it only works when you need a single test to pass: Python runs the first test that passes and skips the rest. That makes it very efficient and lets you test for one specific condition.
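If the goal is simply to avoid duplicating the block per lexicon, another option is to parameterise it: loop over the lexicons and keep one result list per lexicon in a dict. Below is a sketch under that assumption, reusing the variables from the question (the dict keys are illustrative):
lexicons = {'lexique_1': lexique_1, 'lexique_2': lexique_2}
extracted = {name: [] for name in lexicons}
not_extracted = {name: [] for name in lexicons}

for lex_name, lexique in lexicons.items():
    for pat in range(length_of_list):
        matcher = Matcher(nlp.vocab)
        matcher.add("matching_2", None, pattern_list_tup[pat][0])
        for sent in file:
            doc = nlp(sent)
            for match_id, start, end in matcher(doc):
                span = doc[start:end].lemma_.split()
                token = span[value_of_attribute[pat]]
                # extract when the pattern needs no lexicon check, or when the token is in the lexicon
                if not pattern_list_tup[pat][2] or token in lexique[pattern_list_tup[pat][1]]:
                    if sent not in extracted[lex_name]:
                        extracted[lex_name].append(sent)
                else:
                    not_extracted[lex_name].append(sent)

# sentences extracted by both lexicons
common = set(extracted['lexique_1']) & set(extracted['lexique_2'])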

Extract nodal coordinates from the deformed test subject (Abaqus-Python)

I am trying to make a Python script to extract the nodal coordinates from the ODB file (from Abaqus).
So far I have come up with the code attached below (don't mind the extra information I put behind the #; sometimes it's just so I can keep track of what I'm doing).
The problem is that the coordinates I extract are those of the undeformed test subject, and I need the coordinates of the nodes of the deformed test subject.
Can somebody help me with how to reach this information using Python code?
from abaqus import *
from abaqusConstants import *
import __main__
import section
import regionToolset
import displayGroupMdbToolset as dgm
import part
import material
import assembly
import step
import interaction
import load
import mesh
import optimization
import job
import sketch
import visualization
import xyPlot
import displayGroupOdbToolset as dgo
import connectorBehavior
import shutil
import os
import sys
from odbAccess import openOdb
for ODBname in os.listdir("D:/r0333338/Documents/MP/nodal_files_genereren/OBD"): # this is where all your ODB files are located; the ODB file to open is also chosen here
    SUBname = ODBname[1:3] # My subject ID is saved in the ODB name - this helps me create the file (is this taking the 3rd to 6th character of ODBname?)
    print 'Current File: ' + ODBname # check which file is being used (does this include '.odb'?)
    ODBnamefull = 'D:/r0333338/Documents/MP/nodal_files_genereren/OBD/' + ODBname # full path to the ODB file, otherwise Abaqus tries to find it in the default work directory
    odb = openOdb(path=ODBnamefull) # open the ODB file
    assembly = odb.rootAssembly # declaring the assembly?
    session.viewports['Viewport: 1'].odbDisplay.setFrame(step=0, frame=1)
    numNodes = 0 # set numNodes to zero
    f = open("D:/r0333338/Documents/MP/nodal_files_genereren/ODBoutput/nodal.txt", "w") # if the file already exists, use "a" instead of "w"
    for name, instance in assembly.instances.items(): # 'name' is the name of each part in the assembly?
        n = len(instance.nodes) # count the number of nodes
        print 'Number of nodes of instance %s: %d' % (name, n) # does not need to go into the file, could actually be left out (but it is a good check?)
        numNodes = numNodes + n # count the total number of nodes (across all parts), but not really needed?
        f.write("*Part, name=Part-1" + "\n") # required by the MatchID rules
        f.write("*Nodes" + "\n") # required by the MatchID rules
        if instance.embeddedSpace == THREE_D: # if the model is 3D
            print '   X    Y    Z' # does not need to go into the file, but a good check to see where we are
            for node in instance.nodes:
                #print node # print the node
                f.write(str(node.label) + ";") # write the node number
                f.write(str(node.coordinates[0]) + ";" + str(node.coordinates[1]) + ";" + str(node.coordinates[2]) + "\n") # write the coordinates [X;Y;Z] and a newline
        else: # if the model is 2D
            print '   X    Y    ;0' # does not need to go into the file, but a good check to see where we are
            for node in instance.nodes:
                #print node # print the node
                f.write(str(node.label) + ";")
                f.write(str(node.coordinates[0]) + ";" + str(node.coordinates[1]) + ";" + str(node.coordinates[2]) + "\n") # write the coordinates [X;Y;Z] and a newline
        f.write("*End Part") # required by the MatchID rules
    f.close()
Get the displacement field:
u = odb.steps['Step-1'].frames[-1].fieldOutputs['U']
Then u.values is a list of all nodal values:
u.values[i].data -> array of (ux, uy, uz)
u.values[i].nodeLabel -> node label
Then you grab the original position like this:
instance.getNodeFromLabel(u.values[i].nodeLabel).coordinates
You can also directly get the deformed coordinates as a field output, but you need to request COORD output when you run the analysis.
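If COORD was not requested, the deformed position can be reconstructed as the original coordinates plus the displacement. A minimal sketch, assuming an open odb and an instance as in the question (the zero-padding handles 2D models, where U has only two components):
u = odb.steps['Step-1'].frames[-1].fieldOutputs['U']
for v in u.values:
    node = instance.getNodeFromLabel(v.nodeLabel)
    disp = list(v.data) + [0.0] # pad so 2D displacements get a zero Z-component
    deformed = [c + d for c, d in zip(node.coordinates, disp)]
    print node.label, deformed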

Histogram representing the number of substitutions, insertions and deletions in sequences

I have two columns that represent the right sequence and the predicted sequence. I want to compute statistics on the number of deletions, substitutions and insertions by comparing each right sequence with its predicted sequence.
I computed the Levenshtein distance to get the number of characters which differ (see the function below), and wrote an error_dist function to get the most common errors (in terms of substitutions).
Here is a sample of my data:
de de
date date
pour pour
etoblissemenls etablissements
avec avec
code code
communications communications
r r
seiche seiche
titre titre
publiques publiques
ht ht
bain bain
du du
ets ets
premier premier
dans dans
snupape soupape
minimum minimum
blanc blanc
fr fr
nos nos
au au
bl bl
consommations consommations
somme somme
euro euro
votre votre
offre offre
forestier forestier
cs cs
de de
pour pour
de de
paye r
cette cette
votre votre
valeurs valeurs
des des
gfda gfda
tva tva
pouvoirs pouvoirs
de de
revenus revenus
offre offre
ht ht
card card
noe noe
montant montant
r r
comprises comprises
quantite quantite
nature nature
ticket ticket
ou ou
rapide rapide
de de
sous sous
identification identification
du du
document document
suicide suicide
bretagne bretagne
tribunal tribunal
services services
cif cif
moyen moyen
gaec gaec
total total
lorsque lorsque
contact contact
fermeture fermeture
la la
route route
tva tva
ia ia
noyal noyal
brie brie
de de
nanterre nanterre
charcutier charcutier
semestre semestre
de de
rue rue
le le
bancaire bancaire
martigne martigne
recouvrement recouvrement
la la
sainteny sainteny
de de
franc franc
rm rm
vro vro
Here is my code:
import pandas as pd
import collections
import numpy as np
import matplotlib.pyplot as plt
import distance
def error_dist():
    df = pd.read_csv('data.csv', sep=',')
    df = df.astype(str)
    df = df.replace(['é', 'è', 'È', 'É'], 'e', regex=True)
    df = df.replace(['à', 'â', 'Â'], 'a', regex=True)
    dictionnary = []
    for i in range(len(df)):
        if df.manual_raw_value[i] != df.raw_value[i]:
            text = df.manual_raw_value[i]
            text2 = df.raw_value[i]
            x = len(df.manual_raw_value[i])
            y = len(df.raw_value[i])
            z = min(x, y)
            for t in range(z):
                if text[t] != text2[t]:
                    d = (text[t], text2[t])
                    dictionnary.append(d)
    #print(dictionnary)
    dictionnary_new = dict(collections.Counter(dictionnary).most_common(25))
    pos = np.arange(len(dictionnary_new.keys()))
    width = 1.0
    ax = plt.axes()
    ax.set_xticks(pos + (width / 2))
    ax.set_xticklabels(dictionnary_new.keys())
    plt.bar(range(len(dictionnary_new)), dictionnary_new.values(), width, color='g')
    plt.show()
[Image: bar chart of the 25 most common substitution errors]
And the Levenshtein distance:
def levenstein_dist():
    df = pd.read_csv('data.csv', sep=',')
    df = df.astype(str)
    df['string diff'] = df.apply(lambda x: distance.levenshtein(x['raw_value'], x['manual_raw_value']), axis=1)
    plt.hist(df['string diff'])
    plt.show()
[Image: histogram of the Levenshtein distances]
Now I want to make a histogram showing three bins: the number of substitutions, the number of insertions and the number of deletions. How can I proceed?
Thank you
Thanks to the suggestions of @YohanesGultom, the answer to the problem can be found here:
http://www.nltk.org/_modules/nltk/metrics/distance.html
or
https://gist.github.com/kylebgorman/1081951
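For reference, here is a minimal sketch of counting the three operation types yourself with a Wagner-Fischer table and a backtrace (the function and variable names are illustrative); summing the returned tuples over all row pairs gives the three values to pass to plt.bar:
def edit_ops(ref, hyp):
    """Count (substitutions, insertions, deletions) turning ref into hyp."""
    m, n = len(ref), len(hyp)
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        dp[i][0] = i
    for j in range(n + 1):
        dp[0][j] = j
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(dp[i - 1][j] + 1,         # deletion
                           dp[i][j - 1] + 1,         # insertion
                           dp[i - 1][j - 1] + cost)  # substitution or match
    # Backtrace to count each operation type.
    subs = ins = dels = 0
    i, j = m, n
    while i > 0 or j > 0:
        if i > 0 and j > 0 and dp[i][j] == dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1]):
            subs += ref[i - 1] != hyp[j - 1]
            i, j = i - 1, j - 1
        elif i > 0 and dp[i][j] == dp[i - 1][j] + 1:
            dels += 1
            i -= 1
        else:
            ins += 1
            j -= 1
    return subs, ins, dels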

Print results to file

Can somebody help me with my code? I want the results of the code printed to a .txt file on my system, but I get an error and I can't resolve it by myself. Please can somebody help me with the code?
# Import libraries for system functions.
import sys
import datetime
import time
############
## Part 1 ##
############
# Read out the temperature.
# Define a dictionary (temp).
temp = {}
sensorids = ["28-0416b07c2dff"]
# Loop over all the sensors listed in the array above.
for sensor in range(len(sensorids)):
    tfile = open("/sys/bus/w1/devices/" + sensorids[sensor] + "/w1_slave") # RPi 2/3 with the new kernel.
    # Read all the data from the "file" into a variable.
    text = tfile.read()
    # Close the "file" after we have read it.
    tfile.close()
    # Split the text on newlines (\n)
    # and select the 2nd line [1] (1st line = [0]).
    secondline = text.split("\n")[1]
    # Split the line into "words" on the spaces.
    # Select the 10th "word" [9] (counting from 0).
    temperaturedata = secondline.split(" ")[9]
    # The first 2 characters are "t="; strip them off
    # and convert the string to a number right away.
    temperature = float(temperaturedata[2:])
    # Divide the temperature value by 1000 to get the correct value.
    temp[sensor] = temperature / 1000
    # Last possibility.
    temperatuur2 = temp[sensor]
    # Print the data to the console.
    #print "sensor", sensor, "=", temp[sensor], "degrees."
    print temperatuur2
############
## Part 2 ##
############
# Write the values to a text file.
# Open the file.
file = open("/home/pi/python_prog/_Log/_temp.txt", "a")
# Write the value to the text file [Time: Temp].
file.write(time.strftime("%A %d-%m-%Y %H:%M:%S: "))
file.write(temperatuur2)
file.write('\r\n')
#file.write()
# Close the file.
file.close()
It goes wrong in the "Part 2" section. When I run the script I get the following error:
Traceback (most recent call last):
  File "./python_prog/temp2.py", line 48, in <module>
    file.write(temperatuur2)
TypeError: expected a character buffer object
Please can you help me!?
file.write(str(temperatuur2))
You should be writing strings to files.
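A sketch of the same write with the timestamp and the number formatted in one go, reusing the variables from the question (the %.3f precision is an arbitrary choice):
file.write("%s %.3f\r\n" % (time.strftime("%A %d-%m-%Y %H:%M:%S:"), temperatuur2))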
