Print resultes to file - python

Can somebody help my with mine code. I want de results of the code printed in a .txt file on my system, but i cat an error and i can't resolve it by my self, please can somebody help me with the code ?
# Importeer bibliotheek voor systeemfuncties.
import sys
import datetime
import time
############
## Part 1 ##
############
#temperatuur uitlezen
# Definieer een array (temp).
temp = {}
sensorids = ["28-0416b07c2dff"]
# loop net zo lang alles sensors af dat in het array hieboven staan.
for sensor in range(len(sensorids)):
tfile = open("/sys/bus/w1/devices/"+ sensorids[sensor] +"/w1_slave") #RPi 2,3 met nieuwe kernel.
# Lees alle dat uit het "bestand" in een variabele.
text = tfile.read()
# Sluit het "bestand" nadat we het gelezen hebben.
tfile.close()
# We gaan nu de tekst splitsen per nieuwe regel (\n)
# en we selecteren de 2e regel [1] (1e regel = [0])
secondline = text.split("\n")[1]
# Splits de regel in "woorden", er wordt gespleten op de spaties.
# We selecteren hier het 10 "woord" [9] (tellend vanaf 0)
temperaturedata = secondline.split(" ")[9]
# De eerste 2 karakters zijn "t=", deze moeten we weghalen.
# we maken meteen van de string een integer (nummer).
temperature = float(temperaturedata[2:])
# De temperatuurwaarde moeten we delen door 1000 voor de juiste waarde.
temp[sensor] = temperature / 1000
#laatste mogelijkheid
temperatuur2 = temp[sensor]
# print de gegevens naar de console.
#print "sensor", sensor, "=", temp[sensor], "graden."
print temperatuur2
############
## Part 2 ##
############
#waardes wegschrijven naar text bestand
#Open file
file = open("/home/pi/python_prog/_Log/_temp.txt", "a")
#schrijf waarde testbestand [Tijd: Temp]
file.write(time.strftime("%A %d-%m-%Y %H:%M:%S: " ))
file.write(temperatuur2)
file.write('\r\n')
#file.write()
#Sluit File
file.close()
It goes wrong in the "part 2" selection, When i run the script i get the following error:
"Traceback (most recent call last):
File "./python_prog/temp2.py", line 48, in
file.write(temperatuur2)
TypeError: expected a character buffer object"
Please can you help me!?

file.write(str(temperatuur2))
you should be writing strings to files.

Related

Count occurrences of words in a text with special characters

I want to count occurrences of each word in a text to spot the key words coming over the most.
This script works quite well but the problem is that this text is written in French. So there are important key words that would be missed.
For example the word Europe may appear in the text like l'Europe or en Europe.
In the first case, the code will remove the apostrophe and l'Europe is considered as one unique word leurope in the final result.
How can I improve the code to split l' from Europe?
import string
# Open the file in read mode
#text = open("debat.txt", "r")
text = ["Monsieur Mitterrand, vous avez parlé une minute et demie de moins que Monsieur Chirac dans cette première partie. Je préfère ne pas avoir parlé une minute et demie de plus pour dire des choses aussi irréelles et aussi injustes que celles qui viennent d'être énoncées. Si vous êtes d'accord, nous arrêtons cette première partie, nous arrêtons les chronomètres et nous repartons maintenant pour une seconde partie en parlant de l'Europe. Pour les téléspectateurs, M. Mitterrand a parlé 18 minutes 36 et M. Chirac, 19 minutes 56. Ce n'est pas un drame !... On vous a, messieurs, probablement jamais vus plus proches à la fois physiquement et peut-être politiquement que sur les affaires européennes... les Français vous ont vus, en effet, à la télévision, participer ensemble à des négociations, au coude à coude... voilà, au moins, un domaine dans lequel, sans aucun doute, vous connaissez fort bien, l'un et l'autre, les opinions de l'un et de l'autre. Nous avons envie de vous demander ce qui, aujourd'hui, au -plan européen, vous sépare et vous rapproche ?... et aussi lequel de vous deux a le plus évolué au cours des quelques années qui viennent de s'écouler ?... #"]
# Create an empty dictionary
d = dict()
# Loop through each line of the file
for line in text:
# Remove the leading spaces and newline character
line = line.strip()
# Convert the characters in line to
# lowercase to avoid case mismatch
line = line.lower()
# Remove the punctuation marks from the line
line = line.translate(line.maketrans("", "", string.punctuation))
# Split the line into words
words = line.split(" ")
# Iterate over each word in line
for word in words:
# Check if the word is already in dictionary
if word in d:
# Increment count of word by 1
d[word] = d[word] + 1
else:
# Add the word to dictionary with count 1
d[word] = 1
sorted_tuples = sorted(d.items(), key=lambda item: item[1], reverse=True)
sorted_dict = {k: v for k, v in sorted_tuples}
# Print the contents of dictionary
for key in list(sorted_dict.keys()):
print(key, ":", sorted_dict[key])
line = line.translate(line.maketrans("", "", string.punctuation))
removes all punctuation characters (l'Europe becomes lEurope). Instead of that, you may want to replace them by spaces, using for example:
for p in string.punctuation:
line = line.replace(p, ' ')
Where you currently have:
line = line.translate(line.maketrans("", "", string.punctuation))
... you can add the following line before it:
line = line.translate(line.maketrans("'", " "))
This will replace the ' character with a space wherever it's found, and the line using string.punctuation will behave exactly as before, except that it will not encounter any ' characters since we have already replaced them.

Getting "raise JSONDecodeError("Expecting value", s, err.value) from None json.decoder.JSONDecodeError

I'm getting the JSONDecodeError error when i try to run the below code. I am not sure how to fix it.
I am trying to stream tweets. It works fine for few tweets and all of a sudden I get the error and the stream stops please help how can I fix this?
UPDATED FULL CODE
from tweepy import API
from tweepy import Cursor
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from textblob import TextBlob
from googletrans import Translator
import json
import re
import csv
ACCESS_TOKEN = "2446723783-cqGTmWPdtJI5HhCT7UmmET2xVY7xlA1RqlBQdQl"
ACCESS_TOKEN_SECRET = "6ZRIqd9HDCgZxsR6iMUbt6eOgk1YVsz59lEsvL30eFnPf"
CONSUMER_KEY = "slWhR4Z6VOjp0R3ojx1B2a4pr"
CONSUMER_SECRET = "6OrySqqUIwQM8ioBycqbStWizM6KkLCMFecjXPvbVmsrsw6eNT"
Count = 0
translator = Translator()
class StdOutListener(StreamListener):
def on_data(self, data):
global Count
#Data has the JSON STring
y = json.loads(data)
print("--------------------")
try:
y = y['extended_tweet']['full_text']
y = str(y)
## Portuguese to English conversion block
translations = translator.translate(y, dest='en')
print(translations.origin, '\n -> \n', translations.text)
z = translations.text
a = translations.origin
row = [data,a,z]
with open(fetched_tweets_filename, 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
Count = Count + 1
print(Count)
except KeyError:
y = y['text']
y = str(y)
## Portuguese to English conversion block
translations = translator.translate(y, dest='en')
print(translations.origin, '\n -> \n', translations.text)
z = translations.text
a = translations.origin
row = [data,a,z]
with open(fetched_tweets_filename, 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
Count = Count + 1
print(Count)
### Sentiment Analysis block
analysis = TextBlob(z)
if analysis.sentiment.polarity > 0:
print("sentiment is positiv")
elif analysis.sentiment.polarity == 0:
print("sentiment is Neutral")
else:
print("sentiment is Negative")
print("--------------------\n")
return True
def on_error(self,status):
print(status)
if __name__ == "__main__":
listener=StdOutListener()
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
fetched_tweets_filename = "datas3.csv"
stream = Stream(auth, listener)
stream.filter(track=['como'],languages=['pt'])
Trackback
--------------------
RT #VBrasil_: Você decidiu chamar todos os meninos para assistir um filme na sua casa! Mas como cada um queria assistir algo, vocês decidir…
->
RT #VBrasil_: You decided to call all the boys to watch a movie in your house! But as everyone wanted to watch something, you decide ...
1
sentiment is Neutral
--------------------
--------------------
RT #queroqtefodas2: Como é ter uma turma unida ?
->
RT #myselfs: What is it like to have a group together?
2
sentiment is Neutral
--------------------
--------------------
RT #thiegonovais: FUTURO AMEAÇADO?
Entenda como é importante os fãs de Harry Potter se unirem mais do que nunca. Isso pode impactar tudo!…
->
RT #thiegonovais: FUTURE THREATENED?
Understand how important it is for Harry Potter fans to come together more than ever. This can impact everything! ...
3
sentiment is positiv
--------------------
--------------------
RT #nadiardgs: como assim você OUSA seguir no Instagram a conta do meu namorado de 27 anos desempregado streamer de lol e fã de Djonga?
tá…
->
RT #nadiardgs: how dare you follow on Instagram the account of my unemployed 27-year-old boyfriend lol streamer and Djonga fan?
OK…
4
sentiment is positiv
--------------------
--------------------
Amanda foi dormir na amiga dela, e eu fiquei sozinha como sempre.
->
Amanda went to bed with her friend, and I was alone as usual.
5
sentiment is Negative
--------------------
--------------------
Este é só um exemplo de "voto de cabresto" pelo interior do nosso Brasil. O #ptbrasil sabia fazer isto muito bem, entretanto...
->
This is just an example of a "halting vote" for the interior of our Brazil. #Ptbrasil knew how to do this very well, though ...
6
sentiment is positiv
--------------------
--------------------
E ainda tem gente que tira sarro de feminismo vtnc...
Com nossa luta há décadas ainda vemos notícias como essa diariamente, imagina sem!
->
And there are still people who make fun of vtnc feminism ...
With our struggle for decades we still see news like this every day, imagine without!
7
sentiment is positiv
--------------------
--------------------
RT #cvrlosmdz: Eu nem tenho palavras para quem quebra pinky promise smh como é que conseguem quebrar uma cena tão séria ? Oh pinky mm é a p…
->
RT #cvrlosmdz: I do not even have words for anyone who breaks pinky promise smh how can they break such a serious scene? Oh pinky mm is the p ...
8
sentiment is Negative
--------------------
--------------------
Se tem uma coisa que vou sentir saudade quando defender o mestrado é a refeição por 3 reais no RU, putz como eu amo pagar só treisss reaissssss numa refeição
QUANTO?
SÓ TREISSSSS REAISSSSS
->
If there is one thing I will miss when defending the masters is the meal for 3 reals in the UK, putz as I love to pay only real reals in a meal
HOW MUCH?
ONLY TREISSSSS REAISSSSS
9
sentiment is positiv
--------------------
--------------------
Estou doida p/ir pro cinema, porem cm o gato mas como eu não tenho kkk vai cm as manas mesmo!!!!
->
I'm crazy to go to the cinema, but the cat but I do not have kkk goes in the same manas !!!!
10
sentiment is Negative
--------------------
--------------------
#expedientefutebol futebol é assim, cabe ao jogador se portar como exemplo que é, e enquanto todos lamberem ele, irá continuar assim, e na seleção brasileira ele joga merda nenhuma
->
# football football is so, it's up to the player to be an example, and while everyone licks it, will continue like this, and in the Brazilian team he plays no shit
11
sentiment is positiv
--------------------
--------------------
KKKKKKKKKKK, nem como tanto assim
->
KKKKKKKKKKK, or how much
12
sentiment is positiv
--------------------
--------------------
Tô assistindo os vídeos antigos do Felipe Neto e lembrando de como eu era feliz
->
I'm watching Felipe Neto's old videos and remembering how happy I was
13
sentiment is positiv
--------------------
--------------------
a julia as vezes eh tao seca que qnd fala eu te amo eu fico boba
->
Julia sometimes it's so dry that you say I love you, I'm silly.
14
sentiment is Negative
--------------------
--------------------
RT #hellboyahs: gente me ajuda, como dizer para uma pessoa que não quer mais conversar com ela por ela ser chata mas sem magoar a pessoa?
->
RT #hellboyahs: People help me, how to tell a person who does not want to talk to her anymore because she is annoying but not hurting the person?
15
sentiment is Negative
--------------------
--------------------
#resistxre #CabelloJane458 Uai cada um tem sua opinião e tem pessoas que expressão sua opinião como vc expressou a sua . O RBR é uma produção tosca kkkkkkkkkkk da vergonha alheia pq poderia ser melhor a Record poderia fazer algo legal se não nem fizesse pra virar aquilo .
->
#resistxre # CabelloJane458 Uai everyone has their opinion and has people who express their opinion as you expressed yours. The RBR is a crude production kkkkkkkkkkk of the shame of others could be better Record could do something cool if it did not even make it turn.
16
sentiment is positiv
--------------------
--------------------
RT #Bultaourune: Lembando que o BTS é o #1 Ato Coreano >NA HISTÓRIA< indicado a Top Duo/Group no BBMAs, also eles serão tb o #1 k-Ato a gan…
->
RT #Bultaourune: Remembering that the BTS is the # 1 Korean Act & NA HISTORY & lt; indicated the Top Duo / Group in the BBMAs, also they will be tb o # 1 k-Ato a gan ...
17
sentiment is positiv
--------------------
--------------------
Traceback (most recent call last):
File "C:\Users\ManoharRaoN\Desktop\SentizAnalyzer\Version3.py", line 85, in <module>
stream.filter(track=['como'],languages=['pt'])
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 453, in filter
self._start(is_async)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 368, in _start
self._run()
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 300, in _run
six.reraise(*exc_info)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\six.py", line 693, in reraise
raise value
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 269, in _run
self._read_loop(resp)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 331, in _read_loop
self._data(next_status_obj)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 303, in _data
if self.listener.on_data(data) is False:
File "C:\Users\ManoharRaoN\Desktop\SentizAnalyzer\Version3.py", line 48, in on_data
translations = translator.translate(y, dest='en')
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\googletrans\client.py", line 172, in translate
data = self._translate(text, dest, src)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\googletrans\client.py", line 81, in _translate
data = utils.format_json(r.text)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\googletrans\utils.py", line 62, in format_json
converted = legacy_format_json(original)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\site-packages\googletrans\utils.py", line 54, in legacy_format_json
converted = json.loads(text)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
[Finished in 13.7s with exit code 1]
[shell_cmd: python -u "C:\Users\ManoharRaoN\Desktop\SentizAnalyzer\Version3.py"]
[dir: C:\Users\ManoharRaoN\Desktop\SentizAnalyzer]
[path: C:\ProgramData\Oracle\Java\javapath;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Windows\System32\OpenSSH\;C:\Program Files (x86)\Webex\Webex\Applications;C:\Program Files\IBM\SPSS\Modeler\18.1.1\ext\bin\spss.TMWBServer\bin;C:\Program Files (x86)\Sennheiser\SoftphoneSDK\;C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37;C:\Users\ManoharRaoN\AppData\Local\Programs\Python\Python37\Scripts;C:\Users\ManoharRaoN\AppData\Roaming\nltk_data;C:\Users\ManoharRaoN\AppData\Local\Microsoft\WindowsApps;C:\Users\ManoharRaoN\AppData\Local\Box\Box Edit\]
I think It happens on certain tweets, How can I pass this when it occurs.
Not sure I can be completely wrong.
I also get the below error when I try to write a string which contains a special character such as "te amo ❤️ "
Below is the error
--------------------
RT #PortalMessi: “Messi seria para mim, como um jogador que eu vi jogar, o número 1. Meu pai sempre disse que o melhor foi Pelé, mas eu não…
->
RT #PortalMessi: "Messi would be for me, as a player I saw play, the number 1. My father always said that the best was Pele, but I did not ...
1
sentiment is positiv
--------------------
--------------------
RT #felipeneto: Informo que a dívida de 170 mil reais do Botafogo comigo como pessoa física foi integralmente perdoada, junto com os juros…
->
RT #felipeneto: I report that Botafogo's debt of 170,000 reais with me as a natural person has been totally forgiven, along with interest ...
2
sentiment is positiv
--------------------
--------------------
#MaluMonteiro08 óh #deus como pode tamanha beleza?????
->
# MaluMonteiro08 ohh #deus how can such beauty ?????
3
sentiment is Neutral
--------------------
--------------------
Meu pai amava.
Descanse em paz, rainha! ❤️
->
My father loved it.
Rest in peace, queen! (I.e.
Traceback (most recent call last):
File "C:\Users\Stramzik\Desktop\SentizAnalyzer\Version3.py", line 32, in on_data
y = y['extended_tweet']['full_text']
KeyError: 'extended_tweet'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Stramzik\Desktop\SentizAnalyzer\Version3.py", line 84, in <module>
stream.filter(track=['como'],languages=['pt'])
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 453, in filter
self._start(is_async)
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 368, in _start
self._run()
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 300, in _run
six.reraise(*exc_info)
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\site-packages\six.py", line 693, in reraise
raise value
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 269, in _run
self._read_loop(resp)
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 331, in _read_loop
self._data(next_status_obj)
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\site-packages\tweepy\streaming.py", line 303, in _data
if self.listener.on_data(data) is False:
File "C:\Users\Stramzik\Desktop\SentizAnalyzer\Version3.py", line 55, in on_data
writer.writerow(row)
File "C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
UnicodeEncodeError: 'charmap' codec can't encode characters in position 5967-5968: character maps to <undefined>
[Finished in 5.5s with exit code 1]
[shell_cmd: python -u "C:\Users\Stramzik\Desktop\SentizAnalyzer\Version3.py"]
[dir: C:\Users\Stramzik\Desktop\SentizAnalyzer]
[path: C:\ProgramData\Oracle\Java\javapath;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Windows\System32\OpenSSH\;C:\Program Files (x86)\Webex\Webex\Applications;C:\Program Files\IBM\SPSS\Modeler\18.1.1\ext\bin\spss.TMWBServer\bin;C:\Program Files (x86)\Sennheiser\SoftphoneSDK\;C:\Users\Stramzik\AppData\Local\Programs\Python\Python37;C:\Users\Stramzik\AppData\Local\Programs\Python\Python37\Scripts;C:\Users\Stramzik\AppData\Roaming\nltk_data;C:\Users\Stramzik\AppData\Local\Microsoft\WindowsApps;C:\Users\Stramzik\AppData\Local\Box\Box Edit\]
Check if the data you are getting is actually json, in case of some errors you may not be getting valid json data and also try putting the json.loads() inside the try/catch block.
Encode the data to bytes while writing to the file using a proper encoding.
import json
import csv
class StdOutListener(StreamListener):
def on_data(self, data):
global Count
#Data has the JSON STring
print("--------------------")
try:
y = json.loads(data)
y = y['extended_tweet']['full_text']
y = str(y)
## Portuguese to English conversion block
translations = translator.translate(y, dest='en')
print(translations.origin, '\n -> \n', translations.text)
z = translations.text.encode("utf-8")
a = translations.origin.encode("utf-8")
data = data.encode("utf-8")
row = [data,a,z]
with open(fetched_tweets_filename, 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
Count = Count + 1
print(Count)
except KeyError:
y = y.get('text', '') # change this to get
y = str(y)
## Portuguese to English conversion block
translations = translator.translate(y, dest='en')
print(translations.origin, '\n -> \n', translations.text)
z = translations.text.encode("utf-8")
a = translations.origin.encode("utf-8")
row = [data, a, z]
with open(fetched_tweets_filename, 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
Count = Count + 1
print(Count)
except Exception as e:
print(str(e))
### Sentiment Analysis block
analysis = TextBlob(z)
if analysis.sentiment.polarity > 0:
print("sentiment is positiv")
elif analysis.sentiment.polarity == 0:
print("sentiment is Neutral")
else:
print("sentiment is Negative")
print("--------------------\n")
return True
def on_error(self,status):
print(status)
if __name__ == "__main__":
listener=StdOutListener()
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
fetched_tweets_filename = "datas3.csv"
stream = Stream(auth, listener)
stream.filter(track=['como'],languages=['pt'])

Find an incorrect line in a .txt file and delete it

I have a .txt file with csv data like this :
1529074866;29.89;29.41;321;70;1;60;1003.05
1529074868;29.87;29.82;140;79;1;60;1003.52
I made this function to extract the data from the file:
def __init__(self, file):
self.data_ = {"Temps": [], "Temperature": [], "Humidite": [], "Luminosite": [], "Niveau sonore": [], "Radiations EM": [], "Rythme cardiaque": [], "Pression": [] }
# data_ = {date : [t1, t2,...], temp : [temp1, temp2,...]...}. Cette disposition des données (par date, luminosité...) permet d'optimiser l affichage des graphiques ulterieurement.
try:
for line in file: # Cette commande permet de parcourir une à une toutes les lignes du fichier file.
line = line.rstrip() # Cette commande permet de supprimer le caractère invisible de retour chariot en fin de ligne.
line = line.rsplit(";") # Cette commande permet de transpormer la ligne en liste en coupant au niveau des ";".
self.data_["Temps"].append( int(line[0]) ) # Ceci implique que la donnée correspondant à la date soit bien la 1ère donnée (rang 0) sur la ligne.
self.data_["Temperature"].append( float(line[1])) # Ceci implique que la donnée correspondant à la date soit bien la 2ème donnée (rang 1) sur la ligne.
self.data_["Humidite"].append( float(line[2]) ) # ect
self.data_["Luminosite"].append( float(line[3]) )
self.data_["Niveau sonore"].append( float(line[4]) )
self.data_["Radiations EM"].append( float(line[5]))
self.data_["Rythme cardiaque"].append( float(line[6]) )
self.data_["Pression"].append( float(line[7]) )
except Exception as expt : # Cette exception est executee si l'execution du bloc try si dessus a echoue.
print("\n!!! Echec de l'importation - exception relevee !!!")
print expt
I would like to create a function which extracts the first parameter of the line, which is the Unix time, and, if the time is not between [1514761200; 1546297200], deletes the line.
How can I do this?
To delete a line from the file you're actually going to read it completely, and then to rewrite the filtered file.
One approach:
data_typo = "1529074866;29.89;29.41;321;70;1;60;1003.05\n"
with open("file.txt", "r") as f:
lines = f.readlines() # Extract all the lines
data = [line.rstrip().split(";") for line in lines]
# data elements: ['1529074866', '29.89', '29.41', '321', '70', '1', '60', '1003.05']
At that point, a simple approach is to filter the list data.
def criterion(elt):
if 1514761200 <= eval(elt[0]) and eval(elt[0]) <= 1546297200:
return True
else:
return False
data_to_rewrite = list(filter(criterion, data)) # Keeps the elements where criterion returns True.
with open("new_file.txt", "w") as f:
for elt in data_to_rewrite:
line = ";".join(elt) + "\n"
f.write(line)

abaqus python, why are the extracted displacement values different?

I am trying to use abaqus-ython scripting to extract the nodal coordinates.
in doing so i first extract the original nodal positions and then add the displacement value.
But for 1 of my abaqus models i notice that the displacement values i extract are different from the ones i find in abaqus (see attached pictures)
i have no idea how or why this is happening.
Can someone helpe me?
You can find my code below.
for ODBname in os.listdir("D:/r0333338/Documents/MP/nodal_files_genereren/OBD"): # directory van waar alle .odb bestanden zitten, hier worden ze allemaal
print 'Current File: '+ODBname #checken welke file er gebruikt wordt
ODBnamefull = 'D:/r0333338/Documents/MP/nodal_files_genereren/OBD/'+ODBname # Volledig pad naar de .odb file. ander wordt de file in de default work directory gezocht.
odb = openOdb(path=ODBnamefull) #openen van het ODB bestand
ODBalleenNaam = ODBname.rstrip('.odb') #om .odb weg te knippen
NodalName = ODBalleenNaam + '-nodal.txt' #naam ven het te schrijven bestand
for name, instance in odb.rootAssembly.instances.items(): #'name' is naam van elke part van in de assembly, zo kan de nodal coordinaten van het onvervormde testobject (part) achterhaald worden
print name
type(name)
name2 = 'DISK-1'
if name == name2:
numNodesTotal = len( instance.nodes ) #aantal nodes
frame = odb.steps[ 'Step-1' ].frames[-1] #informatie van de laatste frame van Step-1 gebruiken
dispField = frame.fieldOutputs['U'] #verplaatsingsveld van laatste frame van step-1
print 'total numer of nodes: '+ str(numNodesTotal) #checken hoeveel nodes er zijn
for i in range( numNodesTotal ): #voor elke node :
curNode = instance.nodes[i] #informatie van de huidige node
#print curNode.label #nummer van de huidige node
#2D verplaatsing omzetten naar 3D verplaatsing
U1 = dispField.values[i].data[0] #X-verplaatsing aan U1 geven
U2 = dispField.values[i].data[1] #Y-verplaatsing aan U2 geven
array = [] #maken van een lege array voor invullen van de coordinaten
array.append(U1) #X-verplaatsing toevoegen
array.append(U2) #Y-verplaatsing toevoegen
array.append(0) #Z-verplaatsing toevoegen
print 'node: '
print curNode.label
print 'displacement: '
print array #checken van 3D verplaatsing
print 'coordinates: '
print curNode.coordinates
odb.close()
else:
print 'name is not DISK-1 but: ' + str(name)
Abaqus displacement
python extracted displacement
you should pull the node label directly from the field data:
curNodeLabel=dispField.values[i].nodeLabel
you then need to use that to get the node:
curNode=instance.getNodeFromLabel(curNodeLabel)
don't assume the node indexing is the same as the field data indexing.
I'd further for consistency make the for loop:
for i in range( len(dispField.values) ):
Despite the title, I am assuming that you want the final coordinates and not the displacement. As Daniel F mentioned, you should add COORDS as a field output. In that case the code bellow should be helpful.
def findCoordSet(OdbName,StepName,InstanceName,SetName):
"""
This ODB reading script does the following:
-Retrieves coordinates at SetName
"""
Coordinates={'x':[],'y':[]}
# Open the output database.
odbName = OdbName + '.odb'
odb = visualization.openOdb(odbName)
lastFrame = odb.steps[StepName].frames[-1]
coordset = odb.rootAssembly.instances[InstanceName.upper()].nodeSets[SetName.upper()]
# Retrieve Y-displacements at the splines/connectors
dispField = lastFrame.fieldOutputs['COORD']
dFieldpTip = dispField.getSubset(region=coordset)
for i in range(len(dFieldpTip.values)):
Coordinates['x'].append(dFieldpTip.values[i].data[0])
Coordinates['y'].append(dFieldpTip.values[i].data[1])
odb.close()
return Coordinates

Extract nodal coordinates from the deformed testsubject (abaqus-python)

I am trying to make a python script to extract the nodal coordinates from the ODB file (from abaqus).
So far i have come up with the code attached below (don't mind the extra information i put behind the #, sometimes it's just so i can keep track of what i'm doing)
The problem is that the coordinates i extract are those from the undeformed test subject. and i need the coordinates of the nodes from the deformed test subject.
Can somebody help me with how i reach this information using python code?
from abaqus import *
from abaqusConstants import *
import __main__
import section
import regionToolset
import displayGroupMdbToolset as dgm
import part
import material
import assembly
import step
import interaction
import load
import mesh
import optimization
import job
import sketch
import visualization
import xyPlot
import displayGroupOdbToolset as dgo
import connectorBehavior
import shutil
import os
import sys
from odbAccess import openOdb
for ODBname in os.listdir("D:/r0333338/Documents/MP/nodal_files_genereren/OBD"): # this is where all your ODB files are located #t hier wordt ook de odb file gekozen die geopend zal worden
SUBname = ODBname[1:3] # My subject ID is saved in the ODB name - this helps me create the file #woerdt er hier de 3e tot6e letter van de ODBname gepakt ?
print 'Current File: '+ODBname #voor check welke file er gebruikt wordt #t (ZIT '.odb' hier bij in ?)
ODBnamefull = 'D:/r0333338/Documents/MP/nodal_files_genereren/OBD/'+ODBname # Full path to the ODB file, otherwise abaqus tries to find it in default work directory
odb = openOdb(path=ODBnamefull) #open the ODB file
assembly = odb.rootAssembly #t declareren van assembly?
session.viewports['Viewport: 1'].odbDisplay.setFrame(step=0, frame=1)
numNodes = 0 #t num nodes op nul zetten
f = open("D:/r0333338/Documents/MP/nodal_files_genereren/ODBoutput/nodal.txt", "w") #indien het bestand al bestaat moet er "a" staan ipv "w"
for name, instance in assembly.instances.items(): #t 'name' is naam van elke part van in de assembly ?
n = len(instance.nodes) #t tellen van hoeveelheid nodes
print 'Number of nodes of instance %s: %d' % (name, n) #moet niet in de file staan, kan eigenlijk weggelaten worden. (maar is een goede check?)
numNodes = numNodes + n #tellen van totaal aantal nodes (globaal over alle parts) maar is eigenlijk niet nodig?
f.write( "*Part, name=Part-1" + "\n")#moet erin staan volgens de MatchId regels
f.write( "*Nodes" + "\n") #moet erin staan volgens de MatchId regels
if instance.embeddedSpace == THREE_D: #indien het 3D is
print ' X Y Z' #moet niet in de file staan, maar is een goede check om te zien waar we zitten
for node in instance.nodes:
#print node #printen van node
f.write( str(node.label) + ";" ) #schrijven van nodenummer
f.write(str(node.coordinates[0]) + ";" + str(node.coordinates[1]) + ";" + str(node.coordinates[2]) + "\n") #schrijven van coordinaten [X;Y;Z] en enter
else: #indien het 2D is
print ' X Y' ';0' #moet niet in de file staan, maar is een goede check om te zien waar we zitten
for node in instance.nodes:
#print node #printen van node
f.write( str(node.label) + ";" )
f.write(str(node.coordinates[0]) + ";" + str(node.coordinates[1]) + ";" + str(node.coordinates[2]) + "\n") #schrijven van coordinaten [X;Y;Z] en enter
f.write( "*End Part" ) #moet erin staan volgens de MatchId regels
f.close()
get the displacement field:
u=odb.steps['Step-1'].frames[-1].fieldOutputs['U']
then u.values is a list of all nodal values:
u.values[i].data -> array of (ux,uy,uz)
u.values[i].nodeLabel -> node label
then you grab the original position like this:
instance.getNodeFromLabel(u.values[i].nodeLabel).coordinates
You can also directly get the deformed coordinate as a field output, but you need to request COORD output when you run the analysis.

Categories