I am trying to get the Python NEAT algorithm to work with OpenAI Gym Retro.
I am using Python 3 and following this YouTube tutorial: https://www.youtube.com/watch?v=8dY3nQRcsac&list=PLTWFMbPFsvz3CeozHfeuJIXWAJMkPtAdS&index=8&t=410s
I am trying to get NEAT to work with Sonic in the OpenAI environment, and there seems to be a problem with the recurrent.py file.
Find the code here: https://gitlab.com/lucasrthompson/Sonic-Bot-In-OpenAI-and-NEAT/blob/master/tut2.py
This is the error message:
File "tut3.py", line 53, in <module>
winner = p.run(eval_genomes)
File "/home/gym/OPAI/lib/python3.6/site-packages/neat/population.py", line 89, in run
fitness_function(list(iteritems(self.population)), self.config)
File "tut3.py", line 41, in eval_genomes
imgarray.append(y)
AttributeError: 'numpy.ndarray' object has no attribute 'append'
Line 89 in the population.py file
self.reporters.start_generation(self.generation)
# Evaluate all genomes using the user-provided function.
fitness_function(list(iteritems(self.population)), self.config)
The tut3 code that I got from Lucas. It is just a plain NEAT network.
import retro
import numpy as np
import pickle
import neat
import cv2
env = retro.make('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1')
imgarray = []
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        ob = env.reset()
        ac = env.action_space.sample()
        inx, iny, inc = env.observation_space.shape
        inx = int(inx/8)
        iny = int(iny/8)
        net = neat.nn.RecurrentNetwork.create(genome, config)
        current_max_fitness = 0
        fitness_current = 0
        frame = 0
        counter = 0
        xpos = 0
        xpos_max = 0
        done = False
        while not done:
            env.render()
            frame += 1
            ob = cv2.resize(ob, (inx, iny))
            ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
            ob = np.reshape(ob, (inx, iny))
            imgarray = np.ndarray.flatten(ob)
            for x in ob:
                for y in x:
                    imgarray.append(y)
            nnOutput = net.activate(imgarray)
            print(nnOutput)
            ob, rew, done, info = env.step(nnOutput)
            imgarray.clear()

config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')
p = neat.Population(config)
winner = p.run(eval_genomes)
It would be great if you could help. I want to fully understand this, as it is a school project.
Thanks for your help :))
Your while loop has some errors in it. Make your eval_genomes function look like this:
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        ob = env.reset()
        ac = env.action_space.sample()
        inx, iny, inc = env.observation_space.shape
        inx = int(inx/8)
        iny = int(iny/8)
        net = neat.nn.RecurrentNetwork.create(genome, config)
        current_max_fitness = 0
        fitness_current = 0
        frame = 0
        counter = 0
        xpos = 0
        xpos_max = 0
        done = False
        while not done:
            env.render()
            frame += 1
            ob = cv2.resize(ob, (inx, iny))
            ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
            ob = np.reshape(ob, (inx, iny))
            imgarray = np.ndarray.flatten(ob)
            nnOutput = net.activate(imgarray)
            print(nnOutput)
            ob, rew, done, info = env.step(nnOutput)
ndarray.flatten does the same thing as the for x / for y loop, so you only need one of the two, and flatten is easier to read. Additionally, Python is a language where indentation really matters. Always make sure your tabs/spaces are lined up correctly!
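For illustration, here is a tiny stand-alone sketch (not from the tutorial) of why mixing the two approaches fails: flatten already returns a numpy array, and numpy arrays have no .append method, which is exactly the AttributeError in the traceback.

import numpy as np

# a tiny 2x3 stand-in for the resized, greyscaled frame
ob = np.arange(6).reshape(2, 3)

# option 1: flatten returns a 1-D numpy array straight away
flat = np.ndarray.flatten(ob)            # array([0, 1, 2, 3, 4, 5])

# option 2: the nested loops build the same sequence, but only into a
# plain Python list -- calling .append on the numpy array from option 1
# is what raises the AttributeError
nested = []
for x in ob:
    for y in x:
        nested.append(y)

assert list(flat) == nested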
Hope that works. If it doesn't, just go on and use this:
https://gitlab.com/lucasrthompson/Sonic-Bot-In-OpenAI-and-NEAT/blob/master/tut2.py
or this:
https://gitlab.com/lucasrthompson/Sonic-Bot-In-OpenAI-and-NEAT/blob/master/neat-paralle-sonic.py
Good luck!
Related
I use a Teledyne Dalsa line scan camera. When I increase its line rate to 13000 or higher and trigger it with an Arduino at 3.3 V, images are not consistently captured from the buffer for every trigger, unlike when the line rate is 10000 or lower.
The trigger appears to work perfectly in the Sapera software even at the increased line rate.
Attaching the code:
import platform
import os
import sys
import time
from harvesters.core import Harvester
import cv2
import numpy as np
h = Harvester()
gentl_file = r"C:\Program Files\MATRIX VISION\mvIMPACT Acquire\bin\x64\mvGenTLProducer.cti"
h.add_file(gentl_file)
h.update()
ia = h.create(0)
ia.remote_device.node_map.Width.value = 2048
ia.remote_device.node_map.Height.value = 2000
ia.remote_device.node_map.PixelFormat.value = 'RGB8'
ia.remote_device.node_map.AcquisitionLineRate.set_value(15000)
ia.remote_device.node_map.ExposureTime.set_value(47)
ia.remote_device.node_map.Gain.set_value(6)
ia.remote_device.node_map.TriggerSelector.value = "FrameBurstStart"
ia.remote_device.node_map.TriggerMode.value = "On"
ia.remote_device.node_map.triggerFrameCount.set_value(1) # new
ia.remote_device.node_map.TriggerSource.value = "Line2"
ia.remote_device.node_map.TriggerActivation.value = "RisingEdge"
ia.remote_device.node_map.TriggerDelay.set_value(0.0) # new
ia.remote_device.node_map.triggerDelaySource.value = "lineTriggerSignal" # new
ia.remote_device.node_map.LineSelector.value = "Line2"
ia.remote_device.node_map.LineFormat.value = "SingleEnded"
ia.remote_device.node_map.lineDetectionLevel.value = "Threshold_for_3V3"
ia.remote_device.node_map.LineInverter.value = False
ia.remote_device.node_map.lineDebouncingPeriod.value = 0
ia.remote_device.node_map.lineElectricalTermination.value = "Disabled"
ia.start()
start = time.time()
ia.start()
def image_capture():
    global ia
    print('Image capture started')
    lst_app = []
    with ia.fetch() as buffer:
        component = buffer.payload.components[0]
        _2d_new = component.data.reshape(component.height, component.width, int(component.num_components_per_pixel))
        _2d_new = cv2.cvtColor(_2d_new, cv2.COLOR_RGB2BGR)
        lst_app.append(_2d_new)
        if len(lst_app) == 1:
            print('Buffer is full')
            arr = np.vstack(lst_app)
            lst_app = []
            return np.array(arr)

count = 0
while True:
    image = image_capture()
    print('captured', count)
    count += 1
Thanks in advance.
Even after including all the parameters from the Sapera software, the image still doesn't get returned for certain triggers.
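A small diagnostic sketch (an illustration, using only the image_capture() defined above): timestamp each capture, so that triggers dropped at the higher line rate show up as irregular gaps between successive frames.

import time

prev = time.time()
count = 0
while True:
    image = image_capture()        # the function from the code above
    now = time.time()
    print('captured %d, gap since previous frame: %.3f s' % (count, now - prev))
    prev = now
    count += 1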
I am trying to use Stockfish to evaluate a chess position from FEN notation, all in Python. I am mainly using two libraries (pgnToFen, which I found on GitHub here: https://github.com/SindreSvendby/pgnToFen, and the MIT-licensed Stockfish wrapper here: https://github.com/zhelyabuzhsky/stockfish). After many bugs I have reached problem after problem. Stockfish not only can't analyse this FEN position (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -), it loops infinitely! "No worries!" I thought, changing the source code should be accomplishable. I changed _put(), but I am basically unable to put dummy values in because stdin.flush() won't execute once I give it those values, so I don't think I can even skip to the next row in my dataframe. :( The code I changed is below.
def _put(self, command: str, tmp_time) -> None:
    if not self.stockfish.stdin:
        raise BrokenPipeError()
    self.stockfish.stdin.write(f"{command}\n")
    try:
        self.stockfish.stdin.flush()
    except:
        if command != "quit":
            self.stockfish.stdin.write('isready\n')
            try:
                time.sleep(tmp_time)
                self.stockfish.stdin.flush()
            except:
                #print ('Imma head out', file=sys.stderr)
                raise ValueError('Imma head out...')
                #sys.stderr.close()

def get_evaluation(self) -> dict:
    """Evaluates current position

    Returns:
        A dictionary of the current advantage with "type" as "cp" (centipawns) or "mate" (checkmate in)
    """
    evaluation = dict()
    fen_position = self.get_fen_position()
    if "w" in fen_position:  # w can only be in FEN if it is whites move
        compare = 1
    else:  # stockfish shows advantage relative to current player, convention is to do white positive
        compare = -1
    self._put(f"position {fen_position}", 5)
    self._go()
    x = 0
    while True:
        x = x + 1
        text = self._read_line()
        #print(text)
        splitted_text = text.split(" ")
        if splitted_text[0] == "info":
            for n in range(len(splitted_text)):
                if splitted_text[n] == "score":
                    evaluation = {
                        "type": splitted_text[n + 1],
                        "value": int(splitted_text[n + 2]) * compare,
                    }
        elif splitted_text[0] == "bestmove":
            return evaluation
        elif x == 500:
            evaluation = {
                "type": 'cp',
                "value": 10000,
            }
            return evaluation
And last but not least, the change to the __init__ constructor below:
self._stockfish_major_version: float = float(self._read_line().split(" ")[1])
And below is the code where I import all of this; this is where the errors pop up.
import pandas as pd
import re
import nltk
import numpy as np
from stockfish import Stockfish
import os
import sys
sys.path.insert(0, r'C:\Users\path\to\pgntofen')
import pgntofen
#nltk.download('punkt')
#Changed models.py for major version line 39 in stockfish from int to float
stockfish = Stockfish(r"C:\Users\path\to\Stockfish.exe")
file = r'C:\Users\path\to\selenium-pandas output.csv'
chunksize = 10 ** 6
for chunk in pd.read_csv(file, chunksize=chunksize):
    for index, row in chunk.iterrows():
        FullMovesStr = str(row['FullMoves'])
        FullMovesStr = FullMovesStr.replace('+', '')
        if "e.p" in FullMovesStr:
            row.to_csv(r'C:\Users\MyName\Logger.csv', header=None, index=False, mode='a')
            print('Enpassant')
            continue
        tokens = nltk.word_tokenize(FullMovesStr)
        movelist = []
        for tokenit in range(len(tokens)):
            if "." in str(tokens[tokenit]):
                try:
                    tokenstripped = re.sub(r"[0-9]+\.", "", tokens[tokenit])
                    token = [tokenstripped, tokens[tokenit+1]]
                    movelist.append(token)
                except:
                    continue
            else:
                continue
        DFMoves = pd.DataFrame(movelist, columns=[['WhiteMove', 'BlackMove']])
        DFMoves['index'] = row['index']
        DFMoves['Date'] = row['Date']
        DFMoves['White'] = row['White']
        DFMoves['Black'] = row['Black']
        DFMoves['W ELO'] = row['W ELO']
        DFMoves['B ELO'] = row['B ELO']
        DFMoves['Av ELO'] = row['Av ELO']
        DFMoves['Event'] = row['Event']
        DFMoves['Site'] = row['Site']
        DFMoves['ECO'] = row['ECO']
        DFMoves['Opening'] = row['Opening']
        pd.set_option('display.max_rows', DFMoves.shape[0]+1)
        print(DFMoves[['WhiteMove', 'BlackMove']])
        seqmoves = []
        #seqmovesBlack = []
        evalmove = []
        pgnConverter = pgntofen.PgnToFen()
        #stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
        #rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1
        for index, row in DFMoves.iterrows():
            try:
                stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            except:
                evalmove.append("?")
                continue
            #stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            pgnConverter.resetBoard()
            WhiteMove = str(row['WhiteMove'])
            BlackMove = str(row['BlackMove'])
            if index == 0:
                PGNMoves1 = [WhiteMove]
                seqmoves.append(WhiteMove)
                #seqmoves.append(BlackMove)
            else:
                seqmoves.append(WhiteMove)
                #seqmoves.append(BlackMove)
                PGNMoves1 = seqmoves.copy()
            #print(seqmoves)
            try:
                pgnConverter.pgnToFen(PGNMoves1)
                fen = pgnConverter.getFullFen()
            except:
                break
            try:
                stockfish.set_fen_position(fen)
                print(stockfish.get_board_visual())
                evalpos = stockfish.get_evaluation()
                evalmove.append(evalpos)
            except:
                pass
            try:
                stockfish.set_fen_position("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
            except:
                evalmove.append("?")
                continue
            pgnConverter.resetBoard()
            if index == 0:
                PGNMoves2 = [WhiteMove, BlackMove]
                seqmoves.append(BlackMove)
            else:
                seqmoves.append(BlackMove)
                PGNMoves2 = seqmoves.copy()
            try:
                pgnConverter.pgnToFen(PGNMoves2)
                fen = pgnConverter.getFullFen()
            except:
                break
            try:
                stockfish.set_fen_position(fen)
                print(stockfish.get_board_visual())
                evalpos = stockfish.get_evaluation()
                print(evalpos)
                evalmove.append(evalpos)
            except:
                pass
        #DFMoves['EvalWhite'] = evalwhite
        #DFMoves['EvalBlack'] = evalblack
        print(evalmove)
So the detailed question is: how do I get stockfish.get_evaluation() to just skip this FEN position (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -), or better yet, fix the problem? I have been working on this for quite a while, so any insight would be very much appreciated.
My specs are Windows 10, Python 3.9, an Intel(R) Core(TM) i9-10980XE CPU @ 3.00 GHz, and 64.0 GB of RAM.
Thanks :)
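For the "just skip" part, one possible pre-check (an illustrative sketch, not part of either library): a complete FEN normally has six space-separated fields, while the position that hangs only has four, so incomplete FENs could be marked with "?" before get_evaluation() is ever called.

from stockfish import Stockfish

stockfish = Stockfish(r"C:\Users\path\to\Stockfish.exe")   # same wrapper object as above

def is_complete_fen(fen: str) -> bool:
    # piece placement, side to move, castling, en passant, halfmove clock, fullmove number
    return len(fen.split()) == 6

fen = "3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -"
if is_complete_fen(fen):
    stockfish.set_fen_position(fen)
    evalpos = stockfish.get_evaluation()
else:
    evalpos = "?"      # same "skip this row" marker the script already uses
print(evalpos)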
OK, it seems your FEN is invalid (3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -), so check that. Also, the python-chess library (https://python-chess.readthedocs.io/en/latest/index.html) lets you work with FEN and chess engines together. Pretty cool, no? Here is an example of these two fantastic tools:
import chess
import chess.engine
import chess.pgn
pgn = open("your_pgn_file.pgn")
game = chess.pgn.read_game(pgn)
engine = chess.engine.SimpleEngine.popen_uci("your_stockfish_path.exe")
# Iterate through all moves, play them on a board and analyse them.
board = game.board()
for move in game.mainline_moves():
    board.push(move)
    print(engine.analyse(board, chess.engine.Limit(time=0.1))["score"])
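python-chess can also act as a validity filter for the FEN strings coming out of pgnToFen before anything reaches the engine. A minimal sketch (using the FEN from the question):

import chess

fen = "3b2k1/1p3pp1/8/3pP1P1/pP3P2/P2pB3/6K1/8 b f3 -"
try:
    board = chess.Board(fen)       # raises ValueError if the FEN cannot be parsed
except ValueError as err:
    print("rejected FEN:", err)    # skip this row instead of hanging the engine
else:
    print(board.status())          # reports further problems (missing king, bad castling rights, ...)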
I have been messing around with gym-retro and OpenCV. I have been looking at other code examples and tutorials; several of them seem to be coded the same way, but when I do it I get the following error. Has there been some kind of update or something? Any suggestions for a fix are welcome. I can comment out the reshaping and conversion to greyscale and it works, but then I am feeding too much information to my NN.
import retro
import numpy as np
import cv2
import neat
import pickle
env = retro.make('SuperMarioBros3-Nes', '1Player.World1.Level1')
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:
        ob = env.reset()
        ac = env.action_space.sample()
        inx, iny, inc = env.observation_space.shape
        inx = int(inx/8)
        iny = int(iny/8)
        net = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
        current_max_fitness = 0
        fitness_current = 0
        frame = 0
        counter = 0
        xpos = 0
        xpos_max = 0
        done = False
        while not done:
            env.render()
            frame += 1
            #print(ob)
            ob = cv2.resize(ob, (inx, iny))
            ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
            ob = np.reshape(ob, (inx, iny))
            imgarray = np.ndarray.flatten(ob)
            nnOutput = net.activate(imgarray)
            ob, rew, done, info = env.step(nnOutput)
            #imgarray.clear()
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,neat.DefaultSpeciesSet,neat.DefaultStagnation,'config-feedforward')
p = neat.Population(config)
winner = p.run(eval_genomes)
(gameai) C:\Users\dgilk\anaconda3\envs\gameai>python mario.py
Traceback (most recent call last):
File "mario.py", line 45, in <module>
winner = p.run(eval_genomes)
File "C:\Users\dgilk\anaconda3\envs\gameai\lib\site-packages\neat\population.py", line 89, in run
fitness_function(list(iteritems(self.population)), self.config)
File "mario.py", line 32, in eval_genomes
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.4.0) c:\users\appveyor\appdata\local\temp\1\pip-req-build-k8sx3e60\opencv\modules\imgproc\src\color.simd_helpers.hpp:92: error: (-2:Unspecified error) in function '__cdecl cv::impl::`anonymous-namespace'::CvtHelper<struct cv::impl::`anonymous namespace'::Set<3,4,-1>,struct cv::impl::A0xbf2c9cd3::Set<1,-1,-1>,struct cv::impl::A0xbf2c9cd3::Set<0,2,5>,2>::CvtHelper(const class cv::_InputArray &,const class cv::_OutputArray &,int)'
> Invalid number of channels in input image:
> 'VScn::contains(scn)'
> where
> 'scn' is 1
UPDATE:
Here is an output of the shrunken image. There appears to be color.
[image: snippet of observation window]
You are creating a cv2 object with shape (inx, iny) here:
ob = cv2.resize(ob, (inx,iny)) # 1
ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY) # 2
cv2.COLOR_BGR2GRAY expects a colour image, which would have shape (inx, iny, 3), so check what shape your 'ob' needs to be. What do you need line 2 for?
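For a quick sanity check, here is a small self-contained sketch (with a made-up stand-in frame, not the actual Mario observation) that prints the shapes and only converts while the frame still has three channels:

import cv2
import numpy as np

inx, iny = 28, 30
ob = np.zeros((224, 240, 3), dtype=np.uint8)     # stand-in for the RGB frame from env.reset()
ob = cv2.resize(ob, (inx, iny))
print(ob.shape)                                  # resize keeps the channel axis: (30, 28, 3)
if ob.ndim == 3 and ob.shape[2] == 3:            # convert only while it is still 3-channel
    ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
print(ob.shape)                                  # now (30, 28)
imgarray = ob.flatten()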
I am trying to simulate my FMU created from TRNSYS using pyFMI. When I try to simulate it, it prompts the following message:
"forrtl severe(193):Run-time Check Failure. The variable \'TRNSYSFUNCTIONS_mp_GETOUTPUT VALUE$GETOUTPUT VALUE\' is being used without being initiated"
My code looks like this:
from pyfmi import load_fmu
import os
from collections import defaultdict
import time
model = 'ZUB_FMU2.fmu'
model_dir ='C:\\Trnsys17\MyProjects\\TEST_ZUB_13_FMU_check'
trnsys = load_fmu(fmu=model, path=model_dir)
os.chdir(model_dir)
import numpy as np
t_start = 3624*3600
t_end = 6552*3600
h_step = 1*3600
t_array = np.arange(t_start, t_end, h_step)
cool = 26
heat = 19
tim = 0
LR = []
# Initialize FMU
start_time = time.time()
trnsys.initialize(t_start, t_end)
while tim <= len(t_array)-1:
    try:
        trnsys.set('setCool', cool)
        trnsys.set('setHeat', heat)
        res = trnsys.do_step(current_t=t_array[tim], step_size=h_step, new_step=True)
        if res != 0:
            print "Failed to do step", t_array[tim]
        LR.append(float(trnsys.get('DG_BU_Shading')))
        tim += 1
    except ValueError:
        raw_input("Error...")

print "Time for simulating an FMU is:"
del trnsys
print 'LR is given as: ', LR
Can anybody see the reason for this error? It seems like there is an initialization problem.
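Not a verified fix, but since the run-time error complains about a value being used before it is initialized, one thing worth trying is to give the FMU its two inputs before initialize() is called, in case TRNSYS already reads them during start-up. A sketch using only the calls already in the question:

from pyfmi import load_fmu

model = 'ZUB_FMU2.fmu'
model_dir = 'C:\\Trnsys17\\MyProjects\\TEST_ZUB_13_FMU_check'
cool, heat = 26, 19
t_start = 3624*3600
t_end = 6552*3600

trnsys = load_fmu(fmu=model, path=model_dir)

# assumption: the TRNSYS FMU evaluates its inputs during start-up, so set them
# before initialize() rather than only inside the stepping loop
trnsys.set('setCool', cool)
trnsys.set('setHeat', heat)

trnsys.initialize(t_start, t_end)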
This question already has answers here:
Is it possible to use ViBe algorithm, implemented in opencv, for systema without GPU?
I have these two files:
codebook.py
#http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.148.9778&rep=rep1&type=pdf
import numpy as np
import cv2
alpha = 5
beta = 0.95
Tdel = 80
Tadd = 140
Th= 80
mn,mx,f,l,p,q=0,1,2,3,4,5
class CodeBook():
    def __init__(self, h, w):
        self.h = h
        self.w = w
        self.M = np.empty((h, w), dtype=np.object)
        self.H = np.empty((h, w), dtype=np.object)
        filler = np.frompyfunc(lambda x: list(), 1, 1)
        filler(self.M, self.M)
        filler(self.H, self.H)
        self.t = 1

    def updatev(self, gray, cb):
        I, t = gray, self.t
        if not cb:
            c = [max(0.0, I-alpha), min(255.0, I+alpha), 1, t-1, t, t]
            cb.append(c)
        else:
            found = False
            for cm in cb:
                if (cm[mn] <= I <= cm[mx] and not found):
                    cm[mn] = ((I-alpha)+(cm[f]*cm[mn]))/(cm[f]+1.0)
                    cm[mx] = ((I+alpha)+(cm[f]*cm[mx]))/(cm[f]+1.0)
                    cm[f] += 1
                    #cm[l] = max(cm[l],t-cm[q])
                    cm[l] = 0
                    cm[q] = t
                    found = True
                else:
                    cm[l] = max(cm[l], 10-cm[q]+cm[p]-1)
            if not found:
                c = [max(0.0, I-alpha), min(255.0, I+alpha), 1, t-1, t, t]
                cb.append(c)
        return cb

    def update(self, gray):
        h, w, M = self.h, self.w, self.M
        updatev = np.vectorize(self.updatev, otypes=[np.object])
        self.M = updatev(gray, M)
        self.t += 1

    def fgv(self, gray, cwm, cwh):
        I, t = gray, self.t
        pixval = 0
        found = False
        for cm in cwm:
            if (cm[mn] <= I <= cm[mx] and not found):
                cm[mn] = (1-beta)*(I-alpha) + (beta*cm[mn])
                cm[mx] = (1-beta)*(I+alpha) + (beta*cm[mx])
                cm[f] += 1
                #cm[l] = max(cm[l],t-cm[q])
                cm[l] = 0
                cm[q] = t
                found = True
            else:
                cm[l] += 1
                #cm[l]=max(cm[l],t-cm[q]+cm[p]-1)
        cwm[:] = [cw for cw in cwm if cw[l] < Tdel]
        if found: return 0
        for cm in cwh:
            if (cm[mn] <= I <= cm[mx] and not found):
                cm[mn] = (1-beta)*(I-alpha) + (beta*cm[mn])
                cm[mx] = (1-beta)*(I+alpha) + (beta*cm[mx])
                cm[f] += 1
                #cm[l] = max(cm[l],t-cm[q])
                cm[l] = 0
                cm[q] = t
                found = True
            else:
                #cm[l]=max(cm[l],t-cm[q]+cm[p]-1)
                cm[l] += 1
        if not found:
            c = [max(0.0, I-alpha), min(255.0, I+alpha), 1, 0, t, t]
            cwh.append(c)
        cwh[:] = [cw for cw in cwh if cw[l] < Th]
        tomove = [cw for cw in cwh if cw[f] > Tadd]
        cwh[:] = [cw for cw in cwh if not cw in tomove]
        cwm.extend(tomove)
        return 255

    def fg(self, gray):
        h, w, M, H = self.h, self.w, self.M, self.H
        fgv = np.vectorize(self.fgv, otypes=[np.uint8])
        fg = fgv(gray, M, H)
        self.t += 1
        return fg
test.py
import cv2
import sys
import numpy as np
import time
import cProfile
import pyximport; pyximport.install(reload_support=True,
setup_args={'script_args':["--compiler=mingw32"]})
import codebook
c = cv2.VideoCapture(0)
c.set(3,320)
c.set(4,240)
cv2.namedWindow('vid',0)
cv2.namedWindow('fg',0)
_,img = c.read()
img = cv2.resize(img,(160,120))
h,w = img.shape[:2]
cb = codebook.CodeBook(h,w)
N=0
def fillholes(gray):
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
    res = cv2.morphologyEx(gray,cv2.MORPH_OPEN,kernel)

def run():
    while(1):
        global N
        _,img = c.read()
        img = cv2.resize(img,(160,120))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imshow('vid',gray)
        if N < 10:
            cb.update(gray)
        else:
            start = time.clock()
            fg = cb.fg(gray)
            print time.clock()-start
            element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2))
            fg = cv2.erode(fg,element)
            fillholes(fg)
            cv2.imshow('fg',fg)
        N += 1
        if cv2.waitKey(5)==27:
            break

run()
cv2.destroyAllWindows()
c.release()
The code is stuck at fgv. Cython can speed it up somewhat, but it is still running slowly. I am thinking of doing one of these two:
1. Make it run in parallel:
   - multithreading: I am using EPD's numpy and changed MKL_NUM_THREADS to 8, yet it is still bound to a single core;
   - distribute array slices to worker processes (see the sketch after this list).
2. Redo some/all (which? I have no experience) parts in C++, although I really would like to avoid this.
I have changed fgv as much as I know how. Please let me know what I should be looking at next. Thanks a lot!
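For the "array slices to worker processes" option, here is a rough sketch of one possible shape (an untested illustration with assumptions: each long-lived worker owns the codebook state for its own horizontal strip of rows, and the frame height divides evenly by the number of strips), reusing the codebook module from above:

import multiprocessing as mp
import numpy as np
import codebook                      # the module from the question

def strip_worker(conn, strip_h, w):
    # one long-lived process per strip, so the per-pixel state never leaves it
    cb = codebook.CodeBook(strip_h, w)
    while True:
        cmd, strip = conn.recv()
        if cmd == 'update':
            cb.update(strip)
            conn.send(None)
        elif cmd == 'fg':
            conn.send(cb.fg(strip))
        else:
            break

class ParallelCodeBook:
    def __init__(self, h, w, n=4):
        self.n = n
        self.conns = []
        for _ in range(n):
            parent, child = mp.Pipe()
            p = mp.Process(target=strip_worker, args=(child, h // n, w))
            p.daemon = True
            p.start()
            self.conns.append(parent)

    def _run(self, cmd, gray):
        strips = np.array_split(gray, self.n, axis=0)
        for conn, strip in zip(self.conns, strips):
            conn.send((cmd, strip))
        return [conn.recv() for conn in self.conns]

    def update(self, gray):
        self._run('update', gray)

    def fg(self, gray):
        return np.vstack(self._run('fg', gray))

In test.py this would replace codebook.CodeBook(h, w) with ParallelCodeBook(h, w); on Windows the process creation would additionally need the usual if __name__ == '__main__': guard.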
Fixed:
Is it possible to use ViBe algorithm, implemented in opencv, for systema without GPU?
More: http://www.changedetection.net/