Deterministic runs in CARLA (Python)

I'd like to be able to reproduce the exact same run, given that I don't change any simulation parameters, in the autonomous-driving simulator CARLA. Before I paste my code, my logic is that I have to set a specific seed for any random operation to be repeatable, set a specific seed for the traffic manager, and in general work in synchronous_mode=True so that lag on my computer can't interfere. As you'll see, I log the x, y, z location of the ego vehicle and run the simulation twice. The runs are similar, but not identical. What can I do to make them repeatable (in actual live runs, not recording mode)?
Additional info: CARLA 0.9.14 on Ubuntu 20.04, Python 3.8.
import random
import numpy as np
import sys
import os

try:
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/carla')
except IndexError:
    pass

import carla
from agents.navigation.behavior_agent import BehaviorAgent  # pylint: disable=import-error

seed = 123
N_vehicles = 50
camera = None
telemetry = []
random.seed(seed)
try:
    # Connect the client and set up the blueprint library and spawn points
    client = carla.Client('localhost', 2000)
    client.set_timeout(60.0)
    world = client.get_world()
    bp_lib = world.get_blueprint_library()
    spawn_points = world.get_map().get_spawn_points()

    settings = world.get_settings()
    settings.synchronous_mode = True
    settings.fixed_delta_seconds = 0.10
    world.apply_settings(settings)

    traffic_manager = client.get_trafficmanager()
    traffic_manager.set_random_device_seed(seed)
    traffic_manager.set_synchronous_mode(True)

    # Spawn the ego vehicle
    vehicle_bp = bp_lib.find('vehicle.audi.a2')
    vehicle = world.try_spawn_actor(vehicle_bp, random.choice(spawn_points))

    # Move the spectator behind the vehicle to watch the motion
    spectator = world.get_spectator()
    transform = carla.Transform(vehicle.get_transform().transform(carla.Location(x=-6, z=2.5)),
                                vehicle.get_transform().rotation)
    spectator.set_transform(transform)
    world.tick()

    # Set the car's controls
    agent = BehaviorAgent(vehicle, behavior="normal")
    destination = random.choice(spawn_points).location
    agent.set_destination(destination)
    print('destination:')
    print(destination)
    print('current location:')
    print(vehicle.get_location())

    # Spawn an RGB camera attached to the ego vehicle
    camera_bp = bp_lib.find('sensor.camera.rgb')
    camera_init_trans = carla.Transform(carla.Location(z=2))
    camera = world.spawn_actor(camera_bp, camera_init_trans, attach_to=vehicle)

    # Callback stores sensor data in a dictionary for use outside the callback
    def camera_callback(image, data_dict):
        data_dict['image'] = np.reshape(np.copy(image.raw_data), (image.height, image.width, 4))

    # Get camera dimensions and initialise the dictionary
    image_w = camera_bp.get_attribute("image_size_x").as_int()
    image_h = camera_bp.get_attribute("image_size_y").as_int()
    camera_data = {'image': np.zeros((image_h, image_w, 4))}

    # Start the camera recording
    camera.listen(lambda image: camera_callback(image, camera_data))
    # Add traffic to the simulation
    vehicles_list = []
    for i in range(N_vehicles):
        ovehicle_bp = random.choice(bp_lib.filter('vehicle'))
        npc = world.try_spawn_actor(ovehicle_bp, random.choice(spawn_points))
        # keep it only if the spawn succeeded
        if npc:
            vehicles_list.append(npc)
    print(f'only {len(vehicles_list)} cars were spawned')
    world.tick()

    # Set all NPC vehicles in motion using the Traffic Manager
    for v in vehicles_list:
        v.set_autopilot(True)

    # Game loop
    while True:
        world.tick()
        pose = vehicle.get_location()
        telemetry.append([pose.x, pose.y, pose.z])
        # keep following the car
        transform = carla.Transform(vehicle.get_transform().transform(carla.Location(x=-6, z=2.5)),
                                    vehicle.get_transform().rotation)
        spectator.set_transform(transform)
        if agent.done():
            print("The target has been reached, stopping the simulation")
            break
        control = agent.run_step()
        control.manual_gear_shift = False
        vehicle.apply_control(control)
finally:
    # Stop the camera when we've recorded enough data
    if camera:
        camera.stop()
        camera.destroy()
    settings = world.get_settings()
    settings.synchronous_mode = False
    settings.fixed_delta_seconds = None
    world.apply_settings(settings)
    traffic_manager.set_synchronous_mode(False)  # take the TM out of sync mode together with the world
    if vehicles_list:
        client.apply_batch([carla.command.DestroyActor(v) for v in vehicles_list])
    vehicle.destroy()

np.savetxt('telemetry.txt', np.array(telemetry), delimiter=',')
[Plot comparing the two runs: y-axis is the position error between the two runs, x-axis is the time index of the run.]
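Beyond the three measures listed above, CARLA's determinism notes point at a few more sources of non-determinism that this script leaves open. The sketch below is a hedged checklist for 0.9.14 (flag availability can vary slightly between versions), not a guaranteed fix; note also that BehaviorAgent itself draws on Python RNGs, so identical world state is necessary but may not be sufficient.
import random
import numpy as np
import carla

seed = 123
client = carla.Client('localhost', 2000)
client.set_timeout(60.0)

# Start every trial from a freshly loaded world so that no actor ids,
# traffic-light phases, or physics state leak over from the previous run.
world = client.reload_world()

# Seed every RNG the run can touch, not only `random`
# (numpy is imported in the script and agent code may draw from it too).
random.seed(seed)
np.random.seed(seed)

settings = world.get_settings()
settings.synchronous_mode = True
settings.fixed_delta_seconds = 0.10
settings.deterministic_ragdolls = True  # keeps physics ragdolls repeatable (available in recent 0.9.x)
world.apply_settings(settings)

tm = client.get_trafficmanager()
tm.set_synchronous_mode(True)
tm.set_random_device_seed(seed)
tm.set_hybrid_physics_mode(False)  # hybrid mode toggles physics by distance and hurts repeatability

# When enabling autopilot, pin NPCs to this seeded TM instance explicitly:
# npc.set_autopilot(True, tm.get_port())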

Related

Python NEAT not accepting an array as input

I am attempting to create a Python script that can play the Chrome dino game, but I can't get NEAT to accept an image in the form of a NumPy array as input. It doesn't work when I convert it to a list either. What can I do to fix this? Is there a config somewhere that I have to find?
Code:
import os
import time
import neat
import pickle
import pyautogui as pg
from PIL import ImageGrab, ImageOps, Image
import numpy as np

gen = 0
positions_standart = ((455, 450), (1242, 605))
positions_restart = ((779, 624), (949, 713))

def get_dead():
    box = (positions_restart[0][0], positions_restart[0][1], positions_restart[1][0], positions_restart[1][1])
    image = ImageGrab.grab(box)
    grayscale = ImageOps.grayscale(image)
    a = np.array(grayscale.getcolors())
    return a.sum() == 47100

def get_image():
    box = (positions_standart[0][0], positions_standart[0][1], positions_standart[1][0], positions_standart[1][1])
    image = ImageGrab.grab(box)
    grayscale = ImageOps.grayscale(image)
    a = np.array(image)
    return grayscale, a

class Dino:
    def jump(self):
        pg.press("space")
def eval_genomes(genomes, config):
    """
    Runs the simulation of the current population of dinos and sets
    their fitness based on the distance they reach in the game.
    """
    global gen
    gen += 1
    # create lists holding the genome itself, the neural network
    # associated with the genome, and the dino object that uses
    # that network to play
    nets = []
    dinos = []
    ge = []
    for genome_id, genome in genomes:
        genome.fitness = 0  # start with fitness level of 0
        net = neat.nn.FeedForwardNetwork.create(genome, config)
        nets.append(net)
        dinos.append(Dino())
        ge.append(genome)

    score = 0
    for x, dino in enumerate(dinos):
        pg.click(455, 450)
        print(f"Dino {x}")
        dino.jump()
        run = True
        while run:
            time.sleep(0.1)
            ge[x].fitness += 0.1
            # grab the screen and ask the network whether to jump
            gs = get_image()[1]
            gs = gs.tolist()
            output = nets[dinos.index(dino)].activate([gs])
            if output[0] > 0.5:  # tanh activation: result is between -1 and 1, so jump if over 0.5
                dino.jump()
                print("jump")
            ### - Check Dead - ###
            if get_dead():
                run = False
                print("dead")
        # break if score gets large enough
        #if score > 600:
        #    pickle.dump(nets[0], open("best.pickle", "wb"))
        #    break
def run(config_file):
    """
    Runs the NEAT algorithm to train a neural network to play the dino game.
    :param config_file: location of config file
    :return: None
    """
    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                neat.DefaultSpeciesSet, neat.DefaultStagnation,
                                config_file)
    # Create the population, which is the top-level object for a NEAT run.
    p = neat.Population(config)
    # Add a stdout reporter to show progress in the terminal.
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    #p.add_reporter(neat.Checkpointer(5))
    # Run for up to 50 generations.
    winner = p.run(eval_genomes, 50)
    # show final stats
    print('\nBest genome:\n{!s}'.format(winner))

if __name__ == '__main__':
    # Determine path to configuration file. This path manipulation is
    # here so that the script will run successfully regardless of the
    # current working directory.
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, 'config-feedforward.txt')
    run(config_path)
What am I doing wrong? Please help me.
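For what it's worth, neat-python's FeedForwardNetwork.activate() expects a flat sequence of floats whose length equals num_inputs in the config, while activate([gs]) passes a single element (a nested list). Below is a hedged sketch of one way to bridge the gap; INPUT_W and INPUT_H are illustrative sizes, not values from the question, and num_inputs in config-feedforward.txt would have to match their product.
import numpy as np

INPUT_W, INPUT_H = 20, 10  # hypothetical downsampled resolution

def image_to_inputs(grayscale_image):
    # downsample so the input vector stays small, then flatten to 1-D
    small = grayscale_image.resize((INPUT_W, INPUT_H))
    pixels = np.asarray(small, dtype=np.float32).flatten()
    # normalise to [0, 1]; NEAT wants plain floats, one per input node
    return (pixels / 255.0).tolist()

# usage inside the game loop, replacing activate([gs]):
# inputs = image_to_inputs(get_image()[0])  # get_image()[0] is the PIL grayscale image
# output = nets[x].activate(inputs)         # len(inputs) == 200 == num_inputs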

How to create a short circuit on different generators with the Python API

I am finding it hard to create a short circuit on different generators in the IEEE 9-bus system from Python.
I tried to create a short circuit on generator 1 with the code below:
the code executed successfully, but the short-circuit event never happened.
Here is the class function:
def create_short_circuit(self, target_name, time, duration=None, name='short circuit'):
    # get the element where the short circuit will be applied
    target = self.app.GetCalcRelevantObjects(target_name)[0]
    # get the events folder from the active study case
    evt_folder = self.app.GetFromStudyCase('IntEvt')
    # create an empty event of type EvtShc (short circuit)
    evt_folder.CreateObject('EvtShc', name)
    # get the newly created event
    sc = evt_folder.GetContents(name + '.EvtShc')[0]
    # set time, target and type of short circuit (3-phase)
    sc.time = time
    sc.p_target = target
    sc.i_shc = 0
    # set clearing event if required
    if duration is not None:
        # create an empty event of type EvtShc (clearing)
        evt_folder.CreateObject('EvtShc', name + '_clear')
        # get the newly created event
        scc = evt_folder.GetContents(name + '_clear' + '.EvtShc')[0]
        # set time, target and type of event (clearing)
        scc.time = time + duration
        scc.p_target = target
        scc.i_shc = 4

def delete_short_circuit(self, name='short circuit'):
    # get the events folder from the active study case
    evt_folder = self.app.GetFromStudyCase('IntEvt')
    # find the short circuit and clear events to delete
    sc = evt_folder.GetContents(name + '.EvtShc')[0]
    scc = evt_folder.GetContents(name + '_clear' + '.EvtShc')[0]
    # delete short circuit and clear events if they exist
    if sc:
        sc.Delete()
    if scc:
        scc.Delete()
Here is the calling code:
# activate project and study case
sim = PowerFactorySimulation(FOLDER_NAME, PROJECT_NAME, STUDY_CASE_NAME)
# get all buses in the network
buses = sim.app.GetCalcRelevantObjects('*.ElmTerm')
# create result dictionaries
t = {}
f = {}
for bus in buses:
    # create a short circuit on every bus
    sim.create_short_circuit(
        target_name=bus.loc_name + '*.ElmTerm',
        time=2.0,
        duration=0.15)
    # prepare RMS simulation
    sim.prepare_dynamic_sim(monitored_variables=MONITORED_VARIABLES)
    # run RMS simulation
    sim.run_dynamic_sim()
    # get and store the generator response
    t[bus.loc_name], f[bus.loc_name] = sim.get_dynamic_results('*.ElmSym', 's:xspeed')
    # delete the old short circuit before creating a new one
    sim.delete_short_circuit()
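Without the full project it is hard to say why the event never fires, but one cheap sanity check (hedged, and using only the same API calls as above) is to print what actually landed in the active study case's event folder before running the RMS simulation: if p_target is empty or points at an unexpected object, the wildcard in target_name matched something other than the generator you meant.
# Hedged sanity check: inspect the events that were actually created.
evt_folder = sim.app.GetFromStudyCase('IntEvt')
for evt in evt_folder.GetContents('*.EvtShc'):
    target = evt.p_target.loc_name if evt.p_target else None
    print(evt.loc_name, 'time =', evt.time, 'target =', target)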

Using a Python queue with an external class - not in the same file

I have a question regarding threading and queueing in Python. I have a class that recognises faces and another script that is supposed to retrieve these recognised faces, and I'm unable to retrieve the recognised faces once recognition starts.
I posted another question that relates to this one, but here it's more about the actual user recognition part (just in case someone stumbles over my other question and thinks this may be a duplicate).
So as said, I have a class that uses imutils and face_recognition to do just that: face recognition. My issue with the class is that once it's started it recognises faces perfectly, but it's not possible from an outside class (from within another script) to call any other method, for example to retrieve a dict with the currently identified faces. I think this is probably because, once the actual recognition is called, calls to other methods within this class don't go through because of threading(?). I attached the complete code for reference below.
Here is the code that is supposed to start the recogniser class and retrieve the results from within another script:
class recognizerAsync(Thread):
    def __init__(self):
        super(recognizerAsync, self).__init__()
        print("initiating recognizer class from recognizerAsync")
        if (use_user == True):
            #myRecognizer = recognizer(consoleLog, debugMsg, run_from_flask)
            self.myRecognizer = the_recognizer.recognizerAsync(False, True, True)
            #face_thread = recognizerAsync(consoleLog, debugMsg, False)

    def run(self):
        print("starting up recognizer from callRecognizerThread")
        self.myRecognizer.run()
        if (use_user == True):
            self.myRecognizer.start()
            while (True):
                if (not q.full()):
                    #fd = random.randint(1, 10)
                    fd = self.myRecognizer.returnRecognizedFaces()
                    print(f"PRODUCER: putting into queue {fd}")
                    q.put(fd)
                #else:
                #    print("ERROR :: Queue q is full")
                time.sleep(10)
And I start this like so at the very end:
mirror = GUI(window)
mirror.setupGUI()
window.after(1000, mirror.updateNews)
face_thread = recognizerAsync()
face_thread.start()
window.mainloop()
My question is: how would I need to change either the recognizer class or the recognizerAsync class in the other script, so that while the method faceRecognizer() is running indefinitely, one can still call other methods, specifically returnRecognizedFaces()?
Thank you very much, folks.
#!/usr/bin/env python3
# import the necessary packages for face detection
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import pickle
import time
import base64
import json
from threading import Thread

class recognizerAsync(Thread):
    # How long in ms before a person detection is considered a new event
    graceTimeBeforeNewRecognition = 6000  # 60000
    # the time in ms when a person is detected
    timePersonDetected = 0
    # create an empty faces dictionary to compare later with repeated encounters
    faces_detected_dict = {"nil": 0}
    # Determine faces from the encodings.pickle file model created by train_model.py
    encodingsP = ""  # "encodings.pickle"
    # the known faces and embeddings are loaded in the class initialization
    #data = pickle.loads(open(encodingsP, "rb").read())
    data = ''
    # print dictionary of recognized faces on the console?
    print_faces = False
    debug_messages = False
    called_from_flask = True  # this changes the path to the encodings file

    def __init__(self, print_val=False, debug_val=False, called_from_flask=True):
        super(recognizerAsync, self).__init__()
        self.print_faces = print_val
        self.debug_messages = debug_val
        if (called_from_flask == False):
            encodingsP = "encodings.pickle"
        else:
            encodingsP = "Recognizer/encodings.pickle"
        # load the known faces and embeddings along with OpenCV's Haar
        # cascade for face detection
        self.data = pickle.loads(open(encodingsP, "rb").read())
        if (self.debug_messages == True):
            print("Faces class initialized")
    def run(self):
        self.faceRecognizer()

    def returnRecognizedFaces(self):
        if (self.debug_messages == True):
            print("from returnRecognizedFaces: returning: " + str({k: self.faces_detected_dict[k] for k in self.faces_detected_dict if k != 'nil'}))
        return {k: self.faces_detected_dict[k] for k in self.faces_detected_dict if k != 'nil'}

    def faceRecognizer(self):
        try:
            # initialize the video stream and allow the camera sensor to warm up
            # src = 0 : the built-in single webcam, could be your laptop webcam
            # src = 2 : I had to set it to 2 in order to use the USB webcam attached to my laptop
            #vs = VideoStream(src=2, framerate=10).start()
            vs = VideoStream(src=0, framerate=10).start()
            #vs = VideoStream(usePiCamera=True).start()
            time.sleep(2.0)
            # start the FPS counter
            fps = FPS().start()
            if (self.debug_messages == True):
                print("starting face detection - press Ctrl C to stop")
            # loop over frames from the video file stream
            while (True):
                # grab the frame from the threaded video stream and resize it
                # to 500px (to speed up processing)
                frame = vs.read()
                try:
                    frame = imutils.resize(frame, width=500)
                except AttributeError:
                    # Error: (h, w) = image.shape[:2]
                    # AttributeError: 'NoneType' object has no attribute 'shape'
                    break
                # detect the face boxes
                boxes = face_recognition.face_locations(frame)
                # compute the facial embeddings for each face bounding box
                encodings = face_recognition.face_encodings(frame, boxes)
                names = []
                # loop over the facial embeddings
                for encoding in encodings:
                    # attempt to match each face in the input image to our known encodings
                    matches = face_recognition.compare_faces(self.data["encodings"], encoding)
                    name = "unknown"  # if the face is not recognized, print "unknown"
                    timePersonDetected = time.time() * 1000.0
                    # check to see if we have found a match
                    if (True in matches):
                        # find the indexes of all matched faces, then initialize a
                        # dictionary to count the total number of times each face
                        # was matched
                        matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                        counts = {}
                        # loop over the matched indexes and maintain a count for
                        # each recognized face
                        for i in matchedIdxs:
                            name = self.data["names"][i]
                            counts[name] = counts.get(name, 0) + 1
                        # determine the recognized face with the largest number
                        # of votes (note: in the event of an unlikely tie Python
                        # will select the first entry in the dictionary)
                        name = max(counts, key=counts.get)
                        # If someone in the dataset is identified, print their name on the screen and provide them through REST
                        if (max(self.faces_detected_dict, key=self.faces_detected_dict.get) != name or timePersonDetected > self.faces_detected_dict[name] + self.graceTimeBeforeNewRecognition):
                            # put the face in the dictionary with the time it was detected, so we can
                            # provide this info in the REST endpoint for others - this is not really used
                            # internally, except for the timePersonDetected time comparison above
                            self.faces_detected_dict[name] = timePersonDetected
                    # update the list of names
                    names.append(name)
                    # exemplary way of cleaning up the dict and removing the nil entry - kept here for reference:
                    #new_dict = ({k: self.faces_detected_dict[k] for k in self.faces_detected_dict if k != 'nil'})
                    self.last_recognized_face = name
                    if (self.print_faces == True):
                        print(self.last_recognized_face)
                # clean up the dictionary
                new_dict = {}
                for k, v in list(self.faces_detected_dict.items()):
                    if (v + self.graceTimeBeforeNewRecognition) < (time.time() * 1000.0) and str(k) != 'nil':
                        if (self.debug_messages == True):
                            print('entry ' + str(k) + " dropped due to age")
                    else:
                        new_dict[k] = v
                self.faces_detected_dict = new_dict
                if (self.debug_messages == True):
                    print(f"faces dict: {self.faces_detected_dict}")
                # update the FPS counter
                fps.update()
                time.sleep(1)
        except KeyboardInterrupt:
            if (self.debug_messages == True):
                print("Ctrl-C received - cleaning up and exiting")
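On the question itself: a Thread's work only runs concurrently when it is launched via start(); calling run() directly (as the wrapper class above does) executes the recognizer loop in the calling thread and blocks everything after it. Once the recognizer runs in its own thread, shared state should be guarded so callers get a consistent view. Below is a minimal, hedged sketch of that pattern, with placeholder detection logic standing in for the camera loop; it is not a drop-in replacement for the class above.
from threading import Thread, Lock
import time

class SafeRecognizer(Thread):  # hypothetical simplified variant of the class above
    def __init__(self):
        super().__init__(daemon=True)
        self._lock = Lock()
        self._faces = {}

    def returnRecognizedFaces(self):
        with self._lock:  # callers get a consistent snapshot
            return dict(self._faces)

    def run(self):
        while True:  # stands in for the faceRecognizer() camera loop
            detected = {"alice": time.time() * 1000.0}  # placeholder detection
            with self._lock:
                self._faces.update(detected)
            time.sleep(1)

r = SafeRecognizer()
r.start()                         # run() now executes in its own thread
time.sleep(2)
print(r.returnRecognizedFaces())  # callable from the main thread at any time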

Communication with Thorlabs uc480 camera

I was able to get the current image from a Thorlabs uc480 camera using instrumental. My issue is when I try to adjust the parameters for grab_image: I can set cx and left to any value and get an image, but cy and top only work if cy=600 and top=300. The purpose is to create a GUI so that the user can select values for these parameters to zoom in/out of an image.
Here is my code:
import instrumental
from instrumental.drivers.cameras import uc480
from matplotlib.figure import Figure
import matplotlib.pyplot as plt

paramsets = instrumental.list_instruments()
cammer = instrumental.instrument(paramsets[0])

plt.figure()
framer = cammer.grab_image(timeout='1s', copy=True, n_frames=1,
                           exposure_time='5ms', cx=640, left=10, cy=600, top=300)
plt.pcolormesh(framer)
The above code does not give an image if I choose cy=600 and top=10. Is there a particular set of values to be used for these parameters? How can I get an image of the full sensor size?
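One thing worth ruling out before building the GUI (hedged, since I cannot test against your camera): uc480 sensors generally accept ROI offsets and sizes only in fixed increments, so arbitrary cy/top combinations may be rejected silently. Omitting the ROI keywords should return the full sensor, whose shape then tells you the legal upper bounds for zooming.
# Assumes `cammer` from the snippet above; no ROI kwargs -> full sensor image.
full_frame = cammer.grab_image(timeout='1s', copy=True, n_frames=1,
                               exposure_time='5ms')
print(full_frame.shape)  # (full_height, full_width): the limits for cy/top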
Thorlabs has a Python programming interface available as a download on their website. It is very well documented, and can be installed locally via pip.
Link:
https://www.thorlabs.com/software_pages/ViewSoftwarePage.cfm?Code=ThorCam
Here is an example of a simple capture algorithm that might help get you started:
import numpy as np  # needed below for np.reshape

from thorlabs_tsi_sdk.tl_camera import TLCameraSDK
from thorlabs_tsi_sdk.tl_mono_to_color_processor import MonoToColorProcessorSDK
from thorlabs_tsi_sdk.tl_camera_enums import SENSOR_TYPE

# open the TLCameraSDK dll
with TLCameraSDK() as sdk:
    cameras = sdk.discover_available_cameras()
    if len(cameras) == 0:
        print("Error: no cameras detected!")

    with sdk.open_camera(cameras[0]) as camera:
        #camera.disarm()  # ensure any previous session is closed
        # set up the camera for continuous acquisition
        camera.frames_per_trigger_zero_for_unlimited = 0
        camera.image_poll_timeout_ms = 2000  # 2 second timeout
        camera.arm(2)

        # save the image width and height for color processing
        image_width = camera.image_width_pixels
        image_height = camera.image_height_pixels

        # initialize a mono to color processor if this is a color camera
        is_color_camera = (camera.camera_sensor_type == SENSOR_TYPE.BAYER)
        mono_to_color_sdk = None
        mono_to_color_processor = None
        if is_color_camera:
            mono_to_color_sdk = MonoToColorProcessorSDK()
            mono_to_color_processor = mono_to_color_sdk.create_mono_to_color_processor(
                camera.camera_sensor_type,
                camera.color_filter_array_phase,
                camera.get_color_correction_matrix(),
                camera.get_default_white_balance_matrix(),
                camera.bit_depth
            )

        # begin acquisition
        camera.issue_software_trigger()
        # get the next frame
        frame = camera.get_pending_frame_or_null()

        # if the frame is null, keep polling until a frame arrives
        # or until max_attempts is reached
        frame_attempts = 0
        max_attempts = 10
        while frame is None:
            frame = camera.get_pending_frame_or_null()
            frame_attempts += 1
            if frame_attempts == max_attempts:
                raise TimeoutError("Timeout was reached while polling for a frame, program will now exit")

        image_data = frame.image_buffer
        if is_color_camera:
            # transform the raw image data into RGB color data
            color_data = mono_to_color_processor.transform_to_24(image_data, image_width, image_height)
            save_data = np.reshape(color_data, (image_height, image_width, 3))

        camera.disarm()
You can also process the image after capture with the PIL library.
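For instance, a minimal save-to-disk sketch (assuming the save_data array produced above, and that Pillow is installed):
from PIL import Image

# save_data has shape (image_height, image_width, 3) with 8-bit RGB values
img = Image.fromarray(save_data.astype('uint8'), mode='RGB')
img.save('capture.png')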

Play a random sequence of 4 sounds while a video is played in Psychopy?

I'm trying to create an experiment using PsychoPy.
Specifically, I'm trying to create a routine ("trial") where a video ("movie1") is presented and, at the same time, a sequence of 4 sounds (one per second) is played, randomly chosen from a list of 10 in an Excel file (sounds_routine.xlsx).
Here's what I have done so far:
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
                                STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np  # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
                   sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os  # handy system and path functions
import sys  # to get file system encoding
from psychopy.hardware import keyboard

# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '3.2.4'
expName = 'dsffdsfads'  # from the Builder filename that created this script
expInfo = {'participant': '', 'session': '001'}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion

# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])

# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath='/Users/Documents/dsffdsfads.py',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detailed verbose info
logFile = logging.LogFile(filename + '.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

endExpNow = False  # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001  # how close to onset before 'same' frame
# Start Code - component code to be run before the window creation

# Setup the Window
win = visual.Window(
    size=(1024, 768), fullscr=True, screen=0,
    winType='pyglet', allowGUI=False, allowStencil=False,
    monitor='testMonitor', color=[0, 0, 0], colorSpace='rgb',
    blendMode='avg', useFBO=True,
    units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess

# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()

# Initialize components for Routine "trial"
trialClock = core.Clock()
sound1 = sound.Sound(Sounds, secs=-1, stereo=True, hamming=True,
                     name='sound1')
sound1.setVolume(1)
movie1 = visual.MovieStim3(
    win=win, name='movie1',
    noAudio=True,
    filename='Movies/Random_4.mp4',
    ori=0, pos=(0, 0), opacity=1,
    loop=False,
    depth=-1.0,
)

from numpy.random import choice
# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine

# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=1, method='random',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('../Desktop/Countingpuppet/sounds_routine.xlsx',
                                    selection=choice(10, size=4, replace=False)),
    seed=None, name='trials')
thisExp.addLoop(trials)  # add the loop to the experiment
thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
    for paramName in thisTrial:
        exec('{} = thisTrial[paramName]'.format(paramName))

for thisTrial in trials:
    currentLoop = trials
    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
    if thisTrial != None:
        for paramName in thisTrial:
            exec('{} = thisTrial[paramName]'.format(paramName))

    # ------Prepare to start Routine "trial"-------
    # update component parameters for each repeat
    sound1.setSound(Sounds, hamming=True)
    sound1.setVolume(1, log=False)
    # keep track of which components have finished
    trialComponents = [sound1, movie1]
    for thisComponent in trialComponents:
        thisComponent.tStart = None
        thisComponent.tStop = None
        thisComponent.tStartRefresh = None
        thisComponent.tStopRefresh = None
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    # reset timers
    t = 0
    _timeToFirstFrame = win.getFutureFlipTime(clock="now")
    trialClock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
    frameN = -1
    continueRoutine = True

    # -------Run Routine "trial"-------
    while continueRoutine:
        # get current time
        t = trialClock.getTime()
        tThisFlip = win.getFutureFlipTime(clock=trialClock)
        tThisFlipGlobal = win.getFutureFlipTime(clock=None)
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # start/stop sound1
        if sound1.status == NOT_STARTED and tThisFlip >= 0.0 - frameTolerance:
            # keep track of start time/frame for later
            sound1.frameNStart = frameN  # exact frame index
            sound1.tStart = t  # local t and not accounting for scr refresh
            sound1.tStartRefresh = tThisFlipGlobal  # on global time
            sound1.play(when=win)  # sync with win flip

        # *movie1* updates
        if movie1.status == NOT_STARTED and tThisFlip >= 0.0 - frameTolerance:
            # keep track of start time/frame for later
            movie1.frameNStart = frameN  # exact frame index
            movie1.tStart = t  # local t and not accounting for scr refresh
            movie1.tStartRefresh = tThisFlipGlobal  # on global time
            win.timeOnFlip(movie1, 'tStartRefresh')  # time at next scr refresh
            movie1.setAutoDraw(True)

        # check for quit (typically the Esc key)
        if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
            core.quit()

        # check if all components have finished
        if not continueRoutine:  # a component has requested a forced-end of Routine
            break
        continueRoutine = False  # will revert to True if at least one component still running
        for thisComponent in trialComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break  # at least one component has not yet finished

        # refresh the screen
        if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
            win.flip()

    # -------Ending Routine "trial"-------
    for thisComponent in trialComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    sound1.stop()  # ensure sound has stopped at end of routine
    trials.addData('sound1.started', sound1.tStartRefresh)
    trials.addData('sound1.stopped', sound1.tStopRefresh)
    trials.addData('movie1.started', movie1.tStartRefresh)
    trials.addData('movie1.stopped', movie1.tStopRefresh)
    # the Routine "trial" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    thisExp.nextEntry()
# completed 1 repeats of 'trials'

# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()

# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename + '.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort()  # or data files will save again on exit
win.close()
core.quit()
The problem is that with np.random.choice only one of the numbers is played, not the entire sequence of four randomly chosen numbers without repetition. How can I do this?
Thanks in advance.
Not tested, but something like this:
FPS = 60  # Frame rate of your monitor

from random import choice
from psychopy import visual, sound
from psychopy.constants import FINISHED

win = visual.Window()
movie = visual.MovieStim3(win, 'my_file.avi')
sounds = [sound.Sound('sound1.wav'), sound.Sound('sound2.wav'),
          sound.Sound('sound3.wav'), sound.Sound('sound4.wav')]

frame = 1
while movie.status != FINISHED:
    movie.draw()               # draw the next frame of the movie
    win.flip()                 # show it; one iteration per screen refresh
    if frame % FPS == 0:       # if a second has passed
        choice(sounds).play()  # play a random sound
    frame += 1
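And for the "four sounds without repetition" part specifically, random.sample draws distinct items in one call. A hedged extension of the snippet above (the ten file names are hypothetical placeholders for your own list):
from random import sample

all_sounds = [sound.Sound('sound%d.wav' % i) for i in range(1, 11)]  # hypothetical 10 files
sequence = sample(all_sounds, 4)  # four distinct sounds, drawn once

frame, idx = 1, 0
while movie.status != FINISHED:
    movie.draw()
    win.flip()
    if frame % FPS == 0 and idx < len(sequence):  # once per second, 4 times total
        sequence[idx].play()
        idx += 1
    frame += 1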
