Pyomo energy storage system dispatch optimization - python

I'm trying to build an optimization model for an energy storage system using Pyomo. Given a household's demand in kWh and the electricity prices, I would like to minimize cost by charging and discharging the battery at the right times. I already have a working model for 1 year of data, and the model finds an optimal solution (see code below). However, when I try to solve the model for only three months (say, October to December), Pyomo returns the termination condition "unbounded" and I can't figure out why.
Model for 1 year of data:
#Battery parameters
battery_capacity = 252
total_energy = 13.1
usable_energy = 12.4
nominal_voltage = 51.8
ratio = total_energy/usable_energy
power = usable_energy / ratio
nominal_current = usable_energy / nominal_voltage * 1000
recharging_hours = battery_capacity/nominal_current
battery_level_threshold = 0
model = pyo.ConcreteModel()
#Set time period
model.T = pyo.Set(initialize=pyo.RangeSet(len(df_2021)),ordered=True)
#PARAMETERS:
model.b_efficiency = pyo.Param(initialize=0.9)
model.b_min_cap = pyo.Param(initialize=0)
model.b_max_cap = pyo.Param(initialize=12.4)
model.b_charge_power = pyo.Param(initialize=power)
model.b_discharge_power = pyo.Param(initialize=power)
model.spot_prices = pyo.Param(model.T,initialize=dict(enumerate(df_2021["Price/kWh"],1)),within=pyo.Any)
model.demand = pyo.Param(model.T, initialize=dict(enumerate(df_2021["Demand"],1)),within=pyo.Any)
#Variables : also the variable has to be indexed with the time T
model.b_soc = pyo.Var(model.T, domain = pyo.NonNegativeReals, bounds = (model.b_min_cap, model.b_max_cap))
model.b_discharge = pyo.Var(model.T, domain = pyo.NonNegativeReals)
model.b_charge = pyo.Var(model.T, domain = pyo.NonNegativeReals)
model.elect_purchased = pyo.Var(model.T, domain = pyo.NonNegativeReals)
#CONSTRAINTS
#Purchase constraint
def purchase_constraint(model, t):
    return model.elect_purchased[t] >= model.demand[t] - model.b_discharge[t] + model.b_charge[t]
#State of charge constraint
def soc_constraint(model, t):
    if t == model.T.first():
        return model.b_soc[t] == model.b_max_cap / 2  #- model.b_discharge[t] + model.b_charge[t]
    else:
        return model.b_soc[t] == model.b_soc[t-1] - model.b_discharge[t-1] + model.b_charge[t-1]
#Discharge and charge constraints
def discharge_constraint_1(model, t):
    """ Maximum discharge rate within a single hour """
    return model.b_discharge[t] <= model.b_discharge_power
def discharge_constraint_2(model, t):
    """ Limits the energy available to be discharged to the SOC minus the minimum SOC """
    return model.b_discharge[t] <= model.b_soc[t] - model.b_min_cap
def charge_constraint_1(model, t):
    """ Maximum charge rate within a single hour """
    return model.b_charge[t] <= model.b_charge_power
def charge_constraint_2(model, t):
    """ Limits the energy available to be charged to the maximum SOC minus the SOC """
    return model.b_charge[t] <= model.b_max_cap - model.b_soc[t]
model.purchase_c = pyo.Constraint(model.T, rule = purchase_constraint)
model.soc_c = pyo.Constraint(model.T, rule = soc_constraint)
model.discharge_c1 = pyo.Constraint(model.T,rule = discharge_constraint_1)
model.discharge_c2 = pyo.Constraint(model.T,rule = discharge_constraint_2)
model.charge_c1 = pyo.Constraint(model.T,rule = charge_constraint_1)
model.charge_c2 = pyo.Constraint(model.T,rule = charge_constraint_2)
#OBJECTIVE
expr = sum(model.elect_purchased[t] * model.spot_prices[t] for t in model.T)
model.objective = pyo.Objective(expr = expr, sense = pyo.minimize)
opt = pyo.SolverFactory('cbc',executable='/usr/bin/cbc')
results = opt.solve(model)
results.write()
Result for 1 year of data: the optimal solution is found.
However, when I change the dataset, using the SAME model structure and constraints, pyomo doesn't find a solution.
Model for 3 months:
#Battery parameters
battery_capacity = 252
total_energy = 13.1
usable_energy = 12.4
nominal_voltage = 51.8
ratio = total_energy/usable_energy
power = usable_energy / ratio
nominal_current = usable_energy / nominal_voltage * 1000
recharging_hours = battery_capacity/nominal_current
battery_level_threshold = 0
model = pyo.ConcreteModel()
#Set time period
model.T = pyo.Set(initialize=pyo.RangeSet(len(df_2021)),ordered=True)
#PARAMETERS:
model.b_efficiency = pyo.Param(initialize=0.9)
model.b_min_cap = pyo.Param(initialize=0)
model.b_max_cap = pyo.Param(initialize=12.4)
model.b_charge_power = pyo.Param(initialize=power)
model.b_discharge_power = pyo.Param(initialize=power)
model.spot_prices = pyo.Param(model.T,initialize=dict(enumerate(df_2021["Price/kWh"],1)),within=pyo.Any)
model.demand = pyo.Param(model.T, initialize=dict(enumerate(df_2021["Demand"],1)),within=pyo.Any)
#Variables : also the variable has to be indexed with the time T
model.b_soc = pyo.Var(model.T, domain = pyo.NonNegativeReals, bounds = (model.b_min_cap, model.b_max_cap))
model.b_discharge = pyo.Var(model.T, domain = pyo.NonNegativeReals)
model.b_charge = pyo.Var(model.T, domain = pyo.NonNegativeReals)
model.elect_purchased = pyo.Var(model.T, domain = pyo.NonNegativeReals)
#CONSTRAINTS
#Purchase constraint
def purchase_constraint(model, t):
    return model.elect_purchased[t] >= model.demand[t] - model.b_discharge[t] + model.b_charge[t]
#State of charge constraint
def soc_constraint(model, t):
    if t == model.T.first():
        return model.b_soc[t] == model.b_max_cap / 2  #- model.b_discharge[t] + model.b_charge[t]
    else:
        return model.b_soc[t] == model.b_soc[t-1] - model.b_discharge[t-1] + model.b_charge[t-1]
#Discharge and charge constraints
def discharge_constraint_1(model, t):
    """ Maximum discharge rate within a single hour """
    return model.b_discharge[t] <= model.b_discharge_power
def discharge_constraint_2(model, t):
    """ Limits the energy available to be discharged to the SOC minus the minimum SOC """
    return model.b_discharge[t] <= model.b_soc[t] - model.b_min_cap
def charge_constraint_1(model, t):
    """ Maximum charge rate within a single hour """
    return model.b_charge[t] <= model.b_charge_power
def charge_constraint_2(model, t):
    """ Limits the energy available to be charged to the maximum SOC minus the SOC """
    return model.b_charge[t] <= model.b_max_cap - model.b_soc[t]
model.purchase_c = pyo.Constraint(model.T, rule = purchase_constraint)
model.soc_c = pyo.Constraint(model.T, rule = soc_constraint)
model.discharge_c1 = pyo.Constraint(model.T,rule = discharge_constraint_1)
model.discharge_c2 = pyo.Constraint(model.T,rule = discharge_constraint_2)
model.charge_c1 = pyo.Constraint(model.T,rule = charge_constraint_1)
model.charge_c2 = pyo.Constraint(model.T,rule = charge_constraint_2)
#OBJECTIVE
expr = sum(model.elect_purchased[t] * model.spot_prices[t] for t in model.T)
model.objective = pyo.Objective(expr = expr, sense = pyo.minimize)
opt = pyo.SolverFactory('cbc',executable='/usr/bin/cbc')
results = opt.solve(model)
results.write()
Pyomo returns:
WARNING: Loading a SolverResults object with a warning status into
    model.name="unknown";
      - termination condition: unbounded
      - message from solver: <undefined>
# ==========================================================
# = Solver Results                                         =
# ==========================================================
# ----------------------------------------------------------
#   Problem Information
# ----------------------------------------------------------
Problem:
- Name: unknown
  Lower bound: None
  Upper bound: inf
  Number of objectives: 1
  Number of constraints: 16993
  Number of variables: 11329
  Number of nonzeros: 2832
  Sense: minimize
# ----------------------------------------------------------
#   Solver Information
# ----------------------------------------------------------
Solver:
- Status: warning
  User time: -1.0
  System time: 0.45
  Wallclock time: 0.58
  Termination condition: unbounded
  Termination message: Model was proven to be unbounded.
  Statistics:
    Branch and bound:
      Number of bounded subproblems: 0
      Number of created subproblems: 0
    Black box:
      Number of iterations: 0
  Error rc: 0
  Time: 0.6151924133300781
Since the only change between the two runs is the length of the dataframe, I don't really know where to look for the error.

Given that the model works on some data, but not on an alternate data source, we can obviously focus a bit on the data set (which isn't shown).
We have a huge clue in the error report: the problem is unbounded. This means that nothing prevents the objective function from running away to infinity, or to negative infinity in the case of a minimization problem. So let's look at your objective function. You are minimizing:
sum(elect_purchased[t] * spot_prices[t] for t in T)
The domain of your elect_purchased variable is the non-negative reals (good), spot_prices[t] is a parameter read in from data, and you are minimizing. So the only way this could run away to negative infinity is if there are one (or more) negative prices in your data, which the solver would then exploit, and elect_purchased would be unbounded.
So, comb your data set. If you are using pandas, just do a logical search for rows where the price is < 0; you'll likely find at least one. Then you'll have to decide whether that is a typo or whether it is realistic to have a negative price (which can happen in some systems). If it is legitimate, you will have to impose some other constraint to keep the model bounded in that situation.
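For example, a quick check (assuming the same df_2021 dataframe and "Price/kWh" column used in the model above) could look like this:
negative_prices = df_2021[df_2021["Price/kWh"] < 0]
print(negative_prices)

# If negative prices turn out to be legitimate, one option is to cap purchases,
# e.g. with an upper bound on elect_purchased; max_purchase is a hypothetical
# limit you would have to choose:
# model.elect_purchased = pyo.Var(model.T, domain=pyo.NonNegativeReals,
#                                 bounds=(0, max_purchase))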

Related

ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all() while training Reinforcement learning agent

I am trying to run the deep reinforcement learning algorithm DDPG, using keras-rl2, on a gym environment provided by the robo-gym library (https://github.com/jr-robotics/robo-gym). I am facing this error while training and don't know how to fix it. I would appreciate it if someone could help me solve this issue.
I am providing the code which I am running. Also, I am very new to reinforcement learning.
import gym
import robo_gym
from robo_gym.wrappers.exception_handling import ExceptionHandling
import random
import numpy as np

##########################################
class ActionNormalizer(gym.ActionWrapper):
    """Rescale and relocate the actions."""
    def action(self, action: np.ndarray) -> np.ndarray:
        """Change the range (-1, 1) to (low, high)."""
        low = self.action_space.low
        high = self.action_space.high
        scale_factor = (high - low) / 2
        reloc_factor = high - scale_factor
        action = action * scale_factor + reloc_factor
        action = np.clip(action, low, high)
        return action

    def reverse_action(self, action: np.ndarray) -> np.ndarray:
        """Change the range (low, high) to (-1, 1)."""
        low = self.action_space.low
        high = self.action_space.high
        scale_factor = (high - low) / 2
        reloc_factor = high - scale_factor
        action = (action - reloc_factor) / scale_factor
        action = np.clip(action, -1.0, 1.0)
        return action

target_machine_ip = '127.0.0.1'  # or other machine 'xxx.xxx.xxx.xxx'

# initialize environment
env = gym.make('EndEffectorPositioningURSim-v0', ur_model='ur10e', ip=target_machine_ip, gui=True)
env = ActionNormalizer(env)

###############
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Input, Concatenate, Activation
from tensorflow.keras.optimizers import Adam
from rl.agents import DDPGAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.random import OrnsteinUhlenbeckProcess

##########
assert len(env.action_space.shape) == 1
# Make action space continuous
nb_actions = env.action_space.shape[0]
print("nb_actions: ", nb_actions)

########
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
actor.add(Dense(126, activation='relu'))
actor.add(Dense(24, activation='relu'))
actor.add(Dense(nb_actions, activation='linear'))
actor.add(Flatten())
print(actor.summary())

##############################################
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = tensorflow.keras.layers.Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = Concatenate()([action_input, flattened_observation])
x = Dense(126, activation='relu')(x)
x = Dense(24, activation='relu')(x)
x = Dense(1, activation='linear')(x)
critic = tensorflow.keras.models.Model(inputs=[action_input, observation_input], outputs=x)
print(critic.summary())

##############################################################
memory = SequentialMemory(limit=100000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3)
agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
                  memory=memory, nb_steps_warmup_critic=100, nb_steps_warmup_actor=100,
                  random_process=random_process, gamma=.90, target_model_update=1e-3)
agent.compile(Adam(lr=1e-3), metrics=['mae'])
agent.fit(env, nb_steps=5000, visualize=False, verbose=1, nb_max_episode_steps=80)
Earlier I was getting the error action not contained in action space, so I used the ActionNormalizer class and that error is gone; but now, during training, I am receiving the ValueError mentioned in the headline.
Here is the complete error which I am receiving.
Training for 5000 steps ...
Interval 1 (0 steps performed)
314/10000 [..............................] - ETA: 50:41 - reward: -0.0066
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/addy/robo-gym-master/docs/examples/random_agent_sim.py in <cell line: 2>()
156 #%%
----> 157 agent.fit(env, nb_steps=5000, visualize=False, verbose=1, nb_max_episode_steps=200)
File ~/.local/lib/python3.8/site-packages/rl/core.py:181, in Agent.fit(self, env, nb_steps, action_repetition, callbacks, verbose, visualize, nb_max_start_steps, start_step_policy, log_interval, nb_max_episode_steps)
179 observation, r, done, info = self.processor.process_step(observation, r, done, info)
180 for key, value in info.items():
--> 181 if not np.isreal(value):
182 continue
183 if key not in accumulated_info:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
I am also sharing the environment definition code for your kind reference.
This is the environment which I am using for training.
import copy
import numpy as np
import gym
from typing import Tuple
from scipy.spatial.transform import Rotation as R

from robo_gym.utils.exceptions import InvalidStateError, RobotServerError
from robo_gym.utils import utils
from robo_gym_server_modules.robot_server.grpc_msgs.python import robot_server_pb2
from robo_gym.envs.simulation_wrapper import Simulation
from robo_gym.envs.ur.ur_base_env import URBaseEnv

# base, shoulder, elbow, wrist_1, wrist_2, wrist_3
JOINT_POSITIONS = [0.0, -2.5, 1.5, -1.5, -1.4, 0.0]
RANDOM_JOINT_OFFSET = [1.5, 0.25, 0.5, 1.0, 0.4, 3.14]
# distance to target that needs to be reached
DISTANCE_THRESHOLD = 0.1

class EndEffectorPositioningUR(URBaseEnv):
    """Universal Robots UR end effector positioning environment.

    Args:
        rs_address (str): Robot Server address. Formatted as 'ip:port'. Defaults to None.
        fix_base (bool): Whether or not the base joint stays fixed or is movable. Defaults to False.
        fix_shoulder (bool): Whether or not the shoulder joint stays fixed or is movable. Defaults to False.
        fix_elbow (bool): Whether or not the elbow joint stays fixed or is movable. Defaults to False.
        fix_wrist_1 (bool): Whether or not the wrist 1 joint stays fixed or is movable. Defaults to False.
        fix_wrist_2 (bool): Whether or not the wrist 2 joint stays fixed or is movable. Defaults to False.
        fix_wrist_3 (bool): Whether or not the wrist 3 joint stays fixed or is movable. Defaults to True.
        ur_model (str): determines which UR model will be used in the environment. Defaults to 'ur5'.

    Attributes:
        ur (:obj:): Robot utilities object.
        client (:obj:str): Robot Server client.
        real_robot (bool): True if the environment is controlling a real robot.
    """
    def __init__(self, rs_address=None, fix_base=False, fix_shoulder=False, fix_elbow=False, fix_wrist_1=False, fix_wrist_2=False, fix_wrist_3=True, ur_model='ur5', rs_state_to_info=True, restrict_wrist_1=True, **kwargs):
        super().__init__(rs_address, fix_base, fix_shoulder, fix_elbow, fix_wrist_1, fix_wrist_2, fix_wrist_3, ur_model, rs_state_to_info)
        self.restrict_wrist_1 = restrict_wrist_1
        self.successful_ending = False
        self.last_position = np.zeros(6)

    def _get_observation_space(self) -> gym.spaces.Box:
        """Get environment observation space.

        Returns:
            gym.spaces: Gym observation space object.
        """
        # Joint position range tolerance
        pos_tolerance = np.full(6, 0.1)
        # Joint positions range used to determine if there is an error in the sensor readings
        max_joint_positions = np.add(np.full(6, 1.0), pos_tolerance)
        min_joint_positions = np.subtract(np.full(6, -1.0), pos_tolerance)
        # Target coordinates range
        target_range = np.full(3, np.inf)
        # Joint velocities range
        max_joint_velocities = np.array([np.inf] * 6)
        min_joint_velocities = -np.array([np.inf] * 6)
        # Cartesian coords of the target location
        max_target_coord = np.array([np.inf] * 3)
        min_target_coord = -np.array([np.inf] * 3)
        # Cartesian coords of the end effector
        max_ee_coord = np.array([np.inf] * 3)
        min_ee_coord = -np.array([np.inf] * 3)
        # Previous action
        max_action = np.array([1.01] * 6)
        min_action = -np.array([1.01] * 6)
        # Definition of environment observation_space
        max_obs = np.concatenate((target_range, max_joint_positions, max_joint_velocities, max_target_coord, max_ee_coord, max_action))
        min_obs = np.concatenate((-target_range, min_joint_positions, min_joint_velocities, min_target_coord, min_ee_coord, min_action))
        return gym.spaces.Box(low=min_obs, high=max_obs, dtype=np.float32)

    def _set_initial_robot_server_state(self, rs_state, ee_target_pose) -> robot_server_pb2.State:
        string_params = {"object_0_function": "fixed_position"}
        float_params = {"object_0_x": ee_target_pose[0],
                        "object_0_y": ee_target_pose[1],
                        "object_0_z": ee_target_pose[2]}
        state = {}
        state_msg = robot_server_pb2.State(state=state, float_params=float_params, string_params=string_params, state_dict=rs_state)
        return state_msg

    def _robot_server_state_to_env_state(self, rs_state) -> np.ndarray:
        """Transform state from Robot Server to environment format.

        Args:
            rs_state (list): State in Robot Server format.

        Returns:
            numpy.array: State in environment format.
        """
        # Target polar coordinates
        # Transform cartesian coordinates of target to polar coordinates
        # with respect to the end effector frame
        target_coord = np.array([
            rs_state['object_0_to_ref_translation_x'],
            rs_state['object_0_to_ref_translation_y'],
            rs_state['object_0_to_ref_translation_z']])
        ee_to_ref_frame_translation = np.array([
            rs_state['ee_to_ref_translation_x'],
            rs_state['ee_to_ref_translation_y'],
            rs_state['ee_to_ref_translation_z']])
        ee_to_ref_frame_quaternion = np.array([
            rs_state['ee_to_ref_rotation_x'],
            rs_state['ee_to_ref_rotation_y'],
            rs_state['ee_to_ref_rotation_z'],
            rs_state['ee_to_ref_rotation_w']])
        ee_to_ref_frame_rotation = R.from_quat(ee_to_ref_frame_quaternion)
        ref_frame_to_ee_rotation = ee_to_ref_frame_rotation.inv()
        # to invert the homogeneous transformation
        # R' = R^-1
        ref_frame_to_ee_quaternion = ref_frame_to_ee_rotation.as_quat()
        # t' = - R^-1 * t
        ref_frame_to_ee_translation = -ref_frame_to_ee_rotation.apply(ee_to_ref_frame_translation)
        target_coord_ee_frame = utils.change_reference_frame(target_coord, ref_frame_to_ee_translation, ref_frame_to_ee_quaternion)
        target_polar = utils.cartesian_to_polar_3d(target_coord_ee_frame)
        # Joint positions
        joint_positions = []
        joint_positions_keys = ['base_joint_position', 'shoulder_joint_position', 'elbow_joint_position',
                                'wrist_1_joint_position', 'wrist_2_joint_position', 'wrist_3_joint_position']
        for position in joint_positions_keys:
            joint_positions.append(rs_state[position])
        joint_positions = np.array(joint_positions)
        # Normalize joint position values
        joint_positions = self.ur.normalize_joint_values(joints=joint_positions)
        # Joint Velocities
        joint_velocities = []
        joint_velocities_keys = ['base_joint_velocity', 'shoulder_joint_velocity', 'elbow_joint_velocity',
                                 'wrist_1_joint_velocity', 'wrist_2_joint_velocity', 'wrist_3_joint_velocity']
        for velocity in joint_velocities_keys:
            joint_velocities.append(rs_state[velocity])
        joint_velocities = np.array(joint_velocities)
        # Compose environment state
        state = np.concatenate((target_polar, joint_positions, joint_velocities, target_coord, ee_to_ref_frame_translation, self.previous_action))
        return state.astype(np.float32)

    def get_robot_server_composition(self) -> list:
        rs_state_keys = [
            'object_0_to_ref_translation_x',
            'object_0_to_ref_translation_y',
            'object_0_to_ref_translation_z',
            'object_0_to_ref_rotation_x',
            'object_0_to_ref_rotation_y',
            'object_0_to_ref_rotation_z',
            'object_0_to_ref_rotation_w',
            'base_joint_position',
            'shoulder_joint_position',
            'elbow_joint_position',
            'wrist_1_joint_position',
            'wrist_2_joint_position',
            'wrist_3_joint_position',
            'base_joint_velocity',
            'shoulder_joint_velocity',
            'elbow_joint_velocity',
            'wrist_1_joint_velocity',
            'wrist_2_joint_velocity',
            'wrist_3_joint_velocity',
            'ee_to_ref_translation_x',
            'ee_to_ref_translation_y',
            'ee_to_ref_translation_z',
            'ee_to_ref_rotation_x',
            'ee_to_ref_rotation_y',
            'ee_to_ref_rotation_z',
            'ee_to_ref_rotation_w',
            'in_collision'
        ]
        return rs_state_keys

    def reset(self, joint_positions=JOINT_POSITIONS, ee_target_pose=None, randomize_start=False, continue_on_success=False) -> np.ndarray:
        """Environment reset.

        Args:
            joint_positions (list[6] or np.array[6]): robot joint positions in radians.
            ee_target_pose (list[6] or np.array[6]): [x,y,z,r,p,y] target end effector pose.
            randomize_start (bool): if True the starting position is randomized within RANDOM_JOINT_OFFSET.
            continue_on_success (bool): if True the robot will continue from its current position when the last episode was a success.
        """
        if joint_positions:
            assert len(joint_positions) == 6
        else:
            joint_positions = JOINT_POSITIONS
        self.elapsed_steps = 0
        self.previous_action = np.zeros(6)
        # Initialize environment state
        state_len = self.observation_space.shape[0]
        state = np.zeros(state_len)
        rs_state = dict.fromkeys(self.get_robot_server_composition(), 0.0)
        # Randomize initial robot joint positions
        if randomize_start:
            joint_positions_low = np.array(joint_positions) - np.array(RANDOM_JOINT_OFFSET)
            joint_positions_high = np.array(joint_positions) + np.array(RANDOM_JOINT_OFFSET)
            joint_positions = np.random.default_rng().uniform(low=joint_positions_low, high=joint_positions_high)
        # Continue from last position if last episode was a success
        if self.successful_ending and continue_on_success:
            joint_positions = self.last_position
        # Set initial robot joint positions
        self._set_joint_positions(joint_positions)
        # Update joint positions in rs_state
        rs_state.update(self.joint_positions)
        # Set target End Effector pose
        if ee_target_pose:
            assert len(ee_target_pose) == 6
        else:
            ee_target_pose = self._get_target_pose()
        # Set initial state of the Robot Server
        state_msg = self._set_initial_robot_server_state(rs_state, ee_target_pose)
        if not self.client.set_state_msg(state_msg):
            raise RobotServerError("set_state")
        # Get Robot Server state
        rs_state = self.client.get_state_msg().state_dict
        # Check if the length and keys of the Robot Server state received are correct
        self._check_rs_state_keys(rs_state)
        # Convert the initial state from Robot Server format to environment format
        state = self._robot_server_state_to_env_state(rs_state)
        # Check if the environment state is contained in the observation space
        if not self.observation_space.contains(state):
            raise InvalidStateError()
        # Check if current position is in the range of the initial joint positions
        for joint in self.joint_positions.keys():
            if not np.isclose(self.joint_positions[joint], rs_state[joint], atol=0.05):
                raise InvalidStateError('Reset joint positions are not within defined range')
        return state

    def step(self, action) -> Tuple[np.array, float, bool, dict]:
        if type(action) == list:
            action = np.array(action)
        action = action.astype(np.float32)
        state, reward, done, info = super().step(action)
        self.previous_action = self.add_fixed_joints(action)
        if done:
            if info['final_status'] == 'success':
                self.successful_ending = True
                joint_positions = []
                joint_positions_keys = ['base_joint_position', 'shoulder_joint_position', 'elbow_joint_position',
                                        'wrist_1_joint_position', 'wrist_2_joint_position', 'wrist_3_joint_position']
                for position in joint_positions_keys:
                    joint_positions.append(self.rs_state[position])
                joint_positions = np.array(joint_positions)
                self.last_position = joint_positions
        return state, reward, done, info

    def reward(self, rs_state, action) -> Tuple[float, bool, dict]:
        reward = 0
        done = False
        info = {}
        # Reward weight for reaching the goal position
        g_w = 2
        # Reward weight for collision (ground, table or self)
        c_w = -1
        # Reward weight according to the distance to the goal
        d_w = -0.005
        # Calculate distance to the target
        target_coord = np.array([rs_state['object_0_to_ref_translation_x'], rs_state['object_0_to_ref_translation_y'], rs_state['object_0_to_ref_translation_z']])
        ee_coord = np.array([rs_state['ee_to_ref_translation_x'], rs_state['ee_to_ref_translation_y'], rs_state['ee_to_ref_translation_z']])
        euclidean_dist_3d = np.linalg.norm(target_coord - ee_coord)
        # Reward base
        reward += d_w * euclidean_dist_3d
        if euclidean_dist_3d <= DISTANCE_THRESHOLD:
            reward = g_w * 1
            done = True
            info['final_status'] = 'success'
            info['target_coord'] = target_coord
        if rs_state['in_collision']:
            reward = c_w * 1
            done = True
            info['final_status'] = 'collision'
            info['target_coord'] = target_coord
        elif self.elapsed_steps >= self.max_episode_steps:
            done = True
            info['final_status'] = 'max_steps_exceeded'
            info['target_coord'] = target_coord
        return reward, done, info

    def _get_target_pose(self) -> np.ndarray:
        """Generate target End Effector pose.

        Returns:
            np.array: [x,y,z,alpha,theta,gamma] pose.
        """
        return self.ur.get_random_workspace_pose()

    def env_action_to_rs_action(self, action) -> np.array:
        """Convert environment action to Robot Server action"""
        rs_action = copy.deepcopy(action)
        if self.restrict_wrist_1:
            min_action = -1
            max_action = 1
            max_wrist1 = 0.31
            min_wrist1 = -3.48
            wrist1 = (((rs_action[3] - min_action) * (max_wrist1 - min_wrist1)) / (max_action - min_action)) + min_wrist1
            # Scale action
            rs_action = np.multiply(rs_action, self.abs_joint_pos_range)
            rs_action[3] = wrist1
            # Convert action indexing from ur to ros
            rs_action = self.ur._ur_joint_list_to_ros_joint_list(rs_action)
        else:
            rs_action = super().env_action_to_rs_action(rs_action)
        return rs_action

class EndEffectorPositioningURSim(EndEffectorPositioningUR, Simulation):
    cmd = "roslaunch ur_robot_server ur_robot_server.launch \
        world_name:=tabletop_sphere50_no_collision.world \
        reference_frame:=base_link \
        max_velocity_scale_factor:=0.1 \
        action_cycle_rate:=10 \
        rviz_gui:=false \
        gazebo_gui:=true \
        objects_controller:=true \
        rs_mode:=1object \
        n_objects:=1.0 \
        object_0_model_name:=sphere50_no_collision \
        object_0_frame:=target"

    def __init__(self, ip=None, lower_bound_port=None, upper_bound_port=None, gui=False, ur_model='ur5', **kwargs):
        self.cmd = self.cmd + ' ' + 'ur_model:=' + ur_model
        Simulation.__init__(self, self.cmd, ip, lower_bound_port, upper_bound_port, gui, **kwargs)
        EndEffectorPositioningUR.__init__(self, rs_address=self.robot_server_ip, ur_model=ur_model, **kwargs)

class EndEffectorPositioningURRob(EndEffectorPositioningUR):
    real_robot = True

# roslaunch ur_robot_server ur_robot_server.launch ur_model:=ur5 real_robot:=true rviz_gui:=true gui:=true reference_frame:=base max_velocity_scale_factor:=0.2 action_cycle_rate:=20 objects_controller:=true rs_mode:=1object n_objects:=1.0 object_0_frame:=target
Here is how my environment looks; most of the time this error appears when the robot end effector reaches the ground or crashes into it.
Sorry for the long post.
Thanks in advance.
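A note on the likely mechanism, judging from the traceback and the environment code above: keras-rl's fit loop calls np.isreal(value) on every entry of the info dict, and this environment's reward() puts a NumPy array into info['target_coord'] whenever an episode ends (success, collision or max steps), which matches the observation that the error tends to appear when the end effector hits the ground. np.isreal on an array returns an array, and using that array in an if statement raises exactly this ValueError. A minimal, hypothetical workaround is a wrapper that drops non-scalar entries from info before they reach keras-rl:
import gym
import numpy as np

class ScalarInfoWrapper(gym.Wrapper):
    """Hypothetical helper: keep only scalar entries of the info dict,
    so keras-rl's np.isreal(value) check never receives an array."""
    def step(self, action):
        state, reward, done, info = self.env.step(action)
        info = {k: v for k, v in info.items() if np.isscalar(v)}
        return state, reward, done, info

# env = ScalarInfoWrapper(ActionNormalizer(env))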

Pyomo parameter estimation with time-varying input signals

I want to try Pyomo for parameter estimation problems, and this is what I have so far. First the parameters and variables are created. The unknowns in the parameter estimation problem are p1 to p6. The time-varying inputs are TVL, mdot and TU.
model = pyo.ConcreteModel()
model.t = dae.ContinuousSet(initialize=time)
model.p1 = pyo.Var(domain=pyo.NonNegativeReals, initialize=5.993867814123688)
model.p2 = pyo.Var(domain=pyo.NonNegativeReals, initialize=0.5254928953213035)
model.p3 = pyo.Var(domain=pyo.NonNegativeReals, initialize=50.507139006670045)
model.p4 = pyo.Var(domain=pyo.NonNegativeReals, initialize=50.349545087852945)
model.p5 = pyo.Var(domain=pyo.NonNegativeReals, initialize=0.03248392142362977)
model.p6 = pyo.Var(domain=pyo.NonNegativeReals, initialize=0.10106006227941483)
model.TU = pyo.Param(model.t, default=273.15)
model.TVL = pyo.Param(model.t, default=333.15)
model.mdot = pyo.Param(model.t, default=0.01)
model.TR = pyo.Var(model.t)
model.TRL = pyo.Var(model.t)
model.TW = pyo.Var(model.t)
model.dTRdt = dae.DerivativeVar(model.TR, wrt=model.t)
model.dTRLdt = dae.DerivativeVar(model.TRL, wrt=model.t)
model.dTWdt = dae.DerivativeVar(model.TW, wrt=model.t)
model.t_meas = pyo.Set(within=model.t, initialize=time)
model.TR_meas = pyo.Param(model.t_meas, initialize=TR_dict)
The system consists of three ODEs.
def _diffeq1(model, t):
    return model.dTRdt[t] == model.p1 * (model.TRL[t] - model.TR[t]) - model.p2 * (model.TR[t] - model.TW[t])

def _diffeq2(model, t):
    return model.dTRLdt[t] == model.p3 * model.mdot[t] * (model.TVL[t] - model.TRL[t]) - model.p4 * (model.TRL[t] - model.TR[t])

def _diffeq3(model, t):
    return model.dTWdt[t] == model.p5 * (model.TR[t] - model.TW[t]) - model.p6 * (model.TW[t] - model.TU[t])

model.diffeq1 = pyo.Constraint(model.t, rule=_diffeq1)
model.diffeq2 = pyo.Constraint(model.t, rule=_diffeq2)
model.diffeq3 = pyo.Constraint(model.t, rule=_diffeq3)
This is the objective function.
def _obj(model):
    return sum((model.TR[i] - model.TR_meas[i])**2 for i in model.t_meas)
Simulation works fine, following the documentation:
model.var_input = pyo.Suffix(direction=pyo.Suffix.LOCAL)
model.var_input[model.TU] = TU_dict
model.var_input[model.TVL] = TVL_dict
model.var_input[model.mdot] = mdot_dict
sim = dae.Simulator(model, package="casadi")
tsim, profiles = sim.simulate(numpoints=3600, integrator="cvodes", varying_inputs=model.var_input)
but I am having a hard time getting this to work with the optimization. Is there a recommended way to perform the optimization with time-varying inputs?
EDIT:
This is the code I use for the optimization.
discretizer = pyo.TransformationFactory("dae.finite_difference")
discretizer.apply_to(model, wrt=model.t, nfe=200, scheme="BACKWARD")
solver = pyo.SolverFactory("ipopt")
results = solver.solve(model, tee=True)
I changed the code above from model.TU = pyo.Var(model.t) to model.TU = pyo.Param(model.t, default=273.15) (and likewise for TVL and mdot); otherwise ipopt also tried to find optimal trajectories for TU, TVL and mdot. With this new implementation, only the default values are used in the optimization. I added the following figure showing TU to illustrate my point.
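One way to get actual input trajectories into the optimization (a sketch, not an official Pyomo recipe; TU_dict, TVL_dict and mdot_dict are the measurement dictionaries used for the simulation above) is to declare the inputs as mutable Params in place of the default-only declarations, discretize, and then fill in every discretization point by interpolating the measured signals. Note that the objective defined above also has to be attached to the model:
import numpy as np

# Mutable, time-indexed input parameters (replacing the default-only declarations)
model.TU = pyo.Param(model.t, default=273.15, mutable=True)
model.TVL = pyo.Param(model.t, default=333.15, mutable=True)
model.mdot = pyo.Param(model.t, default=0.01, mutable=True)

model.obj = pyo.Objective(rule=_obj)

discretizer = pyo.TransformationFactory("dae.finite_difference")
discretizer.apply_to(model, wrt=model.t, nfe=200, scheme="BACKWARD")

def set_input(param, data):
    # Interpolate a measured signal (dict: time -> value) onto the discretization points
    times = sorted(data.keys())
    values = [data[tt] for tt in times]
    for t in model.t:
        param[t] = float(np.interp(t, times, values))

set_input(model.TU, TU_dict)
set_input(model.TVL, TVL_dict)
set_input(model.mdot, mdot_dict)

solver = pyo.SolverFactory("ipopt")
results = solver.solve(model, tee=True)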

AttributeError: 'bool' object has no attribute 'ndim'

from quantopian.pipeline import Pipeline
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import SimpleMovingAverage
from quantopian.pipeline.filters.morningstar import Q1500US
from quantopian.pipeline.factors import AnnualizedVolatility
from quantopian.pipeline.factors.morningstar import MarketCap
from quantopian.pipeline import factors, filters, classifiers

Market_Cap = (MarketCap > 1000000000)

def lowvolport():
    return filters.make_us_equity_universe(
        target_size=50,
        rankby=factors.AnnualizedVolatility(window_length=90),
        mask=Market_Cap,
    )

def initialize(context):
    # Schedule our rebalance function to run at the start of each week.
    schedule_function(my_rebalance, date_rules.week_start(), time_rules.market_open(hours=1))
    # Record variables at the end of each day.
    schedule_function(my_record_vars, date_rules.every_day(), time_rules.market_close())
    # Create our pipeline and attach it to our algorithm.
    my_pipe = make_pipeline()
    attach_pipeline(my_pipe, 'my_pipeline')

def make_pipeline():
    """
    Create our pipeline.
    """
    # Base universe set to the Q1500US.
    base_universe = Q1500US()
    Market_Cap = (MarketCap > 1000000000)
    # Filter to select securities to long.
    volatility_bottom = AnnualizedVolatility(inputs=[USEquityPricing.close], window_length=90, mask=base_universe)
    volatility_bottom_50 = volatility_bottom.bottom(50)
    # Filter for all securities that we want to trade.
    securities_to_trade = (Market_Cap & volatility_bottom_50)
    return Pipeline(
        columns={
            'Market_Cap': Market_Cap
        },
        screen=(securities_to_trade),
    )

def my_compute_weights(context):
    """
    Compute ordering weights.
    """
    # Compute even target weights for our long positions and short positions.
    long_weight = 0.5 / len(context.longs)
    short_weight = -0.5 / len(context.shorts)
    return long_weight, short_weight

def before_trading_start(context, data):
    # Gets our pipeline output every day.
    context.output = pipeline_output('my_pipeline')
    # Go long in securities for which the 'longs' value is True.
    context.longs = context.output[context.output['longs']].index.tolist()
    # Go short in securities for which the 'shorts' value is True.
    context.shorts = context.output[context.output['shorts']].index.tolist()
    context.long_weight, context.short_weight = my_compute_weights(context)

def my_rebalance(context, data):
    """
    Rebalance weekly.
    """
    for security in context.portfolio.positions:
        if security not in context.longs and security not in context.shorts and data.can_trade(security):
            order_target_percent(security, 0)
    for security in context.longs:
        if data.can_trade(security):
            order_target_percent(security, context.long_weight)
    for security in context.shorts:
        if data.can_trade(security):
            order_target_percent(security, context.short_weight)

def my_record_vars(context, data):
    """
    Record variables at the end of each day.
    """
    longs = shorts = 0
    for position in context.portfolio.positions.itervalues():
        if position.amount > 0:
            longs += 1
        elif position.amount < 0:
            shorts += 1
    # Record our variables.
    record(leverage=context.account.leverage, long_count=longs, short_count=shorts)
Hi everyone, I'm new to Python, with some Matlab experience. The code above is what I recently wrote in Quantopian. The error message is
AttributeError: 'bool' object has no attribute 'ndim'
There was a runtime error on line 27.
Line 27 is
my_pipe = make_pipeline()
The above is my first question.
My second question is: based on the existing algorithm, how can I fit a VAR model over every three months using the formula
Y_t = a_0 + a_1*Y_{t-1} + ... + a_p*Y_{t-p} + b_1*X_{t-1} + ... + b_p*X_{t-p} + u_t
with Y_t being the return over 90 days and X_{t-1}, ..., X_{t-p} being lags of volatility?
Thanks in advance! Please let me know if any details need to be specified.
You are missing parentheses on line 38 when initializing the MarketCap factor:
Market_Cap = (MarketCap() > 1000000000)
After that you will get a KeyError on line 69, because you haven't added 'longs' to your pipeline's output (same for 'shorts').
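A sketch of what make_pipeline could return instead; the 'shorts' filter here is purely illustrative, since the posted strategy only defines a low-volatility long set, so you would have to decide what, if anything, to short:
def make_pipeline():
    base_universe = Q1500US()
    market_cap = (MarketCap() > 1000000000)  # note the parentheses after MarketCap
    volatility = AnnualizedVolatility(inputs=[USEquityPricing.close], window_length=90, mask=base_universe)
    longs = market_cap & volatility.bottom(50)
    shorts = market_cap & volatility.top(50)  # illustrative only
    return Pipeline(
        columns={
            'longs': longs,
            'shorts': shorts,
        },
        screen=(longs | shorts),
    )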

How can i avoid the getting this error in pyomo "Error retrieving component Pd[1]: The component has not been constructed."

I am new to Pyomo. I am trying to run a simple maximization problem, but I keep getting this error message: Error retrieving component Pd[1]: The component has not been constructed. Only the last 5 constraints give me this problem; the first three constraints work fine.
I'm using this command in the IPython console to run it: !pyomo --solver-manager=neos --solver=cbc battpyomo.py battpyomo.dat
In the data file, I only define the set T and the parameter p.
set T :=
1
2
3
4
5
6
7
8
9;
param: p :=
1 51.12
2 48.79
3 39.56
4 36.27
5 36.16
6 34.90
7 33.33
8 21.16
9 24.42;
Here's the code for battpyomo.py:
from pyomo.environ import *
model = AbstractModel()
########Sets#########
#Hours
model.T = Set()
#########Parameters##########
#Price
model.p = Param(model.T,within=PositiveReals)
#Variable OM cost Discharge
model.VOMd = Param(initialize=15)
#Variable OM cost Charge
model.VOMc = Param(initialize=10)
#State of Charge min
model.SOCmin = Param(initialize=5)
#State of charge max
model.SOCmax = Param(initialize=25)
#ESS efficiency
model.m = Param(initialize=0.99)
#Max discharge rate
model.Pdmax = Param(initialize=20)
#Max charge rate
model.Pcmax = Param(initialize=20)
#Initial State of Charge
model.SOCini = Param(initialize=25)
###########Variables##########
#Power discharged
model.Pd = Var(model.T, within=NonNegativeIntegers)
#Power charged
model.Pc= Var(model.T, within=NonNegativeIntegers)
#Charging Status
model.Uc = Var(model.T, within=NonNegativeIntegers)
#Discharging status
model.Ud = Var(model.T, within=NonNegativeIntegers)
#State of Charge
model.SOC = Var(model.T, within=NonNegativeIntegers)
#######Objective##########
def profit_rule(model):
    return sum(model.Pd[i]*model.p[i] - model.Pd[i]*model.VOMd - model.Pc[i]*model.p[i] - model.Pc[i]*model.VOMc for i in model.T)
model.profit = Objective(rule=profit_rule, sense=maximize)
#######Constraints##########
def status_rule(model, i):
    return (model.Ud[i] + model.Uc[i] <= 1)
model.status = Constraint(model.T, rule=status_rule)

def Cmax_rule(model, i):
    return model.Pc[i] <= model.Pcmax*model.Uc[i]
model.Cmax = Constraint(model.T, rule=Cmax_rule)

def Dmax_rule(model, i):
    return model.Pd[i] <= model.Pdmax*model.Ud[i]
model.Dmax = Constraint(model.T, rule=Dmax_rule)

def Dlim_rule(module, i):
    return model.Pd[i] <= model.SOC[i] - model.SOCmin
model.Dlim = Constraint(model.T, rule=Dlim_rule)

def Clim_rule(module, i):
    return model.Pc[i] <= model.SOCmax - model.SOC[i]
model.Clim = Constraint(model.T, rule=Clim_rule)

def Smin_rule(module, i):
    return model.SOC[i] >= model.SOCmin
model.Smin = Constraint(model.T, rule=Smin_rule)

def Smax_rule(module, i):
    return model.SOC[i] <= model.SOCmax
model.Smax = Constraint(model.T, rule=Smax_rule)

def E_rule(module, i):
    if i == 1:
        return model.SOC[i] == model.SOCini + model.Pc[i]*model.m - model.Pd[i]
    else:
        return model.SOC[i] == model.SOC[i-1] + model.Pc[i]*model.m - model.Pd[i]
model.E = Constraint(model.T, rule=E_rule)
In some of the constraints listed above, the argument of the rule is named "module" while "model" is used in the expression, e.g.:
def Dlim_rule(module, i):
    return model.Pd[i] <= model.SOC[i] - model.SOCmin
model.Dlim = Constraint(model.T, rule=Dlim_rule)
The definition of the rule and the constraint should be:
def Dlim_rule(model, i):
    return model.Pd[i] <= model.SOC[i] - model.SOCmin
model.Dlim = Constraint(model.T, rule=Dlim_rule)
Incidentally, the first argument (the model object) can be called anything you like, but the name of the argument must match how the model is used within the rule. When the argument is named "module", the name "model" inside the rule refers to the global AbstractModel, which is never constructed; Pyomo builds a separate instance from the .dat file and passes that instance in as the first argument, which is why you see the "component has not been constructed" error. For example, this is also valid:
def Dlim_rule(m, i):
    return m.Pd[i] <= m.SOC[i] - m.SOCmin
model.Dlim = Constraint(model.T, rule=Dlim_rule)
You shouldn't need the very first "import pyomo" line; the only import you should need is "from pyomo.environ import *". If this doesn't solve your problem, post the data file you're using (or a simplified version of it); it seems like the data isn't being loaded into the Pyomo model correctly.

Optimizing performance of SciPy Minimize while using Concurrent.Futures?

I'm trying to figure out how to speed up a scipy.minimize function.
minimize() is called thousands of times. I run it in parallel using a ProcessPoolExecutor. bootstrap() is the parent function.
# Imports implied by the code below (pl and display appear to be
# matplotlib's pylab and IPython.display):
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import pandas as pd
import matplotlib.pylab as pl
from IPython import display
from scipy.optimize import minimize

def optimize_weights(forecasts, prices, instrument):
    guess = [1/forecasts.shape[1]] * forecasts.shape[1]
    bounds = [(0.0, 1.0)] * forecasts.shape[1]
    cons = {'type': 'eq', 'fun': lambda x: 1 - sum(x)}

    def function(w, forecasts, prices, instrument):
        wf = (w*forecasts).mean(axis=1)
        wf = wf*10/wf.std()
        wf = wf.clip(-20, 20)
        l = accountCurve(wf, prices, slippage=instrument.slippage, per_contract_cost=instrument.per_contract_cost)
        return -l.sharpe()

    result = minimize(function, guess, (forecasts, prices, instrument), bounds=bounds, method='SLSQP', tol=0.0001, constraints=cons, options={'disp': False, 'eps': 1e0})
    return result.x

def mp_optimize_weights(samples, prices, instrument):
    with ProcessPoolExecutor() as executor:
        return executor.map(partial(optimize_weights, prices=prices, instrument=instrument), samples)

def bootstrap(instrument, parallel_process=True):
    print("Parallel Process: ", parallel_process)
    forecasts = instrument.forecasts().dropna()
    prices = instrument.prices().reset_index('Contract', drop=True)
    prices = prices[forecasts.index]
    years = list(set(prices.index.year))
    years.sort()
    result = {}
    for year in years:
        sample_length = np.int(prices[:str(year)].size/10)
        end_of_sample_selection_space = prices[:str(year)].tail(1).index[0] - pd.Timedelta(days=sample_length)
        sample_dates = pd.to_datetime(np.random.choice(prices[:end_of_sample_selection_space].index, 100))
        if(sample_length > 50):
            samples = [forecasts.loc[date:date+pd.Timedelta(days=sample_length)] for date in sample_dates]
            if parallel_process is True:
                weights = pd.DataFrame(list(mp_optimize_weights(samples, prices[:str(year)], instrument=instrument)))
            else:
                weights = pd.DataFrame(list(map(partial(optimize_weights, prices=prices[:str(year)], instrument=instrument), samples)))
            if len(weights) < 2:
                print('Weights error')
                break
            result[year] = weights.mean()
            print(year, sample_length)
    output = pd.DataFrame.from_dict(result).transpose()
    output.columns = forecasts.columns
    pl.plot(output)
    display.clear_output(wait=True)
    display.display(pl.gcf())
    return output
import numpy as np
import pandas as pd

class accountCurve():
    def __init__(self, forecasts, prices, annual_volatility_target=0.25, multiplier=1, per_contract_cost=0, slippage=0, capital=100000, costs=True):
        if prices.index.names[0] == 'Contract':
            prices = prices.reset_index('Contract', drop=True)
        # Adjust for contract multiplier/pricing in pennies
        prices = prices*multiplier/100
        self.prices = prices
        daily_volatility_target = annual_volatility_target/np.sqrt(252)
        instrument_value_volatility = prices.diff().ewm(span=36, min_periods=36).std()
        self.notional_position = (forecasts * daily_volatility_target * capital).divide(10.0 * instrument_value_volatility[forecasts.index], axis=0)
        self.notional_position.dropna(inplace=True)
        # Chunk trades to be at least a 10% move in position (to reduce trading costs)
        self.position = chunk_trades(self.notional_position)
        # Round positions to integers
        self.position = np.around(self.notional_position)
        # self.position.mark_to_market = self.position.multiply(prices, axis=0)
        self.trades = self.position.diff()
        # Calculate returns
        self.gross_returns = (prices.diff().shift(-1)*self.position).dropna()
        if costs:
            self.costs = self.trades.abs() * per_contract_cost
            self.slippage = self.trades.abs() * slippage * multiplier/100
            self.returns = (self.gross_returns - self.slippage - self.costs).dropna()
        else:
            self.returns = self.gross_returns

    def sharpe(self):
        return self.returns.mean()/self.returns.std()*np.sqrt(252)

    def losses(self):
        return [z for z in self.returns if z < 0]

    def sortino(self):
        returns = self.returns.pct_change()
        return returns.mean()/np.std(losses(returns))*np.sqrt(252)

    def plot(self):
        self.returns.cumsum().plot()

def chunk_trades(A):
    # Take a list of notional positions and filter so that trades are only greater than 10% of notional position
    last = A[0]
    new = []
    for x in A.iteritems():
        if np.abs((x[1]-last)/last) > 0.1:
            new.append(x[1])
            last = x[1]
        else:
            new.append(last)
    s = pd.Series(new, index=A.index)
    return s
On my data, this takes around 45 minutes to run.
I'd like to know:
Is my approach to parallel processing correct? Should I be using threads instead of processes?
Can I reconfigure minimize to finish faster? This is bootstrapping, a Monte Carlo based sampling method, so it may not require such an accurate result.
Anything else I can do to speed it up?
In an ideal world, I'd like to speed it up an order of magnitude.
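On the second question, one knob worth trying (a sketch; suitable values depend on your data) is loosening SLSQP's stopping criteria, since the bootstrap averages many solves and each individual solve does not need to be precise. SLSQP accepts ftol, eps and maxiter through options:
result = minimize(function, guess, (forecasts, prices, instrument),
                  bounds=bounds, method='SLSQP', constraints=cons,
                  options={'ftol': 1e-2,   # much looser than the default
                           'maxiter': 50,  # cap iterations per solve
                           'eps': 1e0,
                           'disp': False})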
