Not sure why my code is not working. The code below generates the following error:
ValueError: Initializer for variable while_7/Variable/ is from inside a control-flow construct, such as a loop or conditional. When creating a variable inside a loop or conditional, use a lambda as the initializer.
I need a quick fix for this loop.
For each tf.while_loop iteration with index i: if the minimum value of the distance-matrix column dist[:, i] is <= the caliper, output the index of the first minimum, i.e. (y, i), then set dist[y, :] = [None, None, None, None].
Given the example vectors myx and myy,
the while loop should output the match-pair indices [[0, None], [1, 5], [2, 4], [3, None]].
import math
import numpy as np
import tensorflow as tf

myx = np.array([2.4, 0.2, 0.5, 1.6])
myy = np.array([10.1, 3.2, 7.5, 8.6, 1, 0.1, 11, 18])

Xxx = np.transpose(np.repeat(myx[:, np.newaxis], myy.size, axis=1))
Yyy = np.repeat(myy[:, np.newaxis], myx.size, axis=1)

X = tf.placeholder(tf.float64, shape=(myy.size, myx.size))
Y = tf.placeholder(tf.float64, shape=(myy.size, myx.size))

# define a caliper
calp = tf.constant(0.5, tf.float64)

with tf.device('/cpu:0'):
    dist = tf.abs(tf.subtract(X, Y))

# Use an explicit shape for `i`.
i = tf.placeholder(dtype='int64', shape=[])

# Add a second unused argument to `condition()`.
def condition(i, *arg):
    return i <= myx.size - 1

# Add a second unused argument to `b()`.
def b(i, temp_pr, _):
    tfslic = dist[0:myy.size, i]
    # Drop the `axis` argument from `tf.reduce_min()`.
    minVal = tf.reduce_min(tfslic)
    y = tf.cond(
        tf.less_equal(minVal, calp),
        # Reshape the output of `tf.argmin()` to be a scalar.
        lambda: tf.argmin(tfslic, 0),
        # Explicitly convert the false-branch value to `tf.int64`.
        lambda: tf.constant(99999, dtype=tf.int64))
    ### Need to drop the matched one ###
    # newdist = tf.stack([dist[0:y], dist[y:myx.size - 1]])
    ### Need to save every match (2-dim)
    '''
    :::::::::::: PROBLEM STARTS HERE ::::::::::::
    For each tf.while_loop iteration with index "i":
    if the minimum value of the distance-matrix column dist[:, i] <= caliper,
    output the index of the first minimum, i.e. (y, i),
    then set dist[y, :] = [None, None, None, None].
    Given the example vectors "myx", "myy",
    the while loop should output the match-pair indices [[0, None], [1, 5], [2, 4], [3, None]].
    '''
    varDist = tf.Variable(temp_pr)
    temp_af = tf.cond(
        tf.less_equal(minVal, calp),
        lambda: tf.assign(varDist[y, :], [9999., 9999., 9999., 9999.]),
        lambda: tf.Variable(dist))
    return i + 1, y, temp_af

# Add a dummy initial value for the second loop variable.
# Rename the first return value to `i_out` to avoid clashing with `i` above.
i_out, r, dist = tf.while_loop(condition, b, [i, dist, tf.constant(0, dtype=tf.int64)])
# mypair = tf.stack([i_out - 1, r], 0)

sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
dmat = sess.run(dist, feed_dict={X: Xxx, Y: Yyy, i: 0})
sess.close()
print(dmat)
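For reference, here is a minimal plain-NumPy sketch of the matching logic described above (greedy, first minimum wins, matched rows knocked out with a large sentinel). It only illustrates the intended output, not the TensorFlow fix:

import numpy as np

myx = np.array([2.4, 0.2, 0.5, 1.6])
myy = np.array([10.1, 3.2, 7.5, 8.6, 1, 0.1, 11, 18])
caliper = 0.5

dist = np.abs(myy[:, None] - myx[None, :])   # shape (len(myy), len(myx))
pairs = []
for i in range(myx.size):
    y = int(np.argmin(dist[:, i]))           # first row with the minimum distance
    if dist[y, i] <= caliper:
        pairs.append([i, y])
        dist[y, :] = 9999.                   # knock out the matched row
    else:
        pairs.append([i, None])
print(pairs)  # [[0, None], [1, 5], [2, 4], [3, None]]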
I am starting from the optimization functions in scipy.
I tried to create my code by copying the "Find optimal vector that minimizes function" solution.
I have an array that contains series in columns. I need to multiply each of them by a weight so that the sum of the last row of these columns multiplied by the weights gives a given number (constraint).
The sum of the series multiplied by the weights gives a new series, from which I extract the max drawdown (mdd), and I want to minimize this mdd.
I wrote my code as best as I could (2 months of Python and 3 hours of scipy) and can't resolve the error message on the function used to solve the problem.
Here is my code and any help would be much appreciated:
import numpy as np
from scipy.optimize import fmin_slsqp

# based on: https://stackoverflow.com/questions/41145643/find-optimal-vector-that-minimizes-function
# the number of columns (and so of weights) can vary; it should be generic, regardless of the number of columns

def mdd(serie):  # finding the max drawdown of a series (put aside not to create add'l problems)
    min = np.nanargmax(np.fmax.accumulate(serie) - serie)
    max = np.nanargmax((serie)[:min])
    return serie[np.nanargmax((serie)[:min])] - serie[min]  # max drawdown

# defining the input data
# mat is an array of 5 columns containing series of independent data
mat = np.array([[1, 0, 0, 1, 1],
                [2, 0, 5, 3, 4],
                [3, 2, 4, 3, 7],
                [4, 1, 3, 3.1, -6],
                [5, 0, 2, 5, -7],
                [6, -1, 4, 1, -8]]).astype('float32')
w = np.ndarray(shape=(5)).astype('float32')  # 1D vector for the weights used to multiply the columns
w0 = np.array([1/5, 1/5, 1/5, 1/5, 1/5]).astype('float32')  # initial weights (all equal as a starting point)
fixed_value = 4.32  # as a result of constraint nb 1

# testing the operations that are going to be used in the minimization
series = np.sum(mat * w0, axis=1)

# objective:
# minimize the mdd of the series by modifying the weights (w)
def test(w, mat):
    series = np.sum(mat * w, axis=1)
    return mdd(series)

# constraints:
def cons1(last, w, fixed_value):  # fixed_value = 4.32
    # the sum of the weights multiplied by the last value of each column must be equal to this fixed_value
    return np.sum(mat[-1, :] * w) - fixed_value

def cons2(w):  # the sum of the weights must be equal to 1
    return np.sum(w) - 1

# solution:
# looking for the optimal set of weights (w) that minimizes the mdd, with the two constraints and bounds respected
# all w values must be between 0 and 1
result = fmin_slsqp(test, w0, f_eqcons=[cons1, cons2], bounds=[(0.0, 1.0)]*len(w), args=(mat, fixed_value, w0), full_output=True)
weights, fW, its, imode, smode = result
print(weights)
You weren't that far off the mark. The biggest problem lies in the mdd function: when there is no drawdown, your function produces an empty array as an intermediate result, which argmax can then no longer handle.
def mdd(serie):  # finding the max drawdown of a series (put aside not to create add'l problems)
    i = np.argmax(np.maximum.accumulate(serie) - serie)  # end of the period
    start = serie[:i]
    # check if there is dd at all
    if not start.any():
        return 0
    j = np.argmax(start)  # start of period
    return serie[j] - serie[i]  # max drawdown
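A quick check of the fixed function (example values chosen here just for illustration):

import numpy as np
print(mdd(np.array([1., 3., 2., 5.])))  # 1.0: drawdown from 3 down to 2
print(mdd(np.array([1., 2., 3.])))      # 0: monotonically increasing, no drawdown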
In addition, you must make sure that the parameter list is the same for all functions involved (cost function and constraints).
# objective:
# minimize the mdd of the series by modifying the weights (w)
def test(w, mat, fixed_value):
    series = mat @ w
    return mdd(series)

# constraints:
def cons1(w, mat, fixed_value):  # fixed_value = 4.32
    # the sum of the weights multiplied by the last value of each column must be equal to this fixed_value
    return mat[-1, :] @ w - fixed_value

def cons2(w, mat, fixed_value):  # the sum of the weights must be equal to 1
    return np.sum(w) - 1

# solution:
# looking for the optimal set of weights (w) that minimizes the mdd, with the two constraints and bounds respected
# all w values must be between 0 and 1
result = fmin_slsqp(test, w0, eqcons=[cons1, cons2], bounds=[(0.0, 1.0)]*len(w), args=(mat, fixed_value), full_output=True)
One more remark: you can make the matrix-vector multiplications much leaner with the @-operator.
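For instance, with mat and w0 from the question, mat @ w0 gives the same row-wise result as the original element-wise formulation:

import numpy as np
print(np.allclose(mat @ w0, np.sum(mat * w0, axis=1)))  # True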
I want to loop over a tensor that contains a list of ints and apply a function to each of the elements.
Inside the function, each element looks up its value in a Python dict.
I have tried the easy way with tf.map_fn, which works for a simple add function, as in the following code:
import tensorflow as tf

def trans_1(x):
    return x + 10

a = tf.constant([1, 2, 3])
b = tf.map_fn(trans_1, a)
with tf.Session() as sess:
    res = sess.run(b)
    print(str(res))
# output: [11 12 13]
But the following code throws a KeyError: <tf.Tensor 'map_8/while/TensorArrayReadV3:0' shape=() dtype=int32> exception:
import tensorflow as tf

kv_dict = {1: 11, 2: 12, 3: 13}

def trans_2(x):
    return kv_dict[x]

a = tf.constant([1, 2, 3])
b = tf.map_fn(trans_2, a)
with tf.Session() as sess:
    res = sess.run(b)
    print(str(res))
My TensorFlow version is 1.13.1. Thanks in advance.
There is a simple way to achieve what you are trying to do.
The problem is that the function passed to map_fn must take tensors as its parameters and return a tensor. However, your function trans_2 takes a plain Python int as a parameter and returns another Python int. That's why your code doesn't work.
However, TensorFlow provides a simple way to wrap ordinary Python functions, tf.py_func, which you can use in your case as follows:
import tensorflow as tf

kv_dict = {1: 11, 2: 12, 3: 13}

def trans_2(x):
    return kv_dict[x]

def wrapper(x):
    return tf.cast(tf.py_func(trans_2, [x], tf.int64), tf.int32)

a = tf.constant([1, 2, 3])
b = tf.map_fn(wrapper, a)
with tf.Session() as sess:
    res = sess.run(b)
    print(str(res))
You can see I have added a wrapper function, which expects a tensor parameter and returns a tensor; that's why it can be used in map_fn. The cast is needed because Python uses 64-bit integers by default, whereas TensorFlow uses 32-bit integers here.
You cannot use a function like that, because the parameter x is a TensorFlow tensor, not a Python value. So, in order for that to work, you would have to turn your dictionary into a tensor as well, but it's not so simple because keys in the dictionary may not be sequential.
You can instead solve this problem without mapping, by doing something similar to what is proposed here for NumPy. In TensorFlow, you could implement it like this:
import tensorflow as tf

def replace_by_dict(x, d):
    # Get keys and values from dictionary
    keys, values = zip(*d.items())
    keys = tf.constant(keys, x.dtype)
    values = tf.constant(values, x.dtype)
    # Make a sequence for the range of values in the input
    v_min = tf.reduce_min(x)
    v_max = tf.reduce_max(x)
    r = tf.range(v_min, v_max + 1)
    r_shape = tf.shape(r)
    # Mask replacements that are out of the input range
    mask = (keys >= v_min) & (keys <= v_max)
    keys = tf.boolean_mask(keys, mask)
    values = tf.boolean_mask(values, mask)
    # Replace values in the sequence with the corresponding replacements
    scatter_idx = tf.expand_dims(keys, 1) - v_min
    replace_mask = tf.scatter_nd(
        scatter_idx, tf.ones_like(values, dtype=tf.bool), r_shape)
    replace_values = tf.scatter_nd(scatter_idx, values, r_shape)
    replacer = tf.where(replace_mask, replace_values, r)
    # Gather the replacement value or the same value if it was not modified
    return tf.gather(replacer, x - v_min)

# Test
kv_dict = {1: 11, 2: 12, 3: 13}
with tf.Graph().as_default(), tf.Session() as sess:
    a = tf.constant([1, 2, 3])
    print(sess.run(replace_by_dict(a, kv_dict)))
    # [11, 12, 13]
This allows values in the input tensor that have no replacement (they are left as they are), and it does not require all of the dictionary keys to appear in the input. It should be efficient unless the minimum and maximum values in your input are very far apart.
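If the range-based approach does not fit (for example when the keys span a very wide range), another option in TF 1.x could be a lookup table; a rough sketch, assuming tf.contrib.lookup is available and using -1 as an arbitrary default for keys missing from the dict:

import tensorflow as tf

kv_dict = {1: 11, 2: 12, 3: 13}
keys, values = zip(*kv_dict.items())
table = tf.contrib.lookup.HashTable(
    tf.contrib.lookup.KeyValueTensorInitializer(list(keys), list(values)),
    default_value=-1)  # returned for keys not in the dict

a = tf.constant([1, 2, 3])
b = table.lookup(a)
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(b))  # [11 12 13]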
I am trying to produce a very simple example combining TensorArray and while_loop:
# 1000 sequences of length 100
matrix = tf.placeholder(tf.int32, shape=(100, 1000), name="input_matrix")
matrix_rows = tf.shape(matrix)[0]
ta = tf.TensorArray(tf.float32, size=matrix_rows)
ta = ta.unstack(matrix)
init_state = (0, ta)
condition = lambda i, _: i < n
body = lambda i, ta: (i + 1, ta.write(i, ta.read(i) * 2))

# run the graph
with tf.Session() as sess:
    (n, ta_final) = sess.run(tf.while_loop(condition, body, init_state),
                             feed_dict={matrix: tf.ones(tf.float32, shape=(100, 1000))})
    print(ta_final.stack())
But I am getting the following error:
ValueError: Tensor("while/LoopCond:0", shape=(), dtype=bool) must be from the same graph as Tensor("Merge:0", shape=(), dtype=float32).
Does anyone have an idea what the problem is?
There are several things in your code to point out. First, you don't need to unstack the matrix into the TensorArray to use it inside the loop; you can safely reference the matrix tensor inside the body and index it using matrix[i] notation. Another issue is the data-type mismatch between your matrix (tf.int32) and the TensorArray (tf.float32): based on your code, you're multiplying the matrix ints by 2 and writing the result into the array, so it should be int32 as well. Finally, when you wish to read the final result of the loop, the correct operation is TensorArray.stack(), which is what you need to run in your session.run call.
Here's a working example:
import numpy as np
import tensorflow as tf

# 1000 sequences of length 100
matrix = tf.placeholder(tf.int32, shape=(100, 1000), name="input_matrix")
matrix_rows = tf.shape(matrix)[0]
ta = tf.TensorArray(dtype=tf.int32, size=matrix_rows)
init_state = (0, ta)
condition = lambda i, _: i < matrix_rows
body = lambda i, ta: (i + 1, ta.write(i, matrix[i] * 2))
n, ta_final = tf.while_loop(condition, body, init_state)

# get the final result
ta_final_result = ta_final.stack()

# run the graph
with tf.Session() as sess:
    # print the output of ta_final_result
    print(sess.run(ta_final_result, feed_dict={matrix: np.ones(shape=(100, 1000), dtype=np.int32)}))
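If you specifically want to keep the unstack/read pattern from the question, a sketch along these lines should also work (separate input and output arrays, both int32):

import numpy as np
import tensorflow as tf

matrix = tf.placeholder(tf.int32, shape=(100, 1000), name="input_matrix")
matrix_rows = tf.shape(matrix)[0]
ta_in = tf.TensorArray(dtype=tf.int32, size=matrix_rows).unstack(matrix)
ta_out = tf.TensorArray(dtype=tf.int32, size=matrix_rows)

condition = lambda i, _: i < matrix_rows
body = lambda i, ta: (i + 1, ta.write(i, ta_in.read(i) * 2))
_, ta_final = tf.while_loop(condition, body, (0, ta_out))

with tf.Session() as sess:
    result = sess.run(ta_final.stack(),
                      feed_dict={matrix: np.ones((100, 1000), dtype=np.int32)})
    print(result.shape)  # (100, 1000)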
Does TensorFlow support determining the shape of a Tensor at runtime?
The problem is to build a constant tensor at runtime based on the input vector length_q. The number of columns of the target tensor is the sum of length_q. The code snippet is shown below; the length of length_q is fixed to 64.
T = tf.reduce_sum(length_q, 0)[0]
N = np.shape(length_q)[0]
wm = np.zeros((N, T), dtype=np.float32)
# Something irrelevant.
count = 0
for i in xrange(N):
    ones = np.ones(length_q[i])
    wm[i][count:count + length_q[i]] = ones
    count += length_q[i]
return tf.Constant(wm)
Update
I want to create a dynamic tensor according to the input length_q, which is an input vector (64×1). The shape of the new tensor depends on the sum of length_q, because the data in length_q changes in each batch. The current code snippet is as follows:
def some_matrix(length_q):
    T = tf.reduce_sum(length_q, 0)[0]
    N = np.shape(length_q)[0]
    wm = np.zeros((N, T), dtype=np.float32)
    count = 0
    return wm

def network_inference(length_q):
    wm = tf.constant(some_matrix(length_q));
    ...
The problem probably occurs because length_q is a placeholder, so its sum is not an actual value when the matrix is built. Is there a way to solve this problem?
It sounds like the tf.fill() op is what you need. This op allows you to specify a shape as a tf.Tensor (i.e. a runtime value) along with a value:
def some_matrix(length_q):
    T = tf.reduce_sum(length_q, 0)[0]
    N = tf.shape(length_q)[0]
    wm = tf.fill([T, N], 0.0)
    return wm
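That gives an all-zeros tensor with a runtime-determined shape. If the goal is the full 0/1 "staircase" mask that the original loop builds (row i has length_q[i] ones, starting where row i-1's ones end), a vectorised sketch could look like this, assuming length_q is a 1-D integer tensor:

import tensorflow as tf

def staircase_mask(length_q):
    # length_q: 1-D int32 tensor of per-row block lengths
    length_q = tf.reshape(length_q, [-1])
    T = tf.reduce_sum(length_q)                   # total number of columns
    starts = tf.cumsum(length_q, exclusive=True)  # first column of each row's block
    ends = starts + length_q                      # one past the last column of each block
    cols = tf.range(T)
    mask = (cols >= starts[:, None]) & (cols < ends[:, None])
    return tf.cast(mask, tf.float32)              # shape (N, T)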
It's not clear exactly what you are calculating. If you need an (N, T) matrix of ones, you can generate it like this:
T = tf.constant(20.0, tf.float32)  # tf variable which is the reduced sum; 20.0 is an example float value
T = tf.cast(T, tf.int32)           # columns must be integers
N = 10                             # if numpy integer - assuming np.shape gives 10
# N = length_q.get_shape()[0]      # if it's a tensor, replace 'length_q' with your tensor name
wm = tf.ones([N, T], dtype=tf.float32)  # N rows and T columns
sess = tf.Session()
sess.run(tf.initialize_all_variables())
sess.run(wm)
I have seen that transpose and reshape together can help, but I don't know how to use them.
E.g. dimshuffle(0, 'x')
What is its equivalent using transpose and reshape, or is there a better way?
Thank you.
There are three relevant ops for implementing Theano's dimshuffle in TensorFlow:
tf.transpose() is used to permute the dimensions of a tensor. If the pattern specified in the arguments to dimshuffle is a permutation of the input tensor's dimensions (i.e. there is no 'x' or missing dimension) you can use tf.transpose() to implement dimshuffle().
tf.expand_dims() is used to add one or more size-1 dimensions to a tensor. This handles the case where 'x' is specified as part of the dimshuffle() pattern, but does not reorder the existing dimensions.
tf.squeeze() is used to remove one or more size-1 dimensions from a tensor. This handles the case where a dimension is omitted from a dimshuffle() pattern, but it does not reorder the existing dimensions.
Assuming that the input is a vector, your example (dimshuffle(0, 'x')) can be expressed using tf.expand_dims() only:
input = tf.placeholder(tf.float32, [None])  # Defines an arbitrary-sized vector.
result = tf.expand_dims(input, 1)
print(result.get_shape())  # ==> TensorShape([Dimension(None), Dimension(1)])
Taking a more complicated example, dimshuffle(1, 'x', 0) applied to a matrix would be:
input = tf.placeholder(tf.float32, [128, 32])  # Defines a matrix.
output = tf.expand_dims(tf.transpose(input, [1, 0]), 1)
print(output.get_shape())
# ==> TensorShape([Dimension(32), Dimension(1), Dimension(128)])
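Since the question also mentions reshape: for the simple vector case, the same result can be written with tf.reshape, although tf.expand_dims expresses the intent more directly:

input = tf.placeholder(tf.float32, [None])
result = tf.reshape(input, [-1, 1])  # same shape as tf.expand_dims(input, 1)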
I implemented dimshuffle for TensorFlow in our framework Returnn (here). The code is this:
def expand_multiple_dims(x, axes, name="expand_multiple_dims"):
    """
    :param tf.Tensor x:
    :param list[int]|tuple[int] axes: after completion, tf.shape(y)[axis] == 1 for axis in axes
    :param str name: scope name
    :return: y where we have a new broadcast axis for each axis in axes
    :rtype: tf.Tensor
    """
    with tf.name_scope(name):
        for i in sorted(axes):
            x = tf.expand_dims(x, axis=i, name="expand_axis_%i" % i)
        return x


def dimshuffle(x, axes, name="dimshuffle"):
    """
    Like Theano's dimshuffle.
    Combines tf.transpose, tf.expand_dims and tf.squeeze.

    :param tf.Tensor x:
    :param list[int|str]|tuple[int|str] axes:
    :param str name: scope name
    :rtype: tf.Tensor
    """
    with tf.name_scope(name):
        assert all([i == "x" or isinstance(i, int) for i in axes])
        real_axes = [i for i in axes if isinstance(i, int)]
        bc_axes = [i for (i, j) in enumerate(axes) if j == "x"]
        if x.get_shape().ndims is None:
            x_shape = tf.shape(x)
            x = tf.reshape(x, [x_shape[i] for i in range(max(real_axes) + 1)])  # will have static ndims
        assert x.get_shape().ndims is not None

        # First squeeze missing axes.
        i = 0
        while i < x.get_shape().ndims:
            if i not in real_axes:
                x = tf.squeeze(x, axis=i)
                real_axes = [(j if (j < i) else (j - 1)) for j in real_axes]
            else:
                i += 1

        # Now permute.
        assert list(sorted(real_axes)) == list(range(x.get_shape().ndims))
        if real_axes != list(range(x.get_shape().ndims)):
            x = tf.transpose(x, real_axes)

        # Now add broadcast dimensions.
        if bc_axes:
            x = expand_multiple_dims(x, bc_axes)
        assert len(axes) == x.get_shape().ndims
        return x
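A quick usage example for the dimshuffle() defined above, applied to the matrix case discussed earlier (shapes chosen here just for illustration):

x = tf.placeholder(tf.float32, [128, 32])
y = dimshuffle(x, (1, "x", 0))
print(y.get_shape())  # ==> (32, 1, 128)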
If TensorFlow is your backend:
from keras import backend as K
K.permute_dimensions should do it.
tf.transpose is probably what you are looking for. It takes an arbitrary permutation.
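For example, a 3-D permutation (shapes chosen arbitrarily here) looks like this:

import tensorflow as tf
x = tf.placeholder(tf.float32, [2, 3, 4])
y = tf.transpose(x, perm=[1, 2, 0])
print(y.get_shape())  # ==> (3, 4, 2)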