TensorFlow nested while-loop in Python

def body_4(i, indices_set):
    c_j = lambda j, indices_set_j: tf.less(j, len_src_sent)
    j = tf.constant(0)
    indices_set_j = indices_set
    def body_j(j, indices_set_j):
        align_middle_ = ALIGNMENT_SIZE / 2
        align_start_ = 0 - j
        align_end_ = len_src_sent - j
        c_k = lambda k, indices_set_k: tf.less(k, len_src_sent)
        k = tf.constant(1)
        indices_set_k = indices_set_j
        def body_k(k, indices_set_k):
            indices_set_k = tf.concat([indices_set_k, tf.stack([tf.cast([i*len_src_sent+j, align_middle_ + align_start_ + k], tf.int64)])], 0)
            k = tf.add(k, 1)
            return k, indices_set_k
        [index, indices_set_k] = tf.while_loop(c_k, body_k, loop_vars=[k, indices_set_k], shape_invariants=[k.get_shape(), tf.TensorShape([None, None])])
        j = tf.add(j, 1)
        indices_set_j = indices_set_k
        return j, indices_set_j
    [index, indices_set] = tf.while_loop(c_j, body_j, loop_vars=[j, indices_set_j], shape_invariants=[j.get_shape(), tf.TensorShape([None, None])])
    i = tf.add(i, 1)
    return i, indices_set

c = lambda i, indices_set: tf.less(i, len_trg_sent-1)
i = tf.constant(0)
indices_set = tf.cast([0, ALIGNMENT_SIZE/2], tf.int64)
indices_set = tf.stack([indices_set])
[index, indices_set] = tf.while_loop(c, body_4, loop_vars=[i, indices_set], shape_invariants=[i.get_shape(), tf.TensorShape([None, None])])
I want to build a TensorFlow graph that outputs an indices_set for later use in a SparseTensor. Each index entry should look like [i*len_trg_sent+j, align_middle_ + align_start_ + k], where i is the index along the first axis, j the second, and k the third of a tensor of shape (len_trg_sent-1, len_src_sent, len_src_sent).
But the code above appears to do nothing except loop forever. I am confused about nested while loops in TensorFlow and would appreciate it if anyone could help me.
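For what it's worth, if the goal is only to enumerate these index pairs, the nested while loops can be sidestepped entirely. Below is a minimal vectorized sketch that builds the same [row, column] pairs with tf.meshgrid; the concrete sizes are hypothetical placeholders, and the cols expression is just align_middle_ + align_start_ + k with align_start_ = -j written out:

import tensorflow as tf

# Hypothetical sizes; substitute the real values.
len_trg_sent, len_src_sent, ALIGNMENT_SIZE = 5, 4, 8

# Enumerate every (i, j, k) combination at once instead of looping.
i, j, k = tf.meshgrid(tf.range(len_trg_sent - 1),
                      tf.range(len_src_sent),
                      tf.range(1, len_src_sent),
                      indexing='ij')
rows = i * len_src_sent + j             # first SparseTensor axis
cols = ALIGNMENT_SIZE // 2 - j + k      # align_middle_ + align_start_ + k
indices_set = tf.cast(tf.stack([tf.reshape(rows, [-1]),
                                tf.reshape(cols, [-1])], axis=1), tf.int64)

If the seed row [0, ALIGNMENT_SIZE/2] from the original code is still needed, it can be concatenated in front with tf.concat.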

Related

"x" dimension is equal to "none" of binary decision variables using Python-MIP

I’m trying to optimise a model in an optimization problem with Python-MIP.
I made binary decision variables and stored the references in x.
Based on articles I have read, each binary decision variable is expected to have an "x" attribute. So I should be able to use it like x[1][1][1].x, but I get None.
Here are some logs of the code's behaviour:
x = [[[model.add_var(var_type=BINARY) for i in V] for j in V] for k in K]
print(x[1][1][1]) # var(73)
print(type(x[1][1][1])) # <class 'mip.entities.Var'>
print(x[1][1][1].x) # None
It should be a number so that I can use it in the condition on this line:
DifferentLocation = [(i, j, k) for i in V for j in V for k in K if x[k][i][j].x > 0.99]
Then I get this error:
DifferentLocation = [(i, j, k) for i in V for j in V for k in K if x[k][i][j].x > 0.99]
TypeError: '>' not supported between instances of 'NoneType' and 'float'
Please take a look at my code:
import numpy as np
import matplotlib.pyplot as plt
import ConstantData as cData
import CommonFunctions as cFunc

Data = [[ 140,  30],
        [   0,  60],
        [  10,   0],
        [-100, 100],
        [-110,  90],
        [  30,  40]]
V = set(range(len(Data) + 2))
V2 = V - {0} - {1}
n = len(Data) + 2
K = set(range(6))
D = set(range(2))
Demand = [4., 3., 2., 2., 9., 5.]
BigConstraint = 1000
Demand1 = np.zeros((n))
Demand1[2:n] = Demand
Data1 = np.ones((n, 2))
Data1[0][0] = cData.Depot1x
Data1[1][0] = cData.Depot2x
Data1[0][1] = cData.Depot1y
Data1[1][1] = cData.Depot2y
Data1[2:n] = Data
distance = np.ones((n, n))
for i in range(n):
    for j in range(n):
        distance[i][j] = cFunc.Euclidean(Data1[i], Data1[j])

from itertools import product
from mip import Model, xsum, minimize, BINARY
from time import time

startTime = time()
model = Model()
x = [[[model.add_var(var_type=BINARY) for i in V] for j in V] for k in K]
y = [model.add_var() for i in V]
Sk = [model.add_var(var_type=BINARY) for k in K]
Totalcosts = xsum(distance[i][j] * x[k][i][j] * cData.CostPtud[k] for i in V for j in V for k in K) \
             + xsum(Sk[k] * cData.Rprice[k] for k in K)
model.objective = minimize(Totalcosts)
for i in V2:
    model += xsum(x[k][i][j] for j in V for k in K) == 1
for h in V:
    for k in K:
        model += xsum(x[k][i][h] for i in V) - xsum(x[k][h][i] for i in V) == 0
for k in K:
    model += xsum(Demand1[i] * x[k][i][j] for i in V for j in V) <= cData.Capacity[k]
for k in K:
    model += xsum(distance[i][j] * x[k][i][j] for i in V for j in V) <= cData.MaxRout[k]
for k in K:  # Return to Depot
    model += xsum(x[k][i][j] for i in D for j in V2) <= 1
for k in K:
    model += Sk[k] * BigConstraint >= xsum(x[k][i][j] for i in V for j in V)
model += xsum(Sk[k] for k in K) <= 3
for (i, j) in product(V2, V2):
    for k in K:
        if i != j and len(K) >= k:
            model += y[i] - y[j] + (n * x[k][i][j]) <= n - 1
model.optimize()
solution = model.num_solutions
temp = x[1][1][1]
print("log system")
print(temp.x)
print(type(temp))
DifferentLocation = [(i, j, k) for i in V for j in V for k in K if x[k][i][j].x > 0.99]
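In python-mip, Var.x stays None whenever the solver has not stored a feasible solution, so a useful first check is the status returned by model.optimize(). A minimal sketch of such a guard, reusing the model above:

from mip import OptimizationStatus

status = model.optimize()
if status in (OptimizationStatus.OPTIMAL, OptimizationStatus.FEASIBLE):
    # Var.x is only populated once a solution exists.
    DifferentLocation = [(i, j, k) for i in V for j in V for k in K
                         if x[k][i][j].x > 0.99]
else:
    print("No solution stored; Var.x stays None. Status:", status)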

How to reduce run time in my python code?

I am currently working on a project that requires me to run a complete Python code base. For research purposes, I need the code to run as fast as possible, yet I am fairly new to programming and have no idea how to reduce run time, so I hope someone can help me with that. Any advice would be appreciated. Here is part of my code base; it uses a lot of nested for loops, which probably increases run time significantly.
def a_j(r, a, A):  # the Clausius-Mossotti factor, determined by a symmetric (3 × 3) matrix such that (A_i)^T = A_i
    alph = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]], complex)
    for i in range(3):
        for j in range(3):
            alph[i, j] = (r * a * A[i, j])
    return alph

def W_ext(x, k, rho, alpha, A):  # particle–particle interaction term
    n = x.shape[0]  # the number of x vectors
    result = np.zeros([3*n, 3*n], complex)
    u = np.zeros((n, 3))  # u = x - x'
    for i in range(n):
        for j in range(n):
            if i != j:
                u[i] = x[i] - x[j]
                block_result = a_j(rho[i], alpha, A) * G((u[i]), k) * a_j(rho[j], alpha, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m, l]
    return result.imag

def A_ext(rho, a, A):  # single-particle term
    n = rho.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    for i in range(n):
        for j in range(n):
            if i == j:
                block_result = a_j(rho[i], a, A).imag
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m, l]
    return result  # (3 x 3) matrix

def P_ext(e, A, W, omega):
    eT = np.matrix.getH(e)
    mm1 = np.matmul(A, e)
    mm2 = np.matmul(W, e)
    extinction = (np.dot(eT, mm1) + np.dot(eT, mm2)) * (omega/2.0)
    return extinction

# ABSORPTION
def W_abs(x, k, rho, alpha, A, chi):  # particle–particle interaction term
    n = x.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    u = np.zeros((n, 3))
    for i in range(n):
        for j in range(n):
            if i != j:
                u[i] = x[i] - x[j]
                block_result = np.matrix.getH(a_j(rho[i], alpha, A)) * (1.0 / np.conjugate(chi)).imag * a_j(rho[i], alpha, A) * G((u[i]), k) * a_j(rho[j], alpha, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m, l]
    return 2.0 * result.real  # (3 x 3) matrix

def A_abs(rho, a, A, chi):  # single-particle term
    n = rho.shape[0]
    result = np.zeros([3*n, 3*n], complex)
    for i in range(n):
        for j in range(n):
            if i == j:
                block_result = np.matrix.getH(a_j(rho[i], a, A)) * (1.0 / np.conjugate(chi)).imag * a_j(rho[i], a, A)
                for m in range(3):
                    for l in range(3):
                        result[3*i + m, 3*j + l] = block_result[m, l]
    return result  # (3 x 3) matrix
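As an illustration of the kind of speed-up available here, the element-wise loops can be replaced by whole-array NumPy operations: a_j is just a scalar multiple of A, and the diagonal blocks of A_ext can be written with slice assignment instead of the inner m/l loops. A minimal sketch, assuming the same shapes as above (the _fast names are hypothetical):

import numpy as np

def a_j_fast(r, a, A):
    # r * a scales the whole (3, 3) matrix at once; no element loops needed.
    return r * a * np.asarray(A, dtype=complex)

def A_ext_fast(rho, a, A):
    n = rho.shape[0]
    result = np.zeros((3 * n, 3 * n), complex)
    for i in range(n):
        # Only the diagonal blocks are non-zero, so a single loop suffices.
        result[3*i:3*i + 3, 3*i:3*i + 3] = a_j_fast(rho[i], a, A).imag
    return result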

How to combine default input with conditional in order to perform calculations in Python

I would like to make a function whose calculation depends on the input. I have tried to make it so that if an input is not given, it is set to 0 by default. I will provide either a and d or b and c, but never a mixture.
import numpy as np
data_t = np.random.uniform(2.25, 2.75, size=10)
data_a = np.random.uniform(9.5, 10.5, size=(20, 10)); data_a = list(data_a)
data_b = np.random.uniform(6.5, 7.5, size=(20, 10)); data_b = list(data_b)
data_c = np.random.uniform(14.5, 15.5, size=(20, 10)); data_c = list(data_c)
data_d = np.random.uniform(18.5, 19.5, size=(20, 10)); data_d = list(data_d)

def TEST(t, a=0, b=0, c=0, d=0):
    t = np.array(t)
    if a and d == 0:
        m, n = np.array(a.shape)
        x1 = np.array([[b[jx, ix] + t[jx] for ix, i in enumerate(range(n))] for jx, _ in enumerate(range(m))])
        y1 = np.array([[c[jx, ix] + t[jx] for ix, i in enumerate(range(n))] for jx, _ in enumerate(range(m))])
    elif b and c == 0:
        m, n = np.array(a.shape)
        x2 = np.array([[a[jx, ix] - t[jx] for ix, i in enumerate(range(n))] for jx, _ in enumerate(range(m))])
        y2 = np.array([[d[jx, ix] - t[jx] for ix, i in enumerate(range(n))] for jx, _ in enumerate(range(m))])
    else:
        raise ValueError('Something is wrong.')

test = TEST(t=data_t, a=data_a, b=0, c=0, d=data_d)
I am obviously doing something wrong related to how Python interprets if combined with and, but I have no idea what the correct way is. I end up getting the ValueError I created.
What is the correct way to do what I am trying to do?
Thank you, @Chris Doyle, for setting me straight. As he said in his comment above, I should set the conditional to be if a == 0 and d == 0. I also improved the rest:
def TEST(t, a=0, b=0, c=0, d=0):
    if a == 0 and d == 0:
        x1 = np.array([[i + j for i in k] for j, k in zip(t, b)])
        y1 = np.array([[i + j for i in k] for j, k in zip(t, c)])
        return x1, y1
    elif b == 0 and c == 0:
        x2 = np.array([[i - j for i in k] for j, k in zip(t, a)])
        y2 = np.array([[i - j for i in k] for j, k in zip(t, d)])
        return x2, y2
    else:
        raise ValueError('Something is wrong.')
Everything works fine now, and it is all thanks to @Chris Doyle. Thank you again.
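An alternative worth noting: using None rather than 0 as the "not given" sentinel makes the intent explicit and avoids the truthiness pitfall entirely (if a and d == 0 parses as a and (d == 0), not (a == 0) and (d == 0)). A minimal sketch along those lines; the slicing mimics how zip() above stops at the shorter sequence:

import numpy as np

def TEST(t, a=None, b=None, c=None, d=None):
    t = np.asarray(t)
    rows = len(t)  # zip() stops at the shorter sequence; slicing mimics that
    if a is None and d is None:
        b, c = np.asarray(b)[:rows], np.asarray(c)[:rows]
        return b + t[:, None], c + t[:, None]  # add t[j] to every entry of row j
    if b is None and c is None:
        a, d = np.asarray(a)[:rows], np.asarray(d)[:rows]
        return a - t[:, None], d - t[:, None]
    raise ValueError('Provide either (a, d) or (b, c), not a mixture.')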

Indexing Ops in TF

Is there a way to index an operation in tensorflow? In particular, I'm interested in indexing by the iterator variable of tf.while_loop.
More concretely, let's say I have my_ops = [op1, op2]. I would like to have:
my_ops = [...]
i = tf.constant(0)
c = lambda i: tf.less(i, 10)
b = lambda i: my_ops[i](...)
r = tf.while_loop(c, b, [i])
which unfortunately will not work, since a Python list supports only integer indexing, not indexing by a tensor.
I believe this is not possible. However, you could instead use tf.stack to stack the operations' output tensors, and then use tf.gather to obtain the desired output.
Here is an example:
import tensorflow as tf

def condition(i, x):
    return tf.less(i, 10)

def body_1(my_ops):
    def b(i, x):
        stacked_results = tf.stack([op(x) for op in my_ops])
        gather_idx = tf.mod(i, 2)
        return [i + 1, tf.gather(stacked_results, gather_idx)]
    return b

def body_2(my_ops):
    def b(i, x):
        nb_ops = len(my_ops)
        pred_fn_pairs = [(tf.equal(tf.mod(i, nb_ops), 0), lambda: my_ops[0](x)),
                         (tf.equal(tf.mod(i, nb_ops), 1), lambda: my_ops[1](x))]
        result = tf.case(pred_fn_pairs)
        return [i + 1, result]
    return b

my_ops = [lambda x: tf.Print(x + 1, [x, 1]),
          lambda x: tf.Print(x + 2, [x, 2])]

i = tf.constant(0)
x = tf.constant(0)
r = tf.while_loop(condition, body_2(my_ops), [i, x])  # See the difference with body_1

with tf.Session() as sess:
    i, x = sess.run(r)
    print(x)  # Prints 15 = 5*2 + 5*1
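As a side note, newer TensorFlow releases ship tf.switch_case, which dispatches on an integer index directly and avoids building the predicate pairs by hand. A minimal sketch, assuming a TensorFlow version that provides it (body_3 is a hypothetical name):

def body_3(my_ops):
    def b(i, x):
        # The op=op default binds each op at definition time inside the
        # list comprehension, avoiding the late-binding lambda pitfall.
        result = tf.switch_case(tf.mod(i, len(my_ops)),
                                [lambda op=op: op(x) for op in my_ops])
        return [i + 1, result]
    return b

r = tf.while_loop(condition, body_3(my_ops), [i, x])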

filling a k,k matrix with lower triangular data

I want to place the output of d2arm into the lower triangle of a k-by-k matrix. Any advice is welcome.
import numpy as np
import pandas as pd

def r(k):
    v = np.arange(0, k, 1)
    w = abs(v - (k - 1))
    return np.repeat(v, w, axis=None)

def s(k):
    l = pd.DataFrame()
    for i in np.arange(1, k):
        r = pd.DataFrame(np.arange(i, k, 1))
        l = l.append(r)
    return np.array(l).T

def d2arm(d, f):
    k = d.shape[1]
    m1 = pd.DataFrame(r(k)).T
    m2 = pd.DataFrame(s(k))
    H = m2.append(m1)
    a = np.array([])
    for i in range(H.shape[1]):
        lr = round(f(d.iloc[:, H.iloc[:, i]]), 2)
        a = np.append(a, lr)
    return a
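For the scatter itself, a minimal sketch: r() and s() appear to enumerate the strictly-lower-triangular pairs column by column ((1,0), (2,0), ..., (2,1), ...), and swapping the outputs of np.triu_indices reproduces that order, so the flat output of d2arm can be dropped straight into place (to_lower_triangular is a hypothetical helper):

def to_lower_triangular(a, k):
    # Place the flat vector `a` into the strictly lower triangle of a
    # k-by-k matrix, column-major as r()/s() emit the index pairs.
    M = np.zeros((k, k))
    r_idx, c_idx = np.triu_indices(k, k=1)  # row-major upper triangle
    M[c_idx, r_idx] = a                     # swapped -> column-major lower triangle
    return M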
