How to compare the features of all the layers - python

I want to compare the geometry of all the features of a layer to a particular feature's geometry in QGIS.
Here is my code:
class geometry_checker(base_prechecker):
    def __init__(self):
        self.target_layer_name = "layer_1"

    def do_geom_check(self, layer, layers):
        layer_name = self.get_layer_name(layer)
        if layer_name == self.target_layer_name:
            iter = layer.getFeatures()
            for feat in iter:
                geom = feat.geometry()
                e = geom.type()
                iter1 = layers.getFeatures()
                for fea in iter1:
                    geom_a = fea.geometry()
                    f = geom.type()
                    if e == f:
                        return True
                    else:
                        return False
q = geometry_checker()
lay = iface.activeLayer()
layers = QgsMapLayerRegistry.instance().mapLayers()
print q.do_geom_check(lay, layers)  # do_geom_check takes both the layer and the layer collection
If I run this I am getting None as output. What I really want is: if the geometry type is the same it should return True, otherwise False.
Can somebody please help me?
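A minimal sketch of how the check could be restructured in PyQGIS, assuming the QGIS 2.x API (since QgsMapLayerRegistry is used) and that it runs in the QGIS Python console where iface is predefined; it compares at the layer level, since all features of a vector layer share one geometry type:

from qgis.core import QgsMapLayerRegistry, QgsVectorLayer


def same_geometry_type(target_layer, all_layers):
    # compare the active layer's geometry type against every other vector layer
    target_type = target_layer.geometryType()
    for layer in all_layers.values():           # mapLayers() returns a dict of layers
        if not isinstance(layer, QgsVectorLayer):
            continue                            # skip raster layers
        if layer.id() == target_layer.id():
            continue                            # skip the target layer itself
        if layer.geometryType() == target_type:
            return True
    return False


lay = iface.activeLayer()
layers = QgsMapLayerRegistry.instance().mapLayers()
print(same_geometry_type(lay, layers))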

There is a built-in tool that will do this for you at any license level in version 10. It's called the Feature Compare tool. This sounds like exactly what you described wanting.
http://help.arcgis.com/en/arcgisdesktop/10.0/help/index.html#//001700000004000000


How to implement binary mask matrix in Keras?

I'm currently working on a project, part of which involves reimplementing in Keras a model that was written in PyTorch for a paper. The overall model classifies proteins based on three elements of their properties: sequence, interaction with other proteins, and domains in their sequence (motifs). The part I'm currently recreating is the protein-protein interaction part. Firstly, the input vectors simply go through some fully connected layers, which is easy enough to implement in Keras. However, the outputs from this model are fed into a 'weight classifier model' which applies a binary mask matrix to its inputs using a layer created specifically for this model with PyTorch's nn.functional API.
Here is the code I am struggling to implement in Keras:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


class Weight_classifier(nn.Module):
    def __init__(self, func):
        super(Weight_classifier, self).__init__()
        # self.weight_layer = nn.Linear(OUT_nodes[func]*3, OUT_nodes[func])
        self.weight_layer = MaskedLinear(
            OUT_nodes[func]*3, OUT_nodes[func],
            'data/{}_maskmatrix.csv'.format(func)).cuda()
        self.outlayer = nn.Linear(OUT_nodes[func], OUT_nodes[func])

    def forward(self, weight_features):
        weight_out = self.weight_layer(weight_features)
        # weight_out = F.sigmoid(weight_out)
        weight_out = F.relu(weight_out)
        weight_out = F.sigmoid(self.outlayer(weight_out))
        return weight_out


class MaskedLinear(nn.Linear):
    def __init__(self, in_features, out_features, relation_file, bias=True):
        super(MaskedLinear, self).__init__(in_features, out_features, bias)
        mask = self.readRelationFromFile(relation_file)
        self.register_buffer('mask', mask)
        self.iter = 0

    def forward(self, input):
        masked_weight = self.weight * self.mask
        return F.linear(input, masked_weight, self.bias)

    def readRelationFromFile(self, relation_file):
        mask = []
        with open(relation_file, 'r') as f:
            for line in f:
                l = [int(x) for x in line.strip().split(',')]
                for item in l:
                    assert item == 1 or item == 0  # relation must be 0 or 1
                mask.append(l)
        return Variable(torch.Tensor(mask))
And this is the paper I am working from; it contains several diagrams and explanations of the models, in case I have not explained the issue sufficiently.
Many thanks.
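One possible direction, as a sketch only: in Keras the same effect can be approximated by a custom layer that multiplies its kernel by a fixed 0/1 mask. This assumes TensorFlow 2.x, and that the mask CSV is laid out as (out_features, in_features) as in the PyTorch code, so it is transposed to match Keras' (in_features, out_features) kernel:

import numpy as np
import tensorflow as tf
from tensorflow import keras


class MaskedDense(keras.layers.Layer):
    """Dense layer whose kernel is multiplied elementwise by a fixed 0/1 mask."""

    def __init__(self, units, mask, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        # mask is an (in_features, units) array of 0s and 1s; it is never trained
        self.mask = tf.constant(mask, dtype=tf.float32)

    def build(self, input_shape):
        self.kernel = self.add_weight(
            name="kernel", shape=(int(input_shape[-1]), self.units),
            initializer="glorot_uniform", trainable=True)
        self.bias = self.add_weight(
            name="bias", shape=(self.units,), initializer="zeros", trainable=True)

    def call(self, inputs):
        # zeroed mask entries cut the corresponding connections
        return tf.matmul(inputs, self.kernel * self.mask) + self.bias


# Usage sketch: the PyTorch mask file stores (out_features, in_features),
# so transpose it for Keras' (in_features, out_features) kernel layout.
# mask = np.loadtxt('data/{}_maskmatrix.csv'.format(func), delimiter=',').T
# layer = MaskedDense(units=mask.shape[1], mask=mask)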

Function inside class is not working when called upon (python)

I am creating a simulation library, one part of which uses geometric Brownian motion (GBM). I have created a class for it, which has an update function to update variable values. Here is the code for the GBM class:
import numpy as np
from sn_random_numbers_gen import sn_random_numbers
from generic_simulation_class import simulation_class


class geometric_brownian_motion(simulation_class):
    # class to generate simulated paths using GBM
    # attributes: name, mar_env, corr
    # methods: update (to update parameters), generate_paths
    def __init__(self, name, mar_env, corr=False):
        super().__init__(name, mar_env, corr)

    def update(self, initial_value=None, volatility=None, final_date=None):
        if initial_value is not None:
            self.initial_value = initial_value
        if volatility is not None:
            self.volatility = volatility
        if final_date is not None:
            self.final_date = final_date

    def generate_paths(self, fixed_seed=False, day_count=365):
        if self.time_grid is None:
            self.generate_time_grid()
        M = len(self.time_grid)
        J = self.paths
        paths = np.zeros((M, J))
        paths[0] = self.initial_value
        if not self.correlated:
            rand = sn_random_numbers((1, M, J), fixed_seed=fixed_seed)
        else:
            rand = self.random_numbers
        short_rate = self.discount_curve.short_rate
        for t in range(1, len(self.time_grid)):
            if not self.correlated:
                ran = rand[t]
            else:
                ran = np.dot(self.cholesky_matrix, rand[:, t, :])
                ran = ran[self.rn_set]
            dt = (self.time_grid[t] - self.time_grid[t - 1]).days / day_count
            paths[t] = paths[t - 1] * np.exp(
                (short_rate - 0.5 * self.volatility ** 2) * dt
                + self.volatility * np.sqrt(dt) * ran)
        self.instrument_values = paths
Here is a use case of the class (this use case also calls upon some other classes I have pre-defined):
import datetime as dt
from dx_frame import *
me_gbm = market_environment('me_gbm', dt.datetime(2020,1,1))
me_gbm.add_constant('initial_value', 36)
me_gbm.add_constant('volatility', 0.1)
me_gbm.add_constant('final_date', dt.datetime(2020,12,31))
me_gbm.add_constant('currency', 'EUR')
me_gbm.add_constant('frequency', 'M')
me_gbm.add_constant('paths', 10000)
csr = constant_short_rate('csr', 0.05)
me_gbm.add_curve('discount_curve', csr)
gbm = geometric_brownian_motion('gbm', me_gbm)
gbm.generate_time_grid()
paths_1 = gbm.get_instrument_values()
gbm.update(volatility=0.5)
paths_2 = gbm.get_instrument_values()
Here the update function is called to change the value of the volatility variable, but the variable seems to remain the same, since the values of paths_1 and paths_2 are identical (I have enclosed an image of paths_1 and paths_2 for reference). Every other function works fine. Can you please help me understand the problem?
The update function is fine; the problem is with get_instrument_values: since the paths are not None, it does not regenerate them and returns the same paths.
Using fixed_seed=False solves the problem.
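For illustration, a minimal sketch of the caching pattern described above; the real get_instrument_values lives in the asker's simulation_class base class and is not shown, so the exact body here is an assumption:

# Assumed shape of the base-class method (not shown in the question):
# paths are cached in self.instrument_values and only regenerated when
# they are missing or when fixed_seed is False.
def get_instrument_values(self, fixed_seed=True):
    if self.instrument_values is None:
        self.generate_paths(fixed_seed=fixed_seed, day_count=365)
    elif fixed_seed is False:
        self.generate_paths(fixed_seed=fixed_seed, day_count=365)
    return self.instrument_values

# So after gbm.update(volatility=0.5), request the paths with
# paths_2 = gbm.get_instrument_values(fixed_seed=False)
# to force re-simulation with the new volatility.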

Why isinstance() doesn't work as expected?

I was following this tutorial on decision trees and tried to recreate it on my own as a Python project, instead of a notebook, using Spyder.
I created different .py files for different methods and classes; specifically, I created a file named tree_structure.py with the following classes: Leaf, DecisionNode and Question. (I hope it's correct to put more than one class in a single .py file.)
When I tried to use isinstance() in the method "classify" in another .py file, I was expecting True, but instead it returned False:
>>>leaf0
<tree_structure.Leaf at 0x11b7c3450>
>>>leaf0.__class__
tree_structure.Leaf
>>>isinstance(leaf0,Leaf)
False
>>>isinstance(leaf0,tree_structure.Leaf)
False
leaf0 was created iteratively by the build_tree method (I just saved the tree to t0 for debugging; during normal execution it is not saved as a variable):
t0 = build_tree(train_data)
leaf0 = t0.false_branch.false_branch
I also tried type(leaf0) is Leaf instead of isinstance, but it still returns False.
Can someone explain why this happens?
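A quick diagnostic sketch for this situation: check whether two distinct class objects named Leaf are in play, which typically happens when the module is imported under two different names or reloaded in the Spyder session:

# Diagnostic sketch: if the two lines below print different class objects
# (or different module names), leaf0 was built against a different Leaf
# than the one imported here, e.g. because tree_structure was imported
# twice under different names or reloaded during the Spyder session.
print(type(leaf0), id(type(leaf0)))
print(Leaf, id(Leaf))
print(type(leaf0).__module__, Leaf.__module__)
print(type(leaf0) is Leaf)   # mirrors the failing isinstance check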
tree_structure.py
class Question:
    def __init__(self, header, column, value):
        self.column = column
        self.value = value
        self.header = header

    def match(self, example):
        # Compare the feature value in an example to the
        # feature value in this question.
        val = example[self.column]
        if is_numeric(val):
            return val >= self.value
        else:
            return val == self.value

    def __repr__(self):
        # This is just a helper method to print
        # the question in a readable format.
        condition = "=="
        if is_numeric(self.value):
            condition = ">="
        return "Is %s %s %s?" % (
            self.header[self.column], condition, str(self.value))


class Leaf:
    def __init__(self, rows):
        self.predictions = class_counts(rows)


class DecisionNode:
    def __init__(self,
                 question,
                 true_branch,
                 false_branch):
        self.question = question
        self.true_branch = true_branch
        self.false_branch = false_branch
classifier.py
from tree_structure import Question, Leaf, DecisionNode


def classify(row, node):
    # Base case: we've reached a leaf
    if isinstance(node, Leaf):
        print("----")
        return node.predictions

    # Decide whether to follow the true-branch or the false-branch.
    # Compare the feature / value stored in the node
    # to the example we're considering.
    if node.question.match(row):
        print("yes")
        return classify(row, node.true_branch)
    else:
        print("no")
        return classify(row, node.false_branch)
build_tree
def build_tree(rows, header):
    """Builds the tree.

    Rules of recursion: 1) Believe that it works. 2) Start by checking
    for the base case (no further information gain). 3) Prepare for
    giant stack traces.
    """
    gain, question = find_best_split(rows, header)
    print("--best question is ''{}'' with information gain: {}".format(
        question, round(gain, 2)))

    # Base case: no further info gain.
    # Since we can ask no further questions,
    # we'll return a leaf.
    if isinstance(rows, pd.DataFrame):
        rows = rows.values.tolist()
    if gain == 0:
        return Leaf(rows)

    # If we reach here, we have found a useful feature / value
    # to partition on.
    true_rows, false_rows = partition(rows, question)

    # Recursively build the true branch.
    print("\n----TRUE BRANCH----")
    true_branch = build_tree(true_rows, header)

    # Recursively build the false branch.
    print("\n----FALSE BRANCH----")
    false_branch = build_tree(false_rows, header)

    # Return a Question node.
    # This records the best feature / value to ask at this point,
    # as well as the branches to follow
    # depending on the answer.
    return DecisionNode(question, true_branch, false_branch)

Class implementation in Python

I'm trying to create a class in Python and am having some issues.
First I tried inheriting from VectorSystem to do trajectory optimization, and I get an error about it not supporting AutoDiff:
RuntimeError: The object named [] of type
drake::pydrake::(anonymous)::Impl::PyVectorSystem does not
support ToAutoDiffXd
Code:
import numpy as np
from pydrake.systems.framework import VectorSystem
from pydrake.all import (DirectCollocation, PiecewisePolynomial, Solve)


# Define the system.
class ex1(VectorSystem):
    def __init__(self):
        VectorSystem.__init__(self,
                              1,  # 1 input.
                              2)  # 2 outputs.
        self.DeclareContinuousState(2)  # 2 state variables.

    # xdot(t) = -x(t) - y(t); ydot(t) = -y(t) - x(t) + u
    def DoCalcVectorTimeDerivatives(self, context, u, x, xdot):
        xdot[:] = np.array([-x[0] - x[1], -x[1] - x[0] + u[0]])  # .reshape(3,1) #u[0]

    # y(t) = x(t)
    def DoCalcVectorOutput(self, context, u, x, y):
        y[:] = x

    def runDircol(self, x0, xf, tf0):
        N = 11
        umax = 10.
        context = self.CreateDefaultContext()
        dircol = DirectCollocation(self, context, num_time_samples=N,
                                   minimum_timestep=0.1, maximum_timestep=1.0)
        u = dircol.input()
        dircol.AddEqualTimeIntervalsConstraints()
        dircol.AddConstraintToAllKnotPoints(u[0] <= .5*umax)
        dircol.AddConstraintToAllKnotPoints(u[0] >= -.5*umax)
        dircol.AddBoundingBoxConstraint(x0, x0, dircol.initial_state())
        dircol.AddBoundingBoxConstraint(xf, xf, dircol.final_state())
        R = 10.0  # Cost on input "effort".
        dircol.AddRunningCost(R*u[0]**2)
        # Add a final cost equal to the total duration.
        dircol.AddFinalCost(dircol.time())
        initial_x_trajectory = \
            PiecewisePolynomial.FirstOrderHold([0., tf0], np.column_stack((x0, xf)))
        dircol.SetInitialTrajectory(PiecewisePolynomial(), initial_x_trajectory)
        result = Solve(dircol)
        print(result.get_solver_id().name())
        print(result.get_solution_result())
        assert(result.is_success())
        # import pdb; pdb.set_trace()
        xtraj = dircol.ReconstructStateTrajectory(result)
        utraj = dircol.ReconstructInputTrajectory(result)
        return utraj, xtraj


if __name__ == "__main__":
    # Declare model
    plant = ex1()  # Default instantiation
    # Trajectory optimization
    x0 = (0.0, 0.0)  # Initial state that trajectory should start from
    xf = (1.0, 1.0)  # Final desired state
    tf0 = 0.5        # Guess for how long trajectory should take
    utraj, xtraj = plant.runDircol(x0, xf, tf0)
Second, I tried to inherit from LeafSystem and had issues due to the templates. I cannot create a context by using plant.CreateDefaultContext(). I get the error:
TypeError: unbound method CreateDefaultContext() must be called with
ex1_[float] instance as first argument (got nothing instead)
And if I use plant().CreateDefaultContext(), I get weird errors afterwards, like getting a wrong context.num_output_ports(), or not being able to call plant.ToSymbolic()
(TypeError: unbound method ToSymbolic() must be called with ex1_[float] instance as first argument (got nothing instead)), etc.
Code:
import numpy as np
from pydrake.all import LeafSystem_, BasicVector_
from pydrake.systems.scalar_conversion import TemplateSystem


@TemplateSystem.define("ex1_")
def ex1_(T):

    class Impl(LeafSystem_[T]):
        def _construct(self, converter=None):
            LeafSystem_[T].__init__(self, converter)
            # one input
            self.DeclareVectorInputPort("u", BasicVector_[T](1))
            # two outputs (full state)
            self.DeclareVectorOutputPort("x", BasicVector_[T](2), self.CopyStateOut)
            # two positions, no velocities
            self.DeclareContinuousState(2, 0, 0)

        def _construct_copy(self, other, converter=None):
            Impl._construct(self, converter=converter)

        def CopyStateOut(self, context, output):
            x = context.get_continuous_state_vector().CopyToVector()
            output.SetFromVector(x)  # = y

        def DoCalcTimeDerivatives(self, context, derivatives):
            x = context.get_continuous_state_vector().CopyToVector()
            u = self.EvalVectorInput(context, 0).CopyToVector()
            xdot = np.array([-x[0] - x[1], -x[1] - x[0] + u[0]])  # .reshape(3,1) #u[0]
            derivatives.get_mutable_vector().SetFromVector(xdot)

    return Impl
if __name__ == "__main__":
# Declare model
plant = ex1_[None] # Default instantiation
#context = plant.CreateDefaultContext(DubinsPlant_[None]())
context = plant().CreateDefaultContext()
import pdb; pdb.set_trace()
sym_system = plant.ToSymbolic()
Would appreciate any help on solving one of these issues.
(Running on Ubuntu 16.04)
To answer your second question, plant is not an instantiation of ex1_[None]. So plant.ToSymbolic() will not work. A workable solution would be:
if __name__ == "__main__":
    # Declare model
    ex1 = ex1_[None]
    plant = ex1()
    context = plant.CreateDefaultContext()
    ad_system = plant.ToAutoDiffXd()
    sym_system = plant.ToSymbolic()
To answer your first question, I've unfortunately not updated VectorSystem to support subclassed type conversion:
https://github.com/RobotLocomotion/drake/issues/10745
Let me try that out in the next few minutes.
EDIT: Ah, may be more complicated. Please see update in the issue.

Creating a List of a List in heavily repeated python functions

I'm rather new to Python, especially when it comes to class attributes and how they work. I've come across a problem where I have a function builddata which outputs a list (CoarseGraining) of a few ints and sends it to another function coarse_grain.
Over the course of the script, these functions are called hundreds of times, with CoarseGraining being different every time. What I want to do is either:
a) Every time CoarseGraining reaches coarse_grain, that instance is used as before, but is also saved to a larger list which, after several repetitions of the function, will contain however many different CoarseGraining configurations there are, so they can be used later.
b) Define this process elsewhere, so that CoarseGraining is instead sent to two functions: it goes through its usual process in one, but is also collected into this list of lists, which can then be used.
I should also mention that all of these functions are defined within the same class, MultiFitter. I'd prefer method a) for simplicity, but any possible solution would be great. Below is a small excerpt of what I'm talking about.
Cheers
class MultiFitter(object):
    def __init__(
        self, models, mopt=None, ratio=False, fast=True, extend=False,
        fitname=None, wavg_svdcut=None, **fitterargs
    ):
        super(MultiFitter, self).__init__()
        models = [models] if isinstance(models, MultiFitterModel) else models
        self.models = models
        self.fit = None  # last fit
        self.ratio = ratio
        self.mopt = mopt
        self.fast = fast
        self.extend = extend
        self.wavg_svdcut = wavg_svdcut
        self.fitterargs = fitterargs
        self.fitname = (
            fitname if fitname is not None else
            lambda x: x
        )

    def builddata(self, data=None, pdata=None, prior=None, mf=None):
        if mf is None:
            mf = self._get_mf()
        mf['flatmodels'] = self.flatten_models(mf['models'])
        if pdata is None:
            if data is None:
                raise ValueError('no data or pdata')
            pdata = gvar.BufferDict()
            for m in mf['flatmodels']:
                M = m.builddata(data)
                CoarseGraining = []
                c1 = 1
                c2 = 0
                for i in range(1, M.shape[0]):
                    z = gvar.evalcorr([M[c2], M[i]])
                    corrValue = z[1][0]
                    if corrValue >= 0.7:
                        c1 = c1 + 1
                        if i == M.shape[0]-1:
                            CoarseGraining.append(int(c1))
                    else:
                        CoarseGraining.append(int(c1))
                        c2 = c2 + c1
                        c1 = 1
                        if i == M.shape[0]-1:
                            CoarseGraining.append(int(1))
                pdata[m.datatag] = (
                    m.builddata(data) if m.ncg <= 1 else
                    MultiFitter.coarse_grain(m.builddata(data), CoarseGraining)
                )

    @staticmethod
    def coarse_grain(G, CoarseGraining):
        G = numpy.asarray(G)
        D = []
        counter = 0
        for i, ncg in enumerate(CoarseGraining):
            D.append(str(numpy.sum(G[..., counter:counter + ncg], axis=-1) / ncg))
            counter = counter + ncg
        D = numpy.asarray(D)
        print(array, 'IS THIS IT???')
        print(D, '\n')
        # return numpy.transpose([G])
        return G
One way is to make coarse_grain a regular method of class MultiFitter and initialize full_list in your class's __init__, then append to the list in your coarse_grain method.
You can then access your list of lists via self.full_list.
def __init__(...):
    self.full_list = []


def coarse_grain(self, G, CoarseGraining):
    G = numpy.asarray(G)
    D = []
    counter = 0
    for i, ncg in enumerate(CoarseGraining):
        D.append(str(numpy.sum(G[..., counter:counter + ncg], axis=-1) / ncg))
        counter = counter + ncg
    D = numpy.asarray(D)
    self.full_list.append(D)
    return G
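To illustrate the pattern on its own (a standalone toy class, not the real MultiFitter), each call to coarse_grain appends its result, so every configuration is available afterwards:

import numpy

# Toy illustration of the accumulation pattern above, independent of MultiFitter:
# each coarse_grain call appends its coarse-grained array to self.full_list.
class Accumulator(object):
    def __init__(self):
        self.full_list = []

    def coarse_grain(self, G, CoarseGraining):
        G = numpy.asarray(G)
        D = []
        counter = 0
        for ncg in CoarseGraining:
            D.append(numpy.sum(G[..., counter:counter + ncg], axis=-1) / ncg)
            counter += ncg
        self.full_list.append(numpy.asarray(D))
        return G

acc = Accumulator()
acc.coarse_grain(numpy.arange(6.0), [2, 2, 2])  # first configuration
acc.coarse_grain(numpy.arange(6.0), [3, 3])     # a different configuration
print(len(acc.full_list))                       # 2: one entry per call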
