For school I have to make an assignment:
"The city of Amsterdam wants to store the maximum values of the past few years for research
purposes. It is important that the current maximum measured value can be accessed very quickly.
One idea to fulfill this requirement is to use a priority queue. Your job is to implement a priority
queue with a maximum heap and return again a tuple of the current maximal measurement and
its corresponding date when the maximum occurred. Output: date,covid level"
The program takes as input:
(string)’yyyy-mm-dd’, (int)sensor id, (int)covid level.
The expected output is: yyyy-mm-dd,covid level.
Input: 2022-09-08, 23, 371; 2022-09-08, 2, 3171; 2022-09-08, 12, 43; 2021-03-21, 4, 129
Output: 2022-09-08, 3171
I have provided my code below. When creating a max heap, the maximum element should be the first element (the root). A max-heap is a complete binary tree in which the value in each internal node is greater than or equal to the values of that node's children, yet when I insert the tuples the nodes do not end up ordered that way. My output is very strange, and I do not understand where it comes from. For the input above, this is my output:
1.1.1977, 9223372036854775807
Could somebody help me? What piece of code am I missing? I have gone over it so many times.
import sys

class MaxHeap:
    def __init__(self, maxsize):
        self.maxsize = maxsize
        self.size = 0
        self.Heap = [0] * (self.maxsize + 1)
        self.Heap[0] = ('1.1.1977', sys.maxsize)
        self.FRONT = 1

    # Function to return the position of
    # the parent for the node currently at pos
    def parent(self, pos):
        return pos // 2

    # Function to return the position of
    # the left child for the node currently at pos
    def leftChild(self, pos):
        return 2 * pos

    # Function to return the position of
    # the right child for the node currently at pos
    def rightChild(self, pos):
        return (2 * pos) + 1

    # Function that returns true if the passed
    # node is a leaf node
    def isLeaf(self, pos):
        if pos >= (self.size // 2) and pos <= self.size:
            return True
        return False

    # Function to swap two nodes of the heap
    def swap(self, fpos, spos):
        self.Heap[fpos], self.Heap[spos] = (self.Heap[spos],
                                            self.Heap[fpos])

    # Function to heapify the node at pos
    def maxHeapify(self, pos):
        if not self.isLeaf(pos):
            if (self.Heap[pos] < self.Heap[self.leftChild(pos)] or
                    self.Heap[pos] < self.Heap[self.rightChild(pos)]):
                if (self.Heap[self.leftChild(pos)] >
                        self.Heap[self.rightChild(pos)]):
                    self.swap(pos, self.leftChild(pos))
                    self.maxHeapify(self.leftChild(pos))
                else:
                    self.swap(pos, self.rightChild(pos))
                    self.maxHeapify(self.rightChild(pos))

    # Function to insert a node into the heap
    def insert(self, element):
        if self.size >= self.maxsize:
            return
        self.size += 1
        self.Heap[self.size] = element
        current = self.size
        while (self.Heap[current] >
               self.Heap[self.parent(current)]):
            self.swap(current, self.parent(current))
            current = self.parent(current)

    # Function to print the contents of the heap
    def Print(self):
        for i in range(1, (self.size // 2) + 1):
            print(i)
            print("PARENT : " + str(self.Heap[i]) +
                  "LEFT CHILD : " + str(self.Heap[2 * i]) +
                  "RIGHT CHILD : " + str(self.Heap[2 * i + 1]))

    # Function to remove and return the maximum
    # element from the heap
    def extractMax(self):
        extraction = self.Heap[self.FRONT]
        self.Heap[self.FRONT] = self.Heap[self.size]
        self.size -= 1
        self.maxHeapify(self.FRONT)
        return extraction


# Driver Code
if __name__ == "__main__":
    input = input()
    input = input.split(";")
    dates = []
    values = []
    for d in input:
        date = d.split(',', 2)
        dates.append(date[0])
        values.append(date[2])
    values = [int(x) for x in values]
    tuples = list(zip(dates, values))
    heap = MaxHeap(len(tuples) + 1)
    # print(tuples)
    for t in tuples:
        heap.insert(t)
        print(t)
    print(heap.extractMax())
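For comparison, here is a minimal sketch of the expected behaviour built on the standard-library heapq module (a hypothetical reference, not the required max-heap implementation). Note that Python compares tuples element by element, so a (date, level) tuple is ordered by the date string and the sys.maxsize in the sentinel never takes part in a comparison; the heap therefore orders by date rather than by covid level, which would explain the sentinel itself surfacing at FRONT.

import heapq

def max_measurement(records):
    # heapq is a min-heap, so push (-level, date); the most negative entry,
    # i.e. the largest covid level, then sits at the front of the heap.
    heap = []
    for date, _sensor_id, level in records:
        heapq.heappush(heap, (-level, date))
    neg_level, date = heap[0]
    return date, -neg_level

print(max_measurement([('2022-09-08', 23, 371), ('2022-09-08', 2, 3171),
                       ('2022-09-08', 12, 43), ('2021-03-21', 4, 129)]))
# ('2022-09-08', 3171)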
I implemented the algorithm of Moschopoulos, which gives the density and c.d.f. of a sum of gamma random variables. A C++ implementation exists in the dcoga R package, but I need mine to handle arbitrary-precision numbers through the mpmath library.
The major problem with the following code is the runtime: some parameters of the class (the _delta slot) need to be updated and re-computed on the fly when needed, and that takes a lot of time. I ran cProfile on a simple example so you can see where the problem is quickly, but I don't know enough to make it faster. See for yourself by running the following:
import mpmath as mp
import numpy as np
import scipy.stats.distributions as sc_dist


def gamma_density(x, a, b):
    return mp.power(x, a - 1) * mp.exp(-x / b) / mp.power(b, a) / mp.gamma(a)


def gamma_cdf(x, a, b):
    return mp.gammainc(a, 0, x / b, regularized=True)


class GammaConvolution:
    def __init__(self, alpha, beta):
        # alpha and beta must be provided as numpy arrays of mpmath.mpf objects
        n = len(alpha)
        if not len(beta) == n:
            raise ValueError('you should provide as many alphas as betas')
        if n == 1:
            raise ValueError('you should provide at least 2 gammas.')
        if not (type(alpha[0]) == mp.mpf and type(beta[0]) == mp.mpf):
            raise ValueError('you should provide alpha and beta in mp.mpf format.')
        alpha = np.array(alpha)
        beta = np.array(beta)
        # sanity check:
        check = alpha > 0
        if not np.all(check):
            alpha = alpha[check]
            beta = beta[check]
            print('Some alphas were negative. We discarded them. {} are remaining'.format(len(alpha)))
        self.signs = np.array([-1 if b < 0 else 1 for b in beta])
        self.alpha = alpha
        self.beta = 1 / beta * self.signs
        self.n = self.alpha.shape[0]
        # Moschopoulos parameters:
        self._beta1 = np.min(self.beta)
        self._c = np.prod([mp.power(self._beta1 / self.beta[i], self.alpha[i]) for i in range(self.n)])
        self._rho = np.sum(self.alpha)
        self._delta = [mp.mpf('1')]
        self._lgam_mod = [np.sum([self.alpha[i] * (1 - (self._beta1 / self.beta[i])) for i in range(self.n)])]  # this corresponds to get_lgam(k=1)
        self._to_power = [1 - self._beta1 / self.beta[i] for i in range(self.n)]

    def _get_delta(self, k):
        if len(self._delta) <= k:
            # Then we create it:
            n = len(self._delta)
            self._lgam_mod.extend([np.sum([self.alpha[i] * mp.power(self._to_power[i], j + 1) for i in range(self.n)]) for j in range(n, k + 1)])
            self._delta.extend([np.sum([self._lgam_mod[i] * self._delta[j - 1 - i] for i in range(j)]) / j for j in range(n, k + 1)])
        return self._delta[k]

    def coga(self, x, type='pdf'):
        if x < 0:
            return 0
        k = 0
        out = 0
        if type == 'pdf':
            func = gamma_density
        if type == 'cdf':
            func = gamma_cdf
        while True:
            step = self._get_delta(k) * func(x, self._rho + k, self._beta1)
            if mp.isinf(step) or mp.isnan(step):
                print('inf or nan happened, the algorithm did not converge')
                break
            out += step
            if mp.almosteq(step, 0):
                break
            k += 1
        out *= self._c
        return out

    def pdf(self, x):
        return self.coga(x, 'pdf')

    def cdf(self, x):
        return self.coga(x, 'cdf')


if __name__ == "__main__":
    mp.mp.dps = 20
    # some particular example values that 'approximate' a lognormal.
    alpha = np.array([mp.mpf(28.51334751960197301778147509487793953959733053134799171090326516406790428180220147416519532643017308),
mp.mpf(11.22775884868121894986129015315963173419663023710308189240288960305130268927466536233373438048018254),
mp.mpf(6.031218085515218207945488717293490366342446718306869797877975835997607997369075273734516467130527887),
mp.mpf(3.566976340452999300401949508136750700482567798832918933344685923750679570986611068640936818600783319),
mp.mpf(2.11321149019108276673514744052403419069919543373601000373799419309581402519772983291547041971629247),
mp.mpf(1.13846760415283260768713745745968197587694610126298554688258480795156541979045502458925706173497129),
mp.mpf(0.4517330810577715647869261976064157403882011767087065171431053299996540080549312203533542184738086012),
mp.mpf(0.07749235677493576352946436194914173772169589371740264101530548860132559560092370430079007024964728383),
mp.mpf(0.002501284133093294545540492059111705453529784044424054717786717782803430937621102255478670439562804153),
mp.mpf(0.000006144939533164067887819376779035687994761732668244591993428755735056093784306786937652351425833352728)])
    beta = np.array([mp.mpf(391.6072818187915081052155152400534191999174250784251976117131780922742055385769343508047998043722828),
mp.mpf(77.21898445771279675063405017644417196454232681648725486524482168571564310062495549360709158314560647),
mp.mpf(31.76440960907061013049029007869346161467203121003911885547576503605957915202107379956314233702891246),
mp.mpf(17.44293394293412500742344752991577287098138329678954573112349659319428017788092621396034528552305827),
mp.mpf(11.23444737858955404891602233256282644042451542694693191750203254839756772074776087387688524524329672),
mp.mpf(8.050341288822160015292746577166226701992193848793662515696372301333563919247892899484012106343185691),
mp.mpf(6.255867387720061672816524328464895459610937299629691008594802004659480037331191722532939009895028284),
mp.mpf(5.146993307537222489735861088512006440481952536952402160831342266591666243011939270854579656683779017),
mp.mpf(4.285958039399903253267350243950743396496148339434605882255896571795493305652009075308916145669650666),
mp.mpf(3.455673251219567018227405844933725014914508519853860295284610094904914286228770061300477809395436033)])
    dist = GammaConvolution(alpha, beta)
    print(sc_dist.lognorm(s=0.25).cdf(1))

    import cProfile
    pr = cProfile.Profile()
    pr.enable()
    print(dist.cdf(1))
    print(sc_dist.lognorm(s=0.25).cdf(1))
    pr.disable()
    # after your program ends
    import pstats
    pstats.Stats(pr).strip_dirs().sort_stats('cumtime').print_stats(20)
Can you help me make it faster? The problem is clearly in the _get_delta function.
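One direction that might help, sketched under the assumption (to be checked against the cProfile output) that the repeated mp.power calls inside _get_delta dominate: keep a running vector of _to_power[i]**j and extend it one exponent at a time, so each new _lgam_mod entry costs n plain multiplications instead of n arbitrary-precision power evaluations. The _power_state attribute below is hypothetical and not part of the original class; the variant only works as a drop-in replacement used from the start, before _delta has been extended by the original method. The recurrence itself is unchanged, only the way the powers are produced differs.

def _get_delta(self, k):
    # Hypothetical incremental variant: extends _lgam_mod and _delta one index
    # at a time while updating a running power vector.
    if not hasattr(self, '_power_state'):
        self._power_state = list(self._to_power)  # currently holds _to_power[i] ** 1
    while len(self._delta) <= k:
        j = len(self._delta)
        # advance the running powers from exponent j to exponent j + 1
        self._power_state = [p * t for p, t in zip(self._power_state, self._to_power)]
        self._lgam_mod.append(sum(a * p for a, p in zip(self.alpha, self._power_state)))
        self._delta.append(
            sum(self._lgam_mod[i] * self._delta[j - 1 - i] for i in range(j)) / j)
    return self._delta[k]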
I made a genetic algorithm that writes a target phrase, but I feel it takes too long on each iteration, so I'd like to know if you have any ideas on how to speed it up.
Thanks.
from random import randint, random
from time import time


def rescale(X, A, B, C, D, force_float=False):
    retval = ((float(X - A) / (B - A)) * (D - C)) + C
    if not force_float and all(map(lambda x: type(x) == int, [X, A, B, C, D])):
        return int(round(retval))
    return retval
Here I create a list of random characters that form the phrase:

class DNA:
    def __init__(self, num):
        self.genes = []
        for i in range(num):
            self.genes.append(chr(randint(32, 126)))

Transform the genes into a string:

    def getPhrase(self):
        return ''.join(self.genes)

Calculating the fitness (similarity to the target):

    def fitness(self, target):
        score = 0
        for i in range(len(self.genes)):
            if self.genes[i] == target[i]:
                score += 1
        return score * score / (len(target) * len(target))

Mixing two genes (getting some characters from one parent and some from the other):

    def crossover(self, partner):
        child = DNA(len(self.genes))
        midpoint = randint(0, len(self.genes))
        for i in range(len(self.genes)):
            if i > midpoint:
                child.genes[i] = self.genes[i]
            else:
                child.genes[i] = partner.genes[i]
        return child

Mutating a gene (replacing some characters at random):

    def mutate(self, mutationRate):
        for i in range(len(self.genes)):
            if random() < mutationRate:
                self.genes[i] = chr(randint(32, 126))
A list of DNA objects:

class Population:
    def __init__(self, target, mutationRate, num):
        self.population = []
        for i in range(num):
            self.population.append(DNA(len(target)))
        self.mutationRate = mutationRate
        self.calcFitness(target)
        self.generations = 0

Making a list with the fitness of each gene:

    def calcFitness(self, target):
        self.fitness = []
        for i in range(len(self.population)):
            self.fitness.append(self.population[i].fitness(target))

Making a mating pool for the crossover function, with entries proportional to fitness:

    def naturalSelection(self):
        global index, x
        self.matingPool = []
        self.maxFitness = 0
        for i in range(len(self.population)):
            if self.fitness[i] > self.maxFitness:
                self.maxFitness = self.fitness[i]
                index = i
        print(DNA.getPhrase(population.population[index]), ' generation: ', self.generations)
        if DNA.getPhrase(population.population[index]) == target:
            x = False
        for i in range(len(self.population)):
            fitness = rescale(self.fitness[i], 0, float(self.maxFitness), 0, 1)
            n = fitness * 100
            for j in range(int(n)):
                self.matingPool.append(population.population[i])

Updating the population:

    def generate(self):
        for i in range(len(self.population)):
            a = randint(0, len(self.matingPool) - 1)
            b = randint(0, len(self.matingPool) - 1)
            partnerA = self.matingPool[a]
            partnerB = self.matingPool[b]
            child = partnerA.crossover(partnerB)
            child.mutate(self.mutationRate)
            population.population[i] = child
        self.generations += 1
start = time()
target = input("Target: ")
population = Population(target, 0.05, 300)
x = True
while x == True:
    population.naturalSelection()
    population.generate()
    population.calcFitness(target)
end = time()
print(end - start)
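One possible speed-up, sketched under the assumption that building matingPool (up to 100 list appends per individual, every generation) is a significant cost: random.choices from the standard library can draw fitness-weighted parents directly, so no mating pool needs to be materialised. The generate_weighted function below is a hypothetical replacement for the naturalSelection/generate pair, and it assumes at least one individual has non-zero fitness.

from random import choices

def generate_weighted(pop, target):
    # Draw parents with probability proportional to fitness instead of
    # building a mating pool of repeated references.
    weights = [dna.fitness(target) for dna in pop.population]
    parents_a = choices(pop.population, weights=weights, k=len(pop.population))
    parents_b = choices(pop.population, weights=weights, k=len(pop.population))
    for i, (a, b) in enumerate(zip(parents_a, parents_b)):
        child = a.crossover(b)
        child.mutate(pop.mutationRate)
        pop.population[i] = child
    pop.generations += 1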
I have two Python classes: Agent and Group...
Each Group has a centerGroup property, plus a static list of groups, i.e. GroupList
Here is a brief overview of the Group class:
import Agent

class Group(object):
    """description of class"""
    GroupIdentifier = 1
    GroupThreshold = 10
    GroupList = []

    def __init__(self, agentList=None, groupCenter=None, groupIdentifier=None):
        global GroupIdentifier
        global GroupList
        self.groupIdentifier = Group.GroupIdentifier
        Group.GroupIdentifier += 1
        Group.GroupList.append(self)
        self.groupCenter = groupCenter
        self.agentList = agentList
Furthermore, within the Agent class, I want to find the minimum Euclidean distance of a given agent from all the centerGroup properties of the groups in GroupList (there is an offset, which is GAMMA_TRESHOLD).
The relevant part of the Agent class is shown in the snippet below:
import math
import Group

class Agent(object):
    """description of class"""
    GAMMA_TRESHOLD = 20

    def __init__(self, point=None, groupId=None):
        self.locationX = point.x
        self.locationY = point.y
        self.groupId = 0

    def get_agent_distance_from_groupCenter(self, point):
        return math.sqrt(math.pow(self.locationX - point.x, 2) +
                         math.pow(self.locationY - point.y, 2))

    def gamma_condition(self):
        # I KNOW THIS IMPLEMENTATION IS WRONG... JUST GOTTA SHOW THE TARGET!
        return Group.Group.GroupList[Group.Group.GroupList.index(min(get_agent_distance_from_groupCenter(agent, group.groupCenter) - GAMMA_TRESHOLD))]
Mathematically, the problem is to minimize the norm below and return the group whose centerGroup is nearest to the agent:
min_i ( ||centerGroup_i - agent|| - TRESHOLD )
Could you please help me write such a query (a valid implementation of the gamma_condition method) as a Python list comprehension?
In the end, for lack of a better idea from anyone else, my investigation led to the solution below:
def gamma_condition(self):
    temp = []
    maxValue = 0
    temp = [[item.groupIdentifier, JOIN_TRESHOLD - self.get_agent_distance_from_groupCenter(item.groupCenter)] for item in Group.Group.GroupList]
    for item in temp:
        maxValue = max(float(i) for i in item[1])
    if maxValue > 0:
        index = temp.index(maxValue)
        NearestGroupIdToJoin = temp[index][0]
        return NearestGroupIdToJoin
    else:
        return None
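For what it's worth, here is a more direct sketch of the same query (hypothetical: it assumes Group.Group.GroupList is populated, that get_agent_distance_from_groupCenter accepts a center point, and that GAMMA_TRESHOLD is the cut-off):

def gamma_condition(self):
    # Distance from this agent to every group's center.
    distances = [(self.get_agent_distance_from_groupCenter(g.groupCenter), g)
                 for g in Group.Group.GroupList]
    # Keep only the groups within the threshold.
    candidates = [(d, g) for d, g in distances if d <= Agent.GAMMA_TRESHOLD]
    if not candidates:
        return None
    # Return the identifier of the nearest remaining group.
    return min(candidates, key=lambda pair: pair[0])[1].groupIdentifier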
I'm making a program that will go through at least 1,016,064 gear permutations in Diablo 3. I've been experimenting with different theoretical implementations, and I decided that I want to use classes to represent each permutation rather than dealing with massive and convoluted dictionaries. With this method I can store an instance and then replace it when a new permutation is superior to the former.
In any case, it takes my computer (i7-3632QM) about 40 seconds to go through all of the permutations doing only about 30 flops per permutation, and I can't even imagine how long it will take if it has to define all 50 methods each time a class is instantiated. Anyway, this is what I think it will look like:
class perm:
    def __init__(self, *args):
        self.x = 10
        self.y = 5
        self.z = 100
        for item in args:
            if hasattr(self, item):
                getattr(self, item)()
        self.val_1 = self.x * 2
        self.val_2 = self.y * 5
        self.val_3 = self.z / (self.z + 300)

    def head_1(self):
        self.x += 5
        self.z += 200

    def head_2(self):
        self.x += 10
        self.y += 10

    def feet_1(self):
        self.y += 5
        self.z += 250

    def feet_2(self):
        self.x += 10
        self.z += 500

current_best = perm('head_1', 'feet_2')
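On the instantiation-cost worry above: the method bodies are compiled once, when the class body executes, not again for every instance; each instance only stores its own attributes. A quick check (hypothetical snippet using the perm class as written):

a = perm('head_1')
b = perm('feet_1')
# Both bound methods wrap the very same function object stored on the class.
print(a.head_1.__func__ is b.head_1.__func__)   # True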
It seems like the correct way to do this is to make objects for each of the gear options you have, then a function that calculates them all.
import itertools

class Gear(object):
    def __init__(self, *args, **kwargs):
        # I have no idea what Gear should do...
        ...

class Headpiece(Gear):
    ...

class Legs(Gear):
    ...

# etc

def calculate_perm(gear_tuple):
    result = do_some_calculation_over(gear_tuple)
    return result

best = max(itertools.permutations(all_your_gear), key=calculate_perm)
You could even create one class that's analogous to your perm, though I'd give it a more descriptive name:
class EquipmentSet(object):
    slots = ['head', 'legs', ...]

    def __init__(self, head=None, legs=None, ...):
        self.head = head
        self.legs = legs
        ...
        self.equipment = [getattr(self, item) for item in self.slots]

    @property
    def x(self):
        return sum(item.x for item in self.equipment)

    # similar for y and z

    @property
    def val_1(self):
        return self.x * 2

    # similar for val_2, val_3

    # implement dunder rich comparison methods?

result = max(EquipmentSet(*gearset)
             for gearset in itertools.permutations(all_your_gear))
The strings are just an example. These lists should contain Gear instances, each of which knows what kind of 'bonuses' the gear gives.
import itertools

headpieces = ['headpiece1', 'headpiece2', 'headpiece3']
armors = ['armor1', 'armor2']
weapons = ['weapon1', 'weapon2']

print(list(itertools.product(headpieces, armors, weapons)))
# result:
[('headpiece1', 'armor1', 'weapon1'),
('headpiece1', 'armor1', 'weapon2'),
('headpiece1', 'armor2', 'weapon1'),
('headpiece1', 'armor2', 'weapon2'),
('headpiece2', 'armor1', 'weapon1'),
('headpiece2', 'armor1', 'weapon2'),
('headpiece2', 'armor2', 'weapon1'),
('headpiece2', 'armor2', 'weapon2'),
('headpiece3', 'armor1', 'weapon1'),
('headpiece3', 'armor1', 'weapon2'),
('headpiece3', 'armor2', 'weapon1'),
('headpiece3', 'armor2', 'weapon2')]
This code gives you all possible gear combinations in a lazy way (without passing it to list() it returns a generator), is optimized (itertools is implemented in C), and is elegant. Note that each element contains exactly one headpiece / armor / weapon. It can be generalized to additional pieces of gear.
After that you'll just have to write some kind of aggregator that takes a gear combination and returns a 'score'.
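As a rough illustration of such an aggregator (a sketch only: the .bonuses dict on the Gear objects and the stat names used here are made up), one could score each product() combination and keep the best with max():

import itertools

def score(combo):
    # Sum the bonuses granted by each piece in the combination,
    # then turn them into a single comparable number.
    total = {}
    for piece in combo:
        for stat, value in piece.bonuses.items():
            total[stat] = total.get(stat, 0) + value
    return total.get('damage', 0) * (1 + total.get('crit_chance', 0))

best = max(itertools.product(headpieces, armors, weapons), key=score)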
Well, I decided to use itertools, a module I have no experience with (but that will change after this!), and I've already half-built the script as a test. It works, so I might as well finish it even if it isn't the most efficient way, although I'm open to suggestions...
import time, itertools

class Barb:
    def __init__(_, args):
        _.elements = ['arcane', 'cold', 'fire', 'lightning', 'poison', 'physical']
        _.strength = 5460  # max ancient str
        _.vitality = 140
        _.armor = 10188
        _.all_res = 250
        _.resistances = {element: 7.7 for element in _.elements}
        _.dodge = 0
        _.armor_bonus_percent = .25
        _.all_res_bonus_percent = 0
        _.life_bonus_percent = .25
        _.elemental_damage_reduction = 1
        _.regen = 10730
        _.life_on_hit = 8035
        _.life_per_fury_spent = 0
        _.life_percent_per_second_regen = 0
        _.damage_mod = 1
        _.cc = .05
        _.cd = 2.8
        _.ias = .25
        _.attacks_per_second = 1.69
        _.ww_damage_percent = 0
        _.dibs = 0
        _.cdr = 1
        _.elemental_damage_bonus = .2
        _.bastions = False
        # apply gear bonuses
        for arg in args:
            getattr(_, arg)()

    def helm_1(_):
        _.cc += .06
        _.ww_damage_percent += .15
        _.life_bonus_percent += .23
        _.resistances['arcane'] += 210

    def helm_2(_):
        _.cc += .06
        _.vitality += 1000
        _.life_bonus_percent += .23
        _.resistances['arcane'] += 210

    def torso_1(_):
        _.vitality += 650
        _.life_bonus_percent += .15
        _.resistances['fire'] += 210

    def torso_2(_):
        _.all_res += 120
        _.vitality += 650

    def pants_1(_):
        _.vitality += 650
        _.armor += 700
        _.resistances['physical'] += 210

    def pants_2(_):
        _.vitality += 650
        _.all_res += 120

    def bastions_1(_):  # ring set
        _.strength += 1000
        _.cc += .12
        _.cd += 1
        _.resistances['physical'] += 210
        _.resistances['poison'] += 210
        _.bastions = True

    def bastions_2(_):
        _.strength += 500
        _.cc += .12
        _.cd += 1
        _.cdr *= .92
        _.resistances['physical'] += 210
        _.resistances['poison'] += 210
        _.bastions = True

    def bk_1(_):  # (str, dmg, cdr) + (str, cdr, vit)
        _.strength += 2000
        _.damage_mod *= 1.05
        _.cdr *= .9 * .9
        _.vitality += 1000

    def bk_2(_):  # (str, dmg, cdr) + (str, dmg, loh)
        _.strength += 2000
        _.damage_mod *= 1.1
        _.cdr *= .9
        _.life_on_hit += 18000


def best_score():
    def available_items(prefix):
        # automagically check Barb for possible item variants of the item slot 'prefix'
        # so more can be added at a later time
        r = []
        i = 1
        while True:
            name = '%s_%s' % (prefix, i)
            if hasattr(Barb, name):
                r.append(name)
            else:
                return r
            i += 1

    gear_slots = ['helm', 'torso', 'pants', 'bastions', 'bk']
    helms, torso, pants, bastions, bk = [available_items(i) for i in gear_slots]
    gears = itertools.product(helms, torso, pants, bastions, bk)
    bestOffense = {'gear': [],
                   'health': 0,
                   'mitigation': 0,
                   'damage': 0}
    elapsed = time.time()
    while True:
        try:
            args = next(gears)
            barb = Barb(args)
            armor = barb.armor * (1 + barb.armor_bonus_percent)
            damage_reduction = armor / (armor + 3500)
            resistances = {res: (barb.resistances[res] + barb.all_res)
                           * (1 + barb.all_res_bonus_percent)
                           for res in barb.resistances}
            elemental_dr = {res: resistances[res] / (resistances[res] + 350)
                            for res in resistances}
            health = barb.vitality * 100 * (1 + barb.life_bonus_percent)
            aps = barb.attacks_per_second * (1 + barb.ias)
            damage_mod = barb.damage_mod * (1 + (barb.strength / 100))
            damage_mod *= (1 - barb.cc) + (barb.cc * barb.cd)
            damage_mod *= 2.25 if barb.bastions else 1
            damage_mod *= 1 + barb.elemental_damage_bonus
            dust_devils = 25 * damage_mod * (1 + barb.dibs + barb.ww_damage_percent)
            min_elemental_dr = elemental_dr[min(elemental_dr)]
            mitigation = 1 - ((1 - damage_reduction) * (1 - min_elemental_dr))
            if dust_devils > bestOffense['damage']:
                bestOffense = {'gear': args,
                               'health': health,
                               'mitigation': mitigation,
                               'damage': dust_devils}
        except StopIteration:
            return bestOffense, time.time() - elapsed
Python static methods won't stop the interpreter from making a new function for every instance of a class, because regular methods are already stored just once on the class; what a staticmethod does avoid is creating a bound-method wrapper on each attribute lookup. You can only use it for functions that don't need an instance to operate on, i.e. functions that don't use self.
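For example, the armor-based damage-reduction formula used in best_score depends only on its inputs, so it could live on a class as a static method (a sketch, not part of the original code; the class and method names here are made up):

class Formulas:
    @staticmethod
    def damage_reduction(armor, constant=3500):
        # Pure function of its arguments, so no instance (self/_) is needed.
        return armor / (armor + constant)

print(Formulas.damage_reduction(10188 * 1.25))  # same formula as in best_score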