How can I find anomalous values in the following data? I am simulating a sinusoidal pattern. While I can plot the data and spot any anomalies or noise visually, how can I do it without plotting the data? I am looking for simple approaches other than machine learning methods.
import random
import numpy as np
import matplotlib.pyplot as plt
N = 10 # Set signal sample length
t1 = -np.pi # Simulation begins at t1
t2 = np.pi; # Simulation ends at t2
in_array = np.linspace(t1, t2, N)
print("in_array : ", in_array)
out_array = np.sin(in_array)
plt.plot(in_array, out_array, color = 'red', marker = "o") ; plt.title("numpy.sin()")
Inject random noise
noise_input = random.uniform(-.5, .5); print("Noise : ",noise_input)
in_array[random.randint(0,len(in_array)-1)] = noise_input
print(in_array)
plt.plot(in_array, out_array, color = 'red', marker = "o") ; plt.title("numpy.sin()")
Data with noise
I've thought of the following approach to your problem. Since only some of the values in the time vector are anomalous, the rest of the values follow a regular progression, which means that if we group the data points in the vector into clusters and calculate the average step for the biggest cluster (which is essentially the pool of values that represent the real data), we can use that average to do a triad detection, within a given threshold, over the vector and detect which of the elements are anomalous.
For this we need two functions: calculate_average_step, which will calculate that average for the biggest cluster of close values, and detect_anomalous_values, which will yield the indexes of the anomalous values in our vector based on the average calculated earlier.
After we have detected the anomalous values, we can replace them with an estimated value, determined from our average step value and the adjacent points in the vector.
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def calculate_average_step(array, threshold=5):
    """
    Determine the average step by doing a weighted average based on clustering of averages.
    array: our array
    threshold: the +/- offset for grouping clusters. Applicable to all elements in the array.
    """
    # determine all the steps
    steps = []
    for i in range(0, len(array) - 1):
        steps.append(abs(array[i] - array[i+1]))

    # determine the steps clusters
    clusters = []
    skip_indexes = []
    cluster_index = 0

    for i in range(len(steps)):
        if i in skip_indexes:
            continue

        # determine the cluster band (based on threshold)
        cluster_lower = steps[i] - (steps[i]/100) * threshold
        cluster_upper = steps[i] + (steps[i]/100) * threshold

        # create the new cluster
        clusters.append([])
        clusters[cluster_index].append(steps[i])

        # try to match elements from the rest of the array
        for j in range(i + 1, len(steps)):
            if not (cluster_lower <= steps[j] <= cluster_upper):
                continue
            clusters[cluster_index].append(steps[j])
            skip_indexes.append(j)

        cluster_index += 1  # increment the cluster id

    clusters = sorted(clusters, key=lambda x: len(x), reverse=True)
    biggest_cluster = clusters[0] if len(clusters) > 0 else None

    if biggest_cluster is None:
        return None

    return sum(biggest_cluster) / len(biggest_cluster)  # return our most common average

def detect_anomalous_values(array, regular_step, threshold=5):
    """
    Will scan every triad (3 points) in the array to detect anomalies.
    array: the array to iterate over.
    regular_step: the step around which we form the upper/lower band for filtering
    threshold: +/- variation between the steps of the first and median element and median and third element.
    """
    assert len(array) >= 3  # must have at least 3 elements

    anomalous_indexes = []
    step_lower = regular_step - (regular_step / 100) * threshold
    step_upper = regular_step + (regular_step / 100) * threshold

    # detection runs forward from i (hence 3 elements must be available from i onwards)
    for i in range(0, len(array) - 2):
        a = array[i]
        b = array[i+1]
        c = array[i+2]

        first_step = abs(a - b)
        second_step = abs(b - c)

        first_belonging = step_lower <= first_step <= step_upper
        second_belonging = step_lower <= second_step <= step_upper

        # detect that both steps are alright
        if first_belonging and second_belonging:
            continue  # all is good here, nothing to do

        # detect if the first point in the triad is bad
        if not first_belonging and second_belonging:
            anomalous_indexes.append(i)

        # detect if the last point in the triad is bad
        if first_belonging and not second_belonging:
            anomalous_indexes.append(i+2)

        # detect if the mid point in the triad is bad (or everything is bad)
        if not first_belonging and not second_belonging:
            anomalous_indexes.append(i+1)
            # we won't add the others here because they will be detected by
            # the rest of the triad scans

    return sorted(set(anomalous_indexes))  # return unique indexes

if __name__ == "__main__":
    N = 10  # Set signal sample length
    t1 = -np.pi  # Simulation begins at t1
    t2 = np.pi  # Simulation ends at t2
    in_array = np.linspace(t1, t2, N)

    # add some noise
    noise_input = random.uniform(-.5, .5)
    in_array[random.randint(0, len(in_array)-1)] = noise_input
    noisy_out_array = np.sin(in_array)

    # display noisy sin
    plt.figure()
    plt.plot(in_array, noisy_out_array, color='red', marker="o")
    plt.title("noisy numpy.sin()")

    # detect anomalous values
    average_step = calculate_average_step(in_array)
    anomalous_indexes = detect_anomalous_values(in_array, average_step)

    # replace anomalous points with an estimated value based on our calculated average
    for anomalous in anomalous_indexes:
        if anomalous > 0:
            # forward extrapolation from the previous point
            in_array[anomalous] = in_array[anomalous-1] + average_step
        else:
            # backward extrapolation from the next point
            in_array[anomalous] = in_array[anomalous+1] - average_step

    # generate sine wave
    out_array = np.sin(in_array)

    plt.figure()
    plt.plot(in_array, out_array, color='green', marker="o")
    plt.title("cleaned numpy.sin()")
    plt.show()
Noisy sine:
Cleaned sine:
Your problem lies in the time vector (which is one-dimensional). You will need to apply some sort of filter to that vector.
The first thing that came to mind was medfilt (median filter) from scipy, and it looks something like this:
from scipy.signal import medfilt
l1 = [0, 10, 20, 30, 2, 50, 70, 15, 90, 100]
l2 = medfilt(l1)
print(l2)
the output of this will be:
[ 0. 10. 20. 20. 30. 50. 50. 70. 90. 90.]
The problem with this filter, though, is that if we apply noise values to the edges of the vector, like [200, 0, 10, 20, 30, 2, 50, 70, 15, 90, 100, -50], then the output would be something like [ 0. 10. 10. 20. 20. 30. 50. 50. 70. 90. 90. 0.], and obviously this is not OK for the sine plot, since it will produce the same artifacts in the sine values array.
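For illustration, here is a minimal snippet (not from the original answer, it just reproduces the output stated above) showing the edge artifact; medfilt uses a kernel size of 3 by default and zero-pads the boundaries, which is what corrupts the edge values:
from scipy.signal import medfilt

l1 = [200, 0, 10, 20, 30, 2, 50, 70, 15, 90, 100, -50]
print(medfilt(l1))
# [  0.  10.  10.  20.  20.  30.  50.  50.  70.  90.  90.   0.]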
A better approach to this problem is to treat the time vector as a y output and its index values as the x input, and to do a linear regression on the "time linear function" (note the quotes: it just means we're faking the two-dimensional model by applying a fake X vector). The code uses scipy's linregress (linear regression) function:
from scipy.stats import linregress
l1 = [5, 0, 10, 20, 30, -20, 50, 70, 15, 90, 100]
l1_x = range(0, len(l1))
slope, intercept, r_val, p_val, std_err = linregress(l1_x, l1)
l1 = intercept + slope * l1_x
print(l1)
whose output will be:
[-10.45454545 -1.63636364 7.18181818 16. 24.81818182
33.63636364 42.45454545 51.27272727 60.09090909 68.90909091
77.72727273]
Now let's apply this to your time vector.
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import linregress
N = 20
# N = 10 # Set signal sample length
t1 = -np.pi # Simulation begins at t1
t2 = np.pi; # Simulation ends at t2
in_array = np.linspace(t1, t2, N)
# add some noise
noise_input = random.uniform(-.5, .5);
in_array[random.randint(0, len(in_array)-1)] = noise_input
# apply filter on time array
in_array_x = range(0, len(in_array))
slope, intercept, r_val, p_val, std_err = linregress(in_array_x, in_array)
in_array = intercept + slope * in_array_x
# generate sine wave
out_array = np.sin(in_array)
print("OUT ARRAY")
print(out_array)
plt.plot(in_array, out_array, color = 'red', marker = "o") ; plt.title("numpy.sin()")
plt.show()
The output (plot omitted here) will be an approximation of the original signal, as it is with any form of extrapolation/interpolation/regression filtering.
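If you also want the indices of the anomalous samples rather than just a repaired time vector, a simple follow-up under the same "time is linear in its index" assumption is to flag points whose residual from the fitted line is unusually large. This is a sketch, not part of the original answer; the injected noise value and the 5-MAD threshold are arbitrary choices for illustration:
import numpy as np
from scipy.stats import linregress

# hypothetical noisy time vector: evenly spaced except one corrupted entry
t = np.linspace(-np.pi, np.pi, 20)
t[7] = 0.123  # injected noise

x = np.arange(len(t))
slope, intercept, r_val, p_val, std_err = linregress(x, t)
residuals = t - (intercept + slope * x)

# robust threshold: flag points more than a few MADs away from the fit
deviation = np.abs(residuals - np.median(residuals))
mad = np.median(deviation)
anomalous = np.where(deviation > 5 * mad)[0]
print("anomalous indices:", anomalous)  # [7]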
Related
I want to update Python code that was originally written to process 1-D array data. I tried different ways but still got errors. If I flatten my 2-D data, it loses meaning, since it is voice data. Below are some made-up data and the function to reproduce the error.
import numpy as np
from sklearn.linear_model import LinearRegression

def cpp_function(x, fs, pitch_range, trendline_quefrency_range, smooth=False, time_smooth_len=None, quefrency_smooth_len=None):
    """
    Computes cepstral peak prominence for a given signal

    Parameters
    -----------
    x: ndarray
        The audio signal
    fs: integer
        The sampling frequency
    pitch_range: list of 2 elements
        The pitch range where a peak is searched for
    trendline_quefrency_range: list of 2 elements
        The quefrency range for which the amplitudes will be modelled by a straight line

    Returns
    -----------
    integer
        The cepstral peak prominence of the audio signal
    """
    # Cepstrum
    x = np.hamming(len(x))*x
    spectrum = np.fft.rfft(x)
    spectrum = 20*np.log10(np.abs(spectrum))
    ceps = np.fft.rfft(spectrum)
    ceps = 20*np.log10(np.abs(ceps))

    # Smoothing
    if smooth == True:
        def smooth(y, box_pts):
            box = np.ones(box_pts)/box_pts
            y_smooth = np.convolve(y, box, mode='same')
            return y_smooth
        ceps = smooth(ceps.T, time_smooth_len).T
        ceps = smooth(ceps, quefrency_smooth_len)

    # Quefrency
    dt = 1/fs
    freq_vector = np.fft.rfftfreq(len(x), d=dt)
    df = freq_vector[1] - freq_vector[0]
    quefrency_vector = np.fft.rfftfreq(2*ceps.size-2, df)

    # Selecting part of cepstrum
    quefrency_range = [1/pitch_range[1], 1/pitch_range[0]]
    index_range = np.where((quefrency_vector >= quefrency_range[0]) & (quefrency_vector <= quefrency_range[1]))

    # For trend line
    index_range_tl = np.where((quefrency_vector >= trendline_quefrency_range[0]) & (quefrency_vector <= trendline_quefrency_range[1]))

    # Linear regression
    linear_regressor = LinearRegression()
    linear_regressor.fit(quefrency_vector[index_range_tl].reshape(-1, 1), ceps[index_range_tl].reshape(-1, 1))
    Y_pred = linear_regressor.predict(quefrency_vector.reshape(-1, 1))

    # CPP
    peak_value = np.max(ceps[index_range])
    peak_index = np.argmax(ceps[index_range])
    cpp = peak_value - Y_pred[index_range][peak_index][0]

    return cpp

x = np.random.normal(0, 1, (40, 2))  # made-up 2-D data that triggers the error
print(cpp_function(x=x, fs=44100, pitch_range=[75, 300], trendline_quefrency_range=[0.001, 0.05]))
I have an existing distribution of values and I want to draw samples of size 5, but those 5 samples need to have a std of X within some tolerance. For example, I need 5 samples that have a std of 10 (even though the overall distribution has std ≈ 32).
The example code below somewhat works, but is quite slow for large datasets. It randomly samples the distribution until it finds something close to the target std, then removes those elements so they can't be drawn again.
Is there a smarter way to do this properly and faster? It works OK for some values of target_std (above 6), but it isn't accurate below 6.
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

np.random.seed(23)

# Create a distribution
d1 = np.random.normal(95, 5, 200)
d2 = np.random.normal(125, 5, 200)
d3 = np.random.normal(115, 10, 200)
d4 = np.random.normal(70, 10, 100)
d5 = np.random.normal(160, 5, 200)
d6 = np.random.normal(170, 20, 100)
dist = np.concatenate((d1, d2, d3, d4, d5, d6))
print(f"Full distribution: len={len(dist)}, mean={np.mean(dist)}, std={np.std(dist)}")
plt.hist(dist, bins=100)
plt.title("Full Distribution")
plt.show()

batch_size = 5
num_batches = math.ceil(len(dist)/batch_size)
target_std = 10
tolerance = 1
# how many samples to search
num_samples = 100

result = []

# Find samples of batch_size that are closest to target_std
for i in range(num_batches):
    samples = []
    idxs = np.arange(len(dist))
    for j in range(num_samples):
        indices = np.random.choice(idxs, size=batch_size, replace=False)
        sample = dist[indices]
        std = sample.std()
        err = abs(std - target_std)
        samples.append((sample, indices, std, err, np.mean(sample), max(sample), min(sample)))
        if err <= tolerance:
            # close enough, stop sampling
            break

    # sort by smallest err first, then take the first/best result
    samples = sorted(samples, key=lambda x: x[3])
    best = samples[0]

    if i % 100 == 0:
        print(f"{i}, std={best[2]}, err={best[3]}, nsamples={num_samples}")

    result.append(best)

    # remove the data from our source
    dist = np.delete(dist, best[1])

df_samples = pd.DataFrame(result, columns=["sample", "indices", "std", "err", "mean", "max", "min"])
df_samples["err"].plot(title="Errors (target_std - batch_std)")

batch_std = df_samples["std"].mean()
batch_err = df_samples["err"].mean()
print(f"RESULT: Target std: {target_std}, Mean batch std: {batch_std}, Mean batch err: {batch_err}")
Since your problem is not restricted to a certain distribution, I use a normal random distribution here, but this should work for any distribution. However, the run time will depend on the population size.
import numpy as np

population = np.random.randn(1000) * 32
std = 10.
tol = 1.
n_samples = 5

samples = list(np.random.choice(population, n_samples))
while True:
    center = np.mean(samples)
    dis = [abs(i - center) for i in samples]
    if np.std(samples) > (std + tol):
        # spread too wide: drop the sample furthest from the mean
        samples.pop(dis.index(max(dis)))
    elif np.std(samples) < (std - tol):
        # spread too narrow: drop the sample closest to the mean
        samples.pop(dis.index(min(dis)))
    else:
        break
    samples.append(np.random.choice(population, 1)[0])
Here is how the code works.
First, we draw n_samples; most likely their std is not in the range you want, so we calculate the mean and the absolute distance of each sample from that mean. Then, if the std is larger than the desired value plus the tolerance, we kick out the furthest sample and draw a new one, and vice versa.
Note that if this takes too much time to calculate for your data, then after kicking the outlier out you can calculate the range in which the next element should lie and draw from that part of the population, instead of randomly taking one (a rough sketch of this idea is shown after the disclaimer below). Hopefully this works for you.
DISCLAIMER: This is not a random draw anymore, and you should be aware that the draw is biased and is not representative of the population.
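As a rough sketch of that last idea (a hypothetical helper, not from the original answer, reusing the names population, samples, std and tol from the snippet above): rather than drawing blindly, test which population members would bring the std into range and draw uniformly from those, falling back to a blind draw if none qualify.
import numpy as np

def draw_in_range(population, samples, target_std, tol, rng=None):
    # population std (ddof=0) of samples + [candidate], evaluated for every candidate at once
    rng = rng or np.random.default_rng()
    base = np.asarray(samples, dtype=float)
    cand = np.asarray(population, dtype=float)
    n = base.size + 1
    new_mean = (base.sum() + cand) / n
    new_var = (np.sum(base**2) + cand**2) / n - new_mean**2
    new_std = np.sqrt(np.maximum(new_var, 0.0))
    ok = np.abs(new_std - target_std) <= tol
    # draw from the admissible candidates, or blindly if there are none
    return rng.choice(cand[ok]) if ok.any() else rng.choice(cand)
In the loop above, samples.append(np.random.choice(population, 1)[0]) would then become samples.append(draw_in_range(population, samples, std, tol)).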
Given this simulated data:
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.tsa.statespace.structural import UnobservedComponents
np.random.seed(12345)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=100)
We build an UnobservedComponents model with the first 70 points to run inferences on the last 30 points, like so:
model = UnobservedComponents(y[:70], level='llevel', exog=X[:70])
f_model = model.fit()
forecaster = f_model.get_forecast(
steps=30,
exog=X[70:].reshape(-1, 1)
)
conf_int = forecaster.conf_int()
If we observe the mean for the 95% confidence interval, we get the following:
conf_int.mean(axis=0)
array([118.19789195, 122.14101161])
But when trying to get the same values through model simulations, we don't quite get the same results. Here's the script we run for the simulated boundaries:
sim_model = UnobservedComponents(np.zeros(30), level='llevel', exog=X[70:])
res = []
predicted_state = f_model.predicted_state[..., -1]
predicted_state_cov = f_model.predicted_state_cov[..., -1]
for i in range(1000):
init_state = np.random.multivariate_normal(
predicted_state,
predicted_state_cov
)
sim = sim_model.simulate(
f_model.params,
30,
initial_state=init_state)
res.append(sim.mean())
Printing the lower 2.5 and upper 97.5 percentile we get:
np.percentile(res, [2.5, 97.5])
array([119.06735028, 121.26810407])
As we use model simulations to distinguish signal from noise in the data, this difference ended up being big enough to lead to contradictory conclusions. If we, for instance, make:
y[70:] += 1
Then, according to the first technique, we conclude that the new y carries no signal, as its mean is lower than 122.14. But the same is not true if we use the second technique: as the upper boundary is 121.2, we conclude that there is a signal.
What we are trying to understand now is whether this is expected. Shouldn't the lower and upper 95% confidence interval of both techniques be equal?
I would like to have a random list where the occurrence of ones is 10% and the rest of the items are zeros. The length of this list is 1000. I would like the values to be in a random order, with an adjustable minimum distance between ones. So, for example, if I choose a value of 3, the list would look something like this:
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, ...]
What is the most elegant way to achieve this?
Edit. I was asked for more information and to show some effort.
This is for a study where 0 signifies one type of stimulus and 1 another kind of stimulus, and we want to have a minimum distance between stimuli of type 1.
So far I have achieved this with:
import random

trials = [0]*400
trials.extend([1]*100)
random.shuffle(trials)

# Make sure a fixed minimum number of standard runs follow each deviant
i = 0
while i < len(trials):
    if trials[i] == 1:
        trials[i+1:i+1] = 5*[0]
        i = i + 6
    else:
        i = i + 1
This gives me a list of length 1000, but it seems a little clumsy to me, so out of curiosity I was wondering if there is a better way to do this.
You have essentially a binomial random variable. The waiting time between successes for a binomial random variable is given by the negative binomial distribution. Using this distribution, we can get a random sequence of intervals between successes for a binomial variable with the specified success rate. Then we simply add your "refractory period" to all intervals and create a binary representation.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import nbinom
min_failures = 3 # refractory period
total_successes = 100
total_time = 1000
# create a negative binomial distribution to model the waiting times to the next success for a Bernoulli RV;
rv = nbinom(1, total_successes / float(total_time))
# get interval lengths between successes;
intervals = rv.rvs(size=total_successes)
# get event times
events = np.cumsum(intervals)
# rescale event times to fit into the total time - refractory time
total_refractory = total_successes * min_failures
remaining_time = total_time - total_refractory
events = events.astype(float) / np.max(events) * remaining_time
# add refractory periods
intervals = np.diff(np.r_[0, events])
intervals += min_failures
events = np.r_[0, np.cumsum(intervals[:-1])] # series starts with success
# create binary representation
binary = np.zeros((total_time), dtype=np.uint8)
binary[events.astype(int)] = 1
To check that the inter-event intervals match your expectations, plot a histogram:
# check that intervals match our expectations
fig, ax = plt.subplots(1,1)
ax.hist(intervals, bins=20, density=True)
ax.set_xlabel('Interval length')
ax.set_ylabel('Normalised frequency')
xticks = ax.get_xticks()
ax.set_xticks(np.r_[xticks, min_failures])
plt.show()
My approach to this problem is to maintain a list of candidate positions, from which the next position is chosen randomly. Then the surrounding range of positions is checked to be empty. If so, this position is chosen, and the whole range around it in which no future position is allowed is removed from the list of available candidates. This ensures a minimum number of loops.
It may happen (if mindist is big compared to the number of positions) that fewer than the required number of positions are returned. In this case, the function needs to be called again, as shown.
import random

def array_ones(mindist, length_array, numones):
    result = [0]*length_array
    candidates = list(range(length_array))
    while sum(result) < numones and len(candidates) > 0:
        # choose one position randomly from candidates
        pos = candidates[random.randint(0, len(candidates)-1)]
        L = pos-mindist if pos >= mindist else 0
        U = pos+mindist if pos <= length_array-1-mindist else length_array-1
        if sum(result[L:U+1]) == 0:  # no taken positions around
            result[pos] = 1
            # remove all candidates around this position
            no_candidates = set(range(L, U+1))
            candidates = list(set(candidates).difference(no_candidates))
    return result, sum(result)

def main():
    numones = 5
    numtests = 50
    mindist = 4
    while True:
        arr, ones = array_ones(mindist, numtests, numones)
        if ones == numones:
            break
    print(arr)

if __name__ == '__main__':
    main()
The function returns the array of ones and its number of ones. Set difference is used to remove a whole range of candidate positions non-iteratively.
It seems there wasn't a very simple one-line answer to this problem. I finally came up with this:
import numpy as np

def construct_list(n_zeros, n_ones, min_distance):
    if min_distance > (n_zeros + n_ones) / n_ones:
        raise ValueError("Minimum distance too high.")
    initial_zeros = n_zeros - min_distance * n_ones
    block = np.random.permutation(np.array([0]*initial_zeros + [1]*n_ones))
    ones = np.where(block == 1)[0].repeat(min_distance)
    # Insert min_distance number of 0s after each 1
    block = np.insert(block, ones+1, 0)
    return block.tolist()
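As a quick sanity check (not part of the original answer), calling it with the numbers from the question should return a list of length 1000 containing 100 ones:
lst = construct_list(n_zeros=900, n_ones=100, min_distance=3)
print(len(lst), sum(lst))  # 1000 100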
This seems simpler than the other answers, although Paul's answer was just a little faster with n_zeros=900, n_ones=100, min_distance=3.
I wish to generate this in python:
http://classes.yale.edu/fractals/RandFrac/Market/TradingTime/Example1/Example1.html
but I'm incredibly stuck and new to this concept. Does anybody know of a library or gist for this?
Edit:
From what I can understand, you need to split the fractal in two every time. So you have to calculate the y-axis point from the line between the two middle points. Then the two sections need to be formed according to the fractal?
Not 100% sure what you are asking, but as I understood from your comments, you want to generate a realistic-looking stock market curve using the recursion described in the link.
As far as I understood the description in the linked page and some of the parent pages, it works like this:
You are given a start and an end point and a number of turning points in the form (t1, v1), (t2, v2), etc., for example start=(0,0), end=(1,1), turns = [(1/4, 1/2), (3/4, 1/4)], where ti and vi are fractions between 0 and 1.
You determine the actual turning points scaled to the interval between start and end and calculate the differences between those points, i.e. how far to go from each point to reach the next.
You shuffle those segments to introduce some randomness; when put together, they still cover exactly the same distance, i.e. they connect the original start and end point.
Repeat by recursively calling the function for the different segments between the new points.
Here's some Python code I just put together:
from __future__ import division
from random import shuffle
def make_graph(depth, graph, start, end, turns):
    # add points to graph
    graph.add(start)
    graph.add(end)

    if depth > 0:
        # unpack input values
        fromtime, fromvalue = start
        totime, tovalue = end

        # calculate differences between points
        diffs = []
        last_time, last_val = fromtime, fromvalue
        for t, v in turns:
            new_time = fromtime + (totime - fromtime) * t
            new_val = fromvalue + (tovalue - fromvalue) * v
            diffs.append((new_time - last_time, new_val - last_val))
            last_time, last_val = new_time, new_val

        # add 'brownian motion' by reordering the segments
        shuffle(diffs)

        # calculate actual intermediate points and recurse
        last = start
        for segment in diffs:
            p = last[0] + segment[0], last[1] + segment[1]
            make_graph(depth - 1, graph, last, p, turns)
            last = p
        make_graph(depth - 1, graph, last, end, turns)
from matplotlib import pyplot
depth = 8
graph = set()
make_graph(depth, graph, (0, 0), (1, 1), [(1/9, 2/3), (5/9, 1/3)])
pyplot.plot(*zip(*sorted(graph)))
pyplot.show()
And here is some example output:
I had a similar interest and developed a python3 library to do just what you want.
pip install fractalmarkets
See https://github.com/hyperstripe50/fractal-market-analysis/blob/master/README.md
Using @tobias_k's solution and pandas, we can translate and scale the normalized fractal to a time-based one.
import arrow
import pandas as pd
import time
depth = 5
# the "geometry" of fractal
turns = [
(1 / 9, 0.60),
(5 / 9, 0.30),
(8 / 9, 0.70),
]
# select start / end time
t0 = arrow.now().floor("hours")
t1 = t0.shift(days=5)
start = (pd.to_datetime(t0._datetime), 1000)
end = (pd.to_datetime(t1._datetime), 2000)
# create a non-dimensionalized [0,0]x[1,1] Fractal
_start, _end = (0, 0), (1, 1)
graph = set()
make_graph(depth, graph, _start, _end, turns)
# just check graph length
assert len(graph) == (len(turns) + 1) ** depth + 1
# create a pandas dataframe from the normalized Fractal
df = pd.DataFrame(graph)
df.sort_values(0, inplace=True)
df.reset_index(drop=True, inplace=True)
# translate to real coordinates
X = pd.DataFrame(
data=[(start[0].timestamp(), start[1]), (end[0].timestamp(), end[1])]
).T
delta = X[1] - X[0]
Y = df.mul(delta) + X[0]
Y[0] = [*map(lambda x: pd.to_datetime(x, unit="s"), Y[0])]
# now resample and interpolate data according to *grid* size
grid ="min"
Z = Y.set_index(0)
A = Z.resample(grid).mean().interpolate()
# plot both graph to check errors
import matplotlib.pyplot as plt
ax = Z.plot()
A.plot(ax=ax)
plt.show()
showing both graphs:
and zooming to see interpolation and snap-to-grid differences: