OpenCV Color Matching - python

I'm trying to follow a Python function from here to apply color matching (histogram matching) in OpenCV.
This is the Python function (without the mask option):
#!/usr/bin/env python
import cv2  # Import the OpenCV library
import numpy as np  # Import Numpy library
import matplotlib.pyplot as plt  # Import matplotlib functionality
import sys  # Enables the passing of arguments

# Define the file names of the images
SOURCE_IMAGE = "aspens_in_fall.jpg"
REFERENCE_IMAGE = "forest_resized.jpg"
MASK_IMAGE = "mask.jpg"
OUTPUT_IMAGE = "aspens_in_fall_forest_output"
OUTPUT_MASKED_IMAGE = "aspens_in_fall_forest_output_masked.jpg"

def calculate_cdf(histogram):
    """
    This method calculates the cumulative distribution function
    :param array histogram: The values of the histogram
    :return: normalized_cdf: The normalized cumulative distribution function
    :rtype: array
    """
    # Get the cumulative sum of the elements
    cdf = histogram.cumsum()

    # Normalize the cdf
    normalized_cdf = cdf / float(cdf.max())

    return normalized_cdf

def calculate_lookup(src_cdf, ref_cdf):
    """
    This method creates the lookup table
    :param array src_cdf: The cdf for the source image
    :param array ref_cdf: The cdf for the reference image
    :return: lookup_table: The lookup table
    :rtype: array
    """
    lookup_table = np.zeros(256)
    lookup_val = 0
    for src_pixel_val in range(len(src_cdf)):
        for ref_pixel_val in range(len(ref_cdf)):
            if ref_cdf[ref_pixel_val] >= src_cdf[src_pixel_val]:
                lookup_val = ref_pixel_val
                break
        lookup_table[src_pixel_val] = lookup_val
    return lookup_table

def match_histograms(src_image, ref_image):
    """
    This method matches the source image histogram to the
    reference image histogram
    :param image src_image: The original source image
    :param image ref_image: The reference image
    :return: image_after_matching
    :rtype: image (array)
    """
    # Split the images into the blue (b), green (g) and red (r) channels
    src_b, src_g, src_r = cv2.split(src_image)
    ref_b, ref_g, ref_r = cv2.split(ref_image)

    # Compute the b, g, and r histograms separately
    # (the flatten() NumPy method returns a copy of the array
    # collapsed into one dimension)
    src_hist_blue, bin_0 = np.histogram(src_b.flatten(), 256, [0, 256])
    src_hist_green, bin_1 = np.histogram(src_g.flatten(), 256, [0, 256])
    src_hist_red, bin_2 = np.histogram(src_r.flatten(), 256, [0, 256])
    ref_hist_blue, bin_3 = np.histogram(ref_b.flatten(), 256, [0, 256])
    ref_hist_green, bin_4 = np.histogram(ref_g.flatten(), 256, [0, 256])
    ref_hist_red, bin_5 = np.histogram(ref_r.flatten(), 256, [0, 256])

    # Compute the normalized cdf for the source and reference images
    src_cdf_blue = calculate_cdf(src_hist_blue)
    src_cdf_green = calculate_cdf(src_hist_green)
    src_cdf_red = calculate_cdf(src_hist_red)
    ref_cdf_blue = calculate_cdf(ref_hist_blue)
    ref_cdf_green = calculate_cdf(ref_hist_green)
    ref_cdf_red = calculate_cdf(ref_hist_red)

    # Make a separate lookup table for each color channel
    blue_lookup_table = calculate_lookup(src_cdf_blue, ref_cdf_blue)
    green_lookup_table = calculate_lookup(src_cdf_green, ref_cdf_green)
    red_lookup_table = calculate_lookup(src_cdf_red, ref_cdf_red)

    # Use the lookup tables to transform the colors of the original
    # source image
    blue_after_transform = cv2.LUT(src_b, blue_lookup_table)
    green_after_transform = cv2.LUT(src_g, green_lookup_table)
    red_after_transform = cv2.LUT(src_r, red_lookup_table)

    # Put the image back together
    image_after_matching = cv2.merge([
        blue_after_transform, green_after_transform, red_after_transform])
    image_after_matching = cv2.convertScaleAbs(image_after_matching)
    return image_after_matching
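In other words, for each channel the lookup table implements the standard histogram-matching map: each source intensity $v$ is sent to the smallest reference intensity whose CDF value reaches the source's,

$$\mathrm{LUT}(v) = \min\{\, u \in [0, 255] : F_{\mathrm{ref}}(u) \ge F_{\mathrm{src}}(v) \,\}$$

where $F_{\mathrm{src}}$ and $F_{\mathrm{ref}}$ are the normalized CDFs from calculate_cdf.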
And this is my C++ attempt:
Mat Flatten(const Mat& mat)
{
    auto m2 = mat.reshape(1, 1);
    return m2;
}

Mat calculate_cdf(Mat m)
{
    cv::Mat accumulatedHist = m.clone();
    for (int i = 1; i < m.rows; i++) {
        float& f1 = accumulatedHist.at<float>(i);
        f1 += accumulatedHist.at<float>(i - 1);
    }
    float maxx = 0;
    for (int i = 0; i < m.rows; i++) {
        if (accumulatedHist.at<float>(i) > maxx)
            maxx = accumulatedHist.at<float>(i);
    }
    for (int i = 0; i < m.rows; i++) {
        accumulatedHist.at<float>(i) /= maxx;
    }
    return accumulatedHist;
}

Mat calculate_lookup(Mat src_cdf, Mat ref_cdf)
{
    Mat lookup_table = Mat::zeros(256, 1, CV_32FC1);
    float lookup_val = 0;
    for (int src_pixel_val = 0; src_pixel_val < src_cdf.rows; src_pixel_val++)
    {
        float lookup_val = 0;
        for (int ref_pixel_val = 0; ref_pixel_val < ref_cdf.rows; ref_pixel_val++)
        {
            if (ref_cdf.data[ref_pixel_val] >= src_cdf.data[src_pixel_val])
            {
                lookup_val = ref_pixel_val;
                break;
            }
        }
        lookup_table.data[src_pixel_val] = lookup_val;
    }
    return lookup_table;
}

Mat hm(Mat src_image, Mat ref_image)
{
    // Split images
    Mat src[3];
    split(src_image, src);
    Mat ref[3];
    split(ref_image, ref);

    // Compute the b, g, and r histograms separately
    float range[] = { 0, 256 };
    const float* histRange = { range };
    bool uniform = 1, accumulate = 0;
    Mat src_hist_blue, src_hist_green, src_hist_red;
    Mat ref_hist_blue, ref_hist_green, ref_hist_red;
    int histSize = 256; // expected
    calcHist(&Flatten(src[0]), 1, 0, Mat(), src_hist_blue, 1, &histSize, &histRange, uniform, accumulate);
    calcHist(&Flatten(src[1]), 1, 0, Mat(), src_hist_green, 1, &histSize, &histRange, uniform, accumulate);
    calcHist(&Flatten(src[2]), 1, 0, Mat(), src_hist_red, 1, &histSize, &histRange, uniform, accumulate);
    calcHist(&Flatten(ref[0]), 1, 0, Mat(), ref_hist_blue, 1, &histSize, &histRange, uniform, accumulate);
    calcHist(&Flatten(ref[1]), 1, 0, Mat(), ref_hist_green, 1, &histSize, &histRange, uniform, accumulate);
    calcHist(&Flatten(ref[2]), 1, 0, Mat(), ref_hist_red, 1, &histSize, &histRange, uniform, accumulate);

    auto src_cdf_blue = calculate_cdf(src_hist_blue);
    auto src_cdf_green = calculate_cdf(src_hist_green);
    auto src_cdf_red = calculate_cdf(src_hist_red);
    auto ref_cdf_blue = calculate_cdf(ref_hist_blue);
    auto ref_cdf_green = calculate_cdf(ref_hist_green);
    auto ref_cdf_red = calculate_cdf(ref_hist_red);

    auto blue_lookup_table = calculate_lookup(src_cdf_blue, ref_cdf_blue);
    auto green_lookup_table = calculate_lookup(src_cdf_green, ref_cdf_green);
    auto red_lookup_table = calculate_lookup(src_cdf_red, ref_cdf_red);

    Mat at[3];
    auto to = src[0].total();
    to = blue_lookup_table.total();
    to = blue_lookup_table.channels();
    LUT(src[0], blue_lookup_table, at[2]);
    LUT(src[1], green_lookup_table, at[1]);
    LUT(src[2], red_lookup_table, at[0]);

    Mat image_after_matching;
    merge(at, 3, image_after_matching);
    Mat dst;
    convertScaleAbs(image_after_matching, dst);
    return dst;
}

int main()
{
    Mat image_src = imread("r:\\15.jpg");
    Mat image_ref = imread("r:\\130.jpg");
    Mat i3 = hm(image_src, image_ref);
    DeleteFile(L"r:\\r.jpg");
    imwrite("r:\\r.jpg", i3);
    ShellExecute(0, L"open", L"r:\\r.jpg", 0, 0, 0);
}
And this is my result:
If the accumulate parameter is true, the results are equally wrong. I'm not sure whether I have converted the Python code correctly, so any help would be appreciated.
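A likely culprit (a guess from reading the code, not a verified fix): Mat::data is a uchar*, so ref_cdf.data[i] and src_cdf.data[i] compare raw bytes of the CV_32F matrices that calcHist/calculate_cdf produce, and lookup_table.data[src_pixel_val] writes single bytes into a float table. A sketch of calculate_lookup using typed element access instead, building a CV_8U table so that cv::LUT maps 8-bit pixels directly:

// Sketch: assumes src_cdf and ref_cdf are 256x1 CV_32FC1 mats, as produced
// by calcHist followed by calculate_cdf above.
Mat calculate_lookup(const Mat& src_cdf, const Mat& ref_cdf)
{
    // A CV_8U table makes cv::LUT produce CV_8U output directly.
    Mat lookup_table = Mat::zeros(256, 1, CV_8U);
    int lookup_val = 0;
    for (int src_pixel_val = 0; src_pixel_val < src_cdf.rows; src_pixel_val++) {
        for (int ref_pixel_val = 0; ref_pixel_val < ref_cdf.rows; ref_pixel_val++) {
            // at<float>() reads the actual CDF values; .data[] would compare
            // the first bytes of the float representations instead.
            if (ref_cdf.at<float>(ref_pixel_val) >= src_cdf.at<float>(src_pixel_val)) {
                lookup_val = ref_pixel_val;
                break;
            }
        }
        lookup_table.at<uchar>(src_pixel_val) = (uchar)lookup_val;
    }
    return lookup_table;
}

Two smaller things worth checking as well: the LUT calls put the blue result in at[2] and the red result in at[0], which reverses the channel order relative to the Python version before merge; and &Flatten(src[0]) takes the address of a temporary, which only compiles as an MSVC extension (store the flattened Mat in a named variable first).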

Related

How do I translate Decode(Packet) function in C++?

I am learning DepthAI and I found this example on their repo: https://github.com/luxonis/depthai-experiments/tree/master/gen2-road-segmentation. I started translating this code into C++ to be consistent with the project I am putting together, and I ran into this function named "decode":
def decode(packet):
    data = np.squeeze(toTensorResult(packet)["L0317_ReWeight_SoftMax"])
    class_colors = [[0, 0, 0], [0, 255, 0], [255, 0, 0], [0, 0, 255]]
    class_colors = np.asarray(class_colors, dtype=np.uint8)
    indices = np.argmax(data, axis=0)
    output_colors = np.take(class_colors, indices, axis=0)
    return output_colors
Adding more detail regarding the problem:
DepthAI offers a lot of examples in their core repo, https://github.com/luxonis/depthai-core.
I used some of those examples to start shaping the segmentation script, since it's a feature I couldn't find written in C++ among all of the examples.
Here is my progress so far.
#include <chrono>
#include "depthai-core/examples/utility/utility.hpp"
#include <depthai/depthai.hpp>
#include "slar.hpp"

using namespace slar;
using namespace std;
using namespace std::chrono;

static std::atomic<bool> syncNN{true};

void slar_depth_segmentation::segment(int argc, char **argv, dai::Pipeline &pipeline,
                                      cv::Mat frame,
                                      dai::Device *device_unused) {
    // blob model
    std::string nnPath("/Users/alessiograncini/road-segmentation-adas-0001.blob");
    if (argc > 1) {
        nnPath = std::string(argv[1]);
    }
    printf("Using blob at path: %s\n", nnPath.c_str());

    // in
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    auto imageManip = pipeline.create<dai::node::ImageManip>();
    auto mobilenetDet = pipeline.create<dai::node::MobileNetDetectionNetwork>();
    // out
    auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
    auto nnOut = pipeline.create<dai::node::XLinkOut>();
    auto xoutManip = pipeline.create<dai::node::XLinkOut>();

    // stream names
    xoutRgb->setStreamName("camera");
    xoutManip->setStreamName("manip");
    nnOut->setStreamName("segmentation");
    //
    imageManip->initialConfig.setResize(300, 300);
    imageManip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p);

    // properties
    camRgb->setPreviewSize(300, 300);
    camRgb->setBoardSocket(dai::CameraBoardSocket::RGB);
    camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
    camRgb->setInterleaved(false);
    camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::RGB);
    //
    mobilenetDet->setConfidenceThreshold(0.5f);
    mobilenetDet->setBlobPath(nnPath);
    mobilenetDet->setNumInferenceThreads(2);
    mobilenetDet->input.setBlocking(false);

    // link
    camRgb->preview.link(xoutRgb->input);
    imageManip->out.link(mobilenetDet->input);
    //
    if (syncNN) {
        mobilenetDet->passthrough.link(xoutManip->input);
    } else {
        imageManip->out.link(xoutManip->input);
    }
    //
    mobilenetDet->out.link(nnOut->input);

    // device
    dai::Device device(pipeline);

    // queues
    auto previewQueue = device.getOutputQueue("camera", 4, false);
    auto detectionNNQueue = device.getOutputQueue("segmentation", 4, false);

    // fps
    auto startTime = steady_clock::now();
    int counter = 0;
    float fps = 0;
    auto color = cv::Scalar(255, 255, 255);

    // main
    while (true) {
        auto inRgb = previewQueue->get<dai::ImgFrame>();
        auto inSeg = detectionNNQueue->get<dai::NNData>();
        //?
        auto segmentations = inSeg->getData();
        //
        counter++;
        auto currentTime = steady_clock::now();
        auto elapsed = duration_cast<duration<float>>(currentTime - startTime);
        if (elapsed > seconds(1)) {
            fps = counter / elapsed.count();
            counter = 0;
            startTime = currentTime;
        }
        // testing if mat is a good replacement for
        // the input array as in "decode" the inSeg data is manipulated
        // cv::Mat img(500, 1000, CV_8UC1, cv::Scalar(70));
        // slar_depth_segmentation::draw(segmentations, frame);
        std::stringstream fpsStr;
        fpsStr << std::fixed << std::setprecision(2) << fps;
        cv::imshow("camera window", inRgb->getCvFrame());
        // cv::imshow("camera window", frame);
        int key = cv::waitKey(1);
        if (key == 'q' || key == 'Q') {
            break;
        }
    }
}

void slar_depth_segmentation::draw(cv::InputArray data, cv::OutputArray frame) {
    cv::addWeighted(frame, 1, data, 0.2, 0, frame);
}

// https://jclay.github.io/dev-journal/simple_cpp_argmax_argmin.html
void slar_depth_segmentation::decode(cv::InputArray data) {
    vector<int> class_colors[4] =
        {{0, 0, 0}, {0, 255, 0}, {255, 0, 0}, {0, 0, 255}};
}
I can successfully display the camera feed using this script, but as you can tell, the only part of the segmentation that is translated is the draw method, which behaves the same in Python and C++ since it's part of the OpenCV library.
I am getting stuck trying to write the equivalent of the decode method. Thanks.
[edit]
Any suggestions regarding this follow-up?
C++
cv::InputArray slar_depth_segmentation::decode(std::vector<std::uint8_t> data) {
    // reshape or np.squeeze
    data.resize(1, 1);
    // create a vector array
    std::vector<std::vector<int>> classColors{
        {0, 0, 0},
        {0, 255, 0},
        {255, 0, 0},
        {0, 0, 255}};
    double minVal;
    double maxVal;
    cv::minMaxIdx(
        data,
        &minVal,
        &maxVal);
    // get max value of class colors
    auto output_colors = classColors[&maxVal, 0];
    return output_colors;
}
Py
def decode(packet):
    data = np.squeeze(toTensorResult(packet)["L0317_ReWeight_SoftMax"])
    class_colors = [[0, 0, 0], [0, 255, 0], [255, 0, 0], [0, 0, 255]]
    class_colors = np.asarray(class_colors, dtype=np.uint8)
    indices = np.argmax(data, axis=0)
    output_colors = np.take(class_colors, indices, axis=0)
    return output_colors
I think you are looking for an argmax function. OpenCV has no cv::argmax as such: cv::minMaxIdx / cv::minMaxLoc return the position of the single maximum value in an array, and recent OpenCV 4.x versions add cv::reduceArgMax for a per-axis argmax like np.argmax(data, axis=0). With an older OpenCV you can simply write the per-pixel argmax yourself.
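As for the follow-up attempt: cv::minMaxIdx only finds one global maximum over the whole buffer, and classColors[&maxVal, 0] is a comma expression that just reads classColors[0], so it won't reproduce the NumPy version. What decode does is a per-pixel argmax over the four class planes followed by a colour lookup, which can be written as a plain loop. Below is a minimal sketch; it assumes the tensor arrives as a flat CHW float buffer (4 planes of height*width scores), e.g. fetched with dai::NNData::getLayerFp16. The layer name, layout, and plane count are assumptions to check against the road-segmentation model's documentation.

#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: C++ equivalent of the Python decode(), under the assumption
// that `data` holds 4 class planes of height*width scores (CHW layout).
cv::Mat decode(const std::vector<float>& data, int height, int width)
{
    const int numClasses = 4;
    const cv::Vec3b classColors[numClasses] = {
        {0, 0, 0}, {0, 255, 0}, {255, 0, 0}, {0, 0, 255}};

    cv::Mat outputColors(height, width, CV_8UC3);
    const int planeSize = height * width;
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            // np.argmax(data, axis=0): pick the class with the highest score.
            int best = 0;
            float bestScore = data[0 * planeSize + y * width + x];
            for (int c = 1; c < numClasses; c++) {
                float score = data[c * planeSize + y * width + x];
                if (score > bestScore) {
                    bestScore = score;
                    best = c;
                }
            }
            // np.take(class_colors, indices, axis=0): colour lookup.
            outputColors.at<cv::Vec3b>(y, x) = classColors[best];
        }
    }
    return outputColors;
}

The returned CV_8UC3 Mat can then be resized to the preview size and blended over the camera frame with the existing draw method.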

Ising model in 2d wrong heat capacity

The problem.
I'm trying to make a Metropolis simulation of the 2D Ising model. My code reproduces some of the expected behaviour: there's a critical transition at vaguely the right temperature, but:
Energy has a smoothed-out transition: magnetisation drops in a sharp step from 1 to 0, while energy continues to rise and doesn't show any change of gradient.
The heat capacity shows a peak, but instead of growing when I increase the lattice size, the peak gets smaller.
The heat capacity is noisy: the peak is often doubled, and fitting it to a Lorentzian is a nightmare. Worse yet, running the simulation multiple times gets me the same data (as evidenced by the small error bars).
My project is a C++ program that simulates a single lattice, and a Python script that analyses the output. As of now the script pipes the program's stdout and assigns it to internal variables. It's inefficient, but that's for debugging purposes.
The full source code can be found here. For convenience, see the main routines of the C++ program and the Python script below.
The code:
lattice.h
...
/* Converts a given pair of indices to those with periodic boundary conditions. */
int inline to_periodic(int row, int col, int size) {
    ...
}

class lattice {
private:
    unsigned int size_;
    std::vector<short> *spins_;
    float J_;
    float H_;

public:
    lattice() noexcept : size_(0), spins_(NULL), J_(1.0), H_(0.0) {}
    lattice(int new_size, double new_J, double new_H) noexcept
        : size_(new_size), spins_(new std::vector<short>(size_ * size_, 1)),
          J_(new_J), H_(new_H) {}
    lattice(const lattice &other) noexcept
        : lattice(other.size_, other.J_, other.H_) {
    ...
    float compute_point_energy(int row, int col);
};
#endif
simulation.h
#ifndef simulation_h
#define simulation_h
#include "lattice.h"
#include "rng.h"
#include <gsl/gsl_rng.h>

class simulation {
private:
    unsigned int time_ = 0; // Current time of the simulation.
    rng r_ = rng();
    lattice spin_lattice_;
    double temperature_;
    double mean_magnetisation_ = 1;
    double mean_energy_;
    double total_magnetisation_;
    double total_energy_;
    unsigned int print_interval_ = 1;
    void advance();

public:
    void set_print_interval(unsigned int new_print_interval) {
        print_interval_ = new_print_interval;
    }

    simulation(int new_size, double new_temp, double new_J, double new_H)
        : time_(0), spin_lattice_(lattice(new_size, new_J, new_H)),
          temperature_(new_temp), mean_energy_(new_J * (-4)),
          total_magnetisation_(new_size * new_size),
          total_energy_(compute_energy(spin_lattice_)) {}

    void print_status(FILE *f) {
        f = f == NULL ? stdout : f;
        fprintf(f, "%4d\t%e \t%e\t%e\n", time_, mean_magnetisation_,
                mean_energy_, temperature_);
    }
    void advance(unsigned int time_steps, FILE *output);
    double compute_energy(lattice &other);
    double compute_dE(int row, int col) {
        return -2 * spin_lattice_.compute_point_energy(row, col);
    }
    void set_to_chequerboard(int step);
    void print_lattice() {
        spin_lattice_.print();
    };
    // void load_custom(const lattice& custom);
};
#endif
simulation.cpp
...
void simulation::advance(unsigned int time_steps, FILE *output) {
    unsigned int area = spin_lattice_.get_size() * spin_lattice_.get_size();
    for (unsigned int i = 0; i < time_steps; i++) {
        if (time_ % print_interval_ == 0) {
            total_magnetisation_ = spin_lattice_.total_magnetisation();
            mean_magnetisation_ = total_magnetisation_ / area;
            total_energy_ = compute_energy(spin_lattice_);
            mean_energy_ = total_energy_ / area;
            print_status(output);
        }
        advance();
    }
}

void simulation::advance() {
    // #pragma omp parallel for collapse(2)
    for (unsigned int row = 0; row < spin_lattice_.get_size(); row++) {
        for (unsigned int col = 0; col < spin_lattice_.get_size(); col++) {
            double dE = compute_dE(row, col);
            double p = r_.random_uniform();
            if (dE < 0 || exp(-dE / temperature_) > p) {
                spin_lattice_.flip(row, col);
            }
        }
    }
    time_++;
}

double simulation::compute_energy(lattice &other) {
    double energy_sum = 0;
    unsigned int max = other.get_size();
    #pragma omp parallel for reduction(+ : energy_sum)
    for (unsigned int i = 0; i < max; i++) {
        for (unsigned int j = 0; j < max; j++) {
            energy_sum += other.compute_point_energy(i, j);
        }
    }
    return energy_sum / 2;
}
lattice.cpp
...
void lattice::print() {
    ...
}

float lattice::compute_point_energy(int row, int col) {
    int accumulator = get(row + 1, col) + get(row - 1, col) +
                      get(row, col - 1) + get(row, col + 1);
    return -get(row, col) * (accumulator * J_ + H_);
}

int lattice::total_magnetisation() {
    ...
}
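For reference, compute_point_energy is the local term of the usual Ising Hamiltonian with nearest-neighbour coupling $J$ and external field $H$, and compute_energy divides the sum of point energies by two because every bond is counted from both of its ends:

$$E = -J \sum_{\langle i,j \rangle} s_i s_j - H \sum_i s_i$$

(one thing to note in passing: the division by two also halves the field term, which is harmless while $H = 0$ but would be wrong otherwise).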
investigator.py
...
# -------------------------------------------------------------------------
def smart_duration(temperature, multiplier=1.):
    return int(((10**3)*base_duration*multiplier)/(
        (temperature - t_c)**2 + breadth))

def investigate_temperature_dependence(temps=None, lattice_sizes=None,
                                       **kwargs):
    if temps is None:
        temps = append(linspace(1.5, 2.2, 8), linspace(2.2, 2.3, 8))
        temps = append(temps, linspace(2.3, 4, 8))
    if lattice_sizes is None:
        lattice_sizes = [50, 250, 300]

    fig, ax = plt.subplots(nrows=2, sharex='col')
    ax[0].set_ylabel('mean magnetisation / arb. u.')
    ax[1].set_ylabel('mean energy / arb. u.')
    ax[0].axvline(x=t_c, color='k', ls='--')
    ax[1].axvline(x=t_c, color='k', ls='--')
    for l in lattice_sizes:
        print(l)
        simulations = [Simulation(lattice_size=l, temperature=t,
                                  duration=smart_duration(l, t)) for t in
                       temps]
        data = [multi_run(s, **kwargs) for s in simulations]
        terminal_magnetisations = array(data).T[0]
        sigma_magnetisations = array(data).T[1]
        terminal_energies = array(data).T[2]
        ax[0].plot(temps, terminal_magnetisations, '-',
                   label='N = ' + str(l))
        ax[1].plot(temps, terminal_energies, '-', label='N = ' + str(l))
    plt.xlabel('temperature / arb. u.')
    plt.xticks(
        append(linspace(min(temps), t_c, 3), linspace(t_c, max(temps), 3)))
    save_plot('Critical temperature')

# -------------------------------------------------------------------------
def theoretical_capacity(x, a, b, c, d):
    return c + (b*x)/((x - a)**2 + d)

def investigate_heat_capacity(lattice_sizes=None, temps=None, **kwargs):
    if temps is None:
        temps = linspace(1.5, 3, 60)
    if lattice_sizes is None:
        lattice_sizes = [16, 32, 34, 36]

    fig, axes = plt.subplots(nrows=len(lattice_sizes), sharex='col')
    crit_temps = []
    for l, ax in zip(lattice_sizes, axes):
        crit_temps.append(fit_and_plot_capacity(ax, l, temps, **kwargs))

    fig.set_size_inches(10.5, 10.5)
    plt.xlabel('temperature / arb. u.')
    save_plot('Heat capacity')
    return crit_temps

def fit_and_plot_capacity(ax, l, temps, **kwargs):
    """
    Plot the heat capacity of simulations at the given temperatures and
    lattice size, then fit a Lorentzian and plot it.

    Parameters:
    -----------
    ax : pyplot.axis
        what to plot to.
    l : int
        lattice size
    temps : numpy.array
        array of temperatures where to evaluate heat capacity.

    Returns:
    --------
    popt[0] : float
        most likely critical temperature.
    """
    global use_disk
    use_disk = False
    simulations = [Simulation(lattice_size=l, temperature=t, duration=2)
                   for t in temps]
    # sigmas = [stdev(s.mean_energies[:]) for s in simulations]
    sigmas = array([multi_run(s, **kwargs) for s in simulations]).T[3]
    meta_sigmas = array([multi_run(s, **kwargs) for s in simulations]).T[4]
    # print(meta_sigmas)
    Cs = [sigma**2/(temp**2)*10**3 for temp, sigma in zip(temps, sigmas)]
    C_errs = Cs[:]*meta_sigmas[:]
    try:
        popt, pcov = curve_fit(theoretical_capacity, temps, Cs,
                               sigma=meta_sigmas, bounds=(
                                   [min(temps) - .2, 0, 0, 0],
                                   [max(temps) + .2, inf, inf, .7]))
    except RuntimeError:
        popt = [temps[argmax(Cs)], (max(Cs) - min(Cs))/4, min(Cs),
                (max(temps) - min(temps))/4]
        print('I\'m too dumb to fit')

    ax.axvline(x=popt[0], ls='--', color='g')
    ax.plot(temps, theoretical_capacity(temps, *popt), 'g-',
            label='N = ' + str(l) + ' d = ' + str(popt[3]) + ' fit')
    ax.errorbar(temps*10, Cs, fmt='b.', yerr=C_errs, label='N = ' + str(l))
    ax.set_ylabel(r'C $\cdot 10^3$ / arb. u.')
    ax.axvline(x=t_c, ls='-.', color='k')
    ax.set_xticks(
        append(linspace(min(temps), t_c, 6), linspace(t_c, max(temps), 6)))
    ax.legend(loc='best')
    return popt[0]

def multi_run(sim, re_runs: int = 2, take_last: int = 300):
    """
    Re-run the Ising model simulation multiple times, and gather statistics.

    Parameters:
    -----------
    sim : simulation
    re_runs : int
        number of times to repeat the simulation
    take_last : int
        how many of the final points to take statistics over.

    Returns:
    --------
    list
    """
    global use_disk
    use_disk = False
    sim.duration = smart_duration(sim.temperature)
    print(sim.duration)
    magnetizations = []
    sigma_magnetizations = []
    energies = []
    sigma_energies = []
    for i in range(re_runs):
        sim.data = sim.run()
        # Make each run take 50% longer, so that we
        # can see if a system is still settling
        sim.duration *= 2
        last_magnetizations = sim.mean_magnetizations()[-take_last:]
        magnetizations.append(abs(mean(last_magnetizations)))
        sigma_magnetizations.append(std(last_magnetizations))
        last_energies = sim.mean_energies()[-take_last:]
        energies.append(mean(last_energies))
        sigma_energies.append(std(last_energies))
    return [mean(magnetizations), mean(sigma_magnetizations),
            mean(energies), mean(sigma_energies),
            std(sigma_energies)/mean(sigma_energies)]

# -------------------------------------------------------------------------
def finite_size_scale(N, t_inf, a, v):
    return t_inf + a*(N**(-1/v))

def investigate_finite_size_scaling(critical_temperatures, lattice_sizes,
                                    **kwargs):
    if critical_temperatures is None:
        critical_temperatures = investigate_heat_capacity(lattice_sizes,
                                                          **kwargs)
    args, cov = curve_fit(finite_size_scale, lattice_sizes,
                          critical_temperatures)
    plt.plot(lattice_sizes, critical_temperatures, 'b+', label='data')
    plt.plot(lattice_sizes, finite_size_scale(lattice_sizes, *args), 'r-',
             label='fit')
    plt.ylabel('critical temperature / arb. u.')
    plt.xlabel('Lattice size')
    save_plot('Finite size scaling')
    return args[0], sqrt(cov[0, 0])

# -------------------------------------------------------------------------
t_c = 2/log(1 + sqrt(2))
use_disk = False
breadth = 1
base_duration = 50
sizes = [16, 32, 40, 50, 70]
# investigate_time_evolution()
# investigate_temperature_dependence(lattice_sizes=sizes, re_runs=4)
critical_temps = investigate_heat_capacity(lattice_sizes=sizes,
                                           take_last=300)
# temp_inf = investigate_finite_size_scaling(critical_temps, sizes)
# print(temp_inf)
# print((t_c - temp_inf[0])/ temp_inf[1], ' Standard errors away')
Pictures
To illustrate the problem, take a look here:
As I said, magnetisation is fine, but energy isn't: there isn't a sharp transition.
Heat capacity is even worse; it starts to plateau for the larger lattice sizes.
My thoughts.
I might simply not be running the simulation long enough (tens of thousands of MCS) and averaging over too few data points (the last 300), but then the error bars would be bigger, or at least visible.
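For what it's worth, the estimator behind Cs = sigma**2/(temp**2) is the fluctuation-dissipation relation (with $k_B = 1$, as in the code):

$$C = \frac{\langle E^2 \rangle - \langle E \rangle^2}{T^2}$$

The variance here should be taken over effectively independent equilibrium samples of the energy; the last take_last sweeps of a single run are strongly correlated, which biases the measured variance down. And if repeated runs produce literally identical data, it is worth checking that rng is not seeded with the same fixed value on every run.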

Draw Longest Straight Line in Contours OpenCv [duplicate]

I am using OpenCV and Python. I am trying to draw the longest line inside a contour.
I have a contour named cnt. The image is binary: the inside of the contour is white and the outside is black. I would like to draw the longest line inside the white contour. I found how to draw lines using cv2.line, but I didn't find how to draw the longest one. Do you have any ideas?
img_copy = cv2.dilate(copy.deepcopy(img), np.ones((2,2),np.uint8),iterations = 2)
contours, hierarchy = cv2.findContours(copy.deepcopy(img_copy),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt = contours[max_index]
The method below draws the lines found in the image and gets the most frequent angle (in degrees). Try this; it's working fine for me.
Mat Compute_skewAngle (Mat& src, Mat& src_gray, int drawLine) {
    int thresh = 100;
    RNG rng(12345);
    // 1. Load Grayscale Image
    // 2. Get Size of Image
    cv::Size size = src_gray.size();
    // 3. Blur the Grayscale image
    cv::blur(src_gray, src_gray, cv::Size(3,3));
    cv::Mat threshold_output;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<Vec4i> hierarchy;
    // 4. Detect edges using Threshold / Canny edge detector
    //cv::threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
    Mat dst, cdst;
    cv::Canny(src_gray, dst, thresh, 200, 3);
    // 5. Gray Image to BGR
    cvtColor(dst, cdst, CV_GRAY2BGR);
#if 0
    vector<Vec2f> lines;
    HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0);
    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0], theta = lines[i][1];
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line(cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
    }
#else
    vector<Vec4i> lines;
    double angle = 0.;
    int countNegative = 0;
    int countPositive = 0;
    HoughLinesP(dst, lines, 1, CV_PI/180, 100, 10, 100);
    NSMutableDictionary *angleCountDict = [[NSMutableDictionary alloc] init];
    for (size_t i = 0; i < lines.size(); i++)
    {
        if (drawLine == 1) { // draw the line when flag value 1 is passed
            Vec4i l = lines[i];
            line(cdst, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA);
        }
        double delta_y = lines[i][3] - lines[i][1];
        double delta_x = lines[i][2] - lines[i][0];
        double currentAngle = atan2(delta_y, delta_x);
        int angleAsDeg = abs(currentAngle * 180 / CV_PI);
        NSString *_retValue = [angleCountDict objectForKey:[NSString stringWithFormat:@"%d", angleAsDeg]];
        int angleCount = [_retValue intValue];
        [angleCountDict setObject:[NSNumber numberWithInt:angleCount + 1] forKey:[NSString stringWithFormat:@"%d", angleAsDeg]];
        double slope = delta_y / delta_x; // find the slope to detect whether the angle is "-" or "+"
        if (slope < 0)
            countNegative++;
        else
            countPositive++;
    }
#endif
    // sort the dictionary to get the degree with the largest count
    NSArray *blockSortedKeys = [angleCountDict keysSortedByValueUsingComparator: ^(id obj1, id obj2) {
        return [obj2 compare:obj1];
    }];
    NSString *degreeVal;
    if ([blockSortedKeys count] > 0)
        degreeVal = [blockSortedKeys objectAtIndex:0];
    angle = [degreeVal doubleValue];
    if (countNegative > countPositive) {
        angle = -angle;
    }
    Mat outPut;
    outPut = rotateMatImage(src, angle, cdst);
    return outPut;
}
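If what you need is specifically the longest straight segment, a simpler route than the angle histogram above (a sketch, not taken from the original answer) is to run HoughLinesP on the binary mask and keep the longest segment it returns; the Hough thresholds are assumptions you will have to tune:

#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: detect line segments in a binary mask and draw the longest one.
void drawLongestLine(const cv::Mat& binaryMask, cv::Mat& canvas)
{
    std::vector<cv::Vec4i> lines;
    cv::HoughLinesP(binaryMask, lines, 1, CV_PI / 180, 50, 30, 10);

    double bestLen2 = -1.0;
    cv::Vec4i best;
    for (const auto& l : lines) {
        double dx = l[2] - l[0], dy = l[3] - l[1];
        double len2 = dx * dx + dy * dy;  // compare squared lengths
        if (len2 > bestLen2) {
            bestLen2 = len2;
            best = l;
        }
    }
    if (bestLen2 >= 0) {
        cv::line(canvas, cv::Point(best[0], best[1]),
                 cv::Point(best[2], best[3]), cv::Scalar(0, 0, 255), 2);
    }
}

To restrict the search to the inside of cnt, first draw the contour filled into a mask (cv::drawContours with thickness cv::FILLED) and run the detection on that mask.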


Python vs. C++ OpenCV matchTemplate

I have a weird problem with OpenCV. I was doing template matching with OpenCV in both Python and C++; however, even though Python uses the C++ methods under the hood, I get very different results. The Python method gives me a really accurate location; the C++ one is just not even close. What is the reason for this? Is it my C++ code or something else?
I use Python 2.7.11, Apple LLVM version 7.3.0 (clang-703.0.29), and OpenCV 3.0.
My Python Code:
def toGray(img):
    _, _, channels = img.shape
    if channels == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    return gray

def template_match(img, template):
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    plt.subplot(121), plt.imshow(res, cmap='gray')
    plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(img, cmap='gray')
    plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
    plt.suptitle("TM_CCOEFF_NORMED")
    plt.show()

if __name__ == "__main__":
    img_name = sys.argv[1]
    img_name2 = sys.argv[2]
    img_rgb = cv2.imread(img_name)
    img_rgb2 = cv2.imread(img_name2)
    gimg1 = toGray(img_rgb)
    gimg2 = toGray(img_rgb2)
    template_match(gimg1, gimg2)
My C++ code (It is exactly the same with OpenCV documentation):
Mat img; Mat templ; Mat result;
char* image_window = "Source Image";
char* result_window = "Result window";
int match_method;
int max_Trackbar = 5;

/// Function Headers
void MatchingMethod( int, void* );

/** @function main */
int main( int argc, char** argv )
{
    /// Load image and template
    img = imread( argv[1], 1 );
    templ = imread( argv[2], 1 );

    /// Create windows
    namedWindow( image_window, CV_WINDOW_AUTOSIZE );
    namedWindow( result_window, CV_WINDOW_AUTOSIZE );

    /// Create Trackbar
    char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED";
    createTrackbar( trackbar_label, image_window, &match_method, max_Trackbar, MatchingMethod );

    MatchingMethod( 0, 0 );

    waitKey(0);
    return 0;
}

/**
 * @function MatchingMethod
 * @brief Trackbar callback
 */
void MatchingMethod( int, void* )
{
    /// Source image to display
    Mat img_display;
    img.copyTo( img_display );

    /// Create the result matrix
    int result_cols = img.cols - templ.cols + 1;
    int result_rows = img.rows - templ.rows + 1;
    result.create( result_rows, result_cols, CV_32FC1 );

    /// Do the Matching and Normalize
    matchTemplate( img, templ, result, match_method );
    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );

    /// Localizing the best match with minMaxLoc
    double minVal; double maxVal; Point minLoc; Point maxLoc;
    Point matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );

    /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better.
    if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
    { matchLoc = minLoc; }
    else
    { matchLoc = maxLoc; }

    /// Show me what you got
    rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols, matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
    rectangle( result, matchLoc, Point( matchLoc.x + templ.cols, matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
    imshow( image_window, img_display );
    imshow( result_window, result );
    cv::imwrite("rec.jpg", img_display);
    return;
}
Original Images:
Python Output:
C++ Output:
Looking through the two implementations, the most evident difference between them is the colour format of the images used.
In the Python version, you load the images "as-is", but then convert them to grayscale via toGray before matching, so the template matching runs on single-channel images:
img_rgb = cv2.imread(img_name)
img_rgb2 = cv2.imread(img_name2)
gimg1 = toGray(img_rgb)
gimg2 = toGray(img_rgb2)
However, in C++ you load the images as colour, since you pass 1 (IMREAD_COLOR) as the second parameter, and so you match on all three channels:
img = imread( argv[1], 1 );
templ = imread( argv[2], 1 );
According to cv::matchTemplate documentation:
In case of a color image, template summation in the numerator and each
sum in the denominator is done over all of the channels and separate
mean values are used for each channel. That is, the function can take
a color template and a color image. The result will still be a
single-channel image, which is easier to analyze.
That would suggest that it's quite possible to get different results when applying it to a 3-channel image than when applying it to a single-channel version of the same image.
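A minimal sketch of the fix, assuming the Python behaviour (grayscale matching) is the one you want: load both images as single-channel in C++ as well, by passing 0 (IMREAD_GRAYSCALE) instead of 1 to imread.

/// Load image and template as grayscale so that matchTemplate sees the
/// same single-channel inputs as the Python version
img = imread( argv[1], 0 );
templ = imread( argv[2], 0 );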
