Unpacking hex-encoded floats - Python

I'm trying to translate the following Python code into C++:
import struct
import binascii
inputstring = ("0000003F" "0000803F" "AD10753F" "00000080")
num_vals = 4
for i in range(num_vals):
    rawhex = inputstring[i*8:(i*8)+8]
    # <f for little endian float
    val = struct.unpack("<f", binascii.unhexlify(rawhex))[0]
    print val
# Output:
# 0.5
# 1.0
# 0.957285702229
# -0.0
So it reads 32-bit worth of the hex-encoded string, turns it into a byte-array with the unhexlify method, and interprets it as a little-endian float value.
The following almost works, but the code is kind of crappy (and the last 00000080 parses incorrectly):
#include <sstream>
#include <iostream>
int main()
{
    // The hex-encoded string, and number of values are loaded from a file.
    // The num_vals might be wrong, so some basic error checking is needed.
    std::string inputstring = "0000003F" "0000803F" "AD10753F" "00000080";
    int num_vals = 4;
    std::istringstream ss(inputstring);
    for(unsigned int i = 0; i < num_vals; ++i)
    {
        char rawhex[8];
        // The ifdef is wrong. It is not the way to detect endianness (it's
        // always defined)
#ifdef BIG_ENDIAN
        rawhex[6] = ss.get();
        rawhex[7] = ss.get();
        rawhex[4] = ss.get();
        rawhex[5] = ss.get();
        rawhex[2] = ss.get();
        rawhex[3] = ss.get();
        rawhex[0] = ss.get();
        rawhex[1] = ss.get();
#else
        rawhex[0] = ss.get();
        rawhex[1] = ss.get();
        rawhex[2] = ss.get();
        rawhex[3] = ss.get();
        rawhex[4] = ss.get();
        rawhex[5] = ss.get();
        rawhex[6] = ss.get();
        rawhex[7] = ss.get();
#endif
        if(ss.good())
        {
            std::stringstream convert;
            convert << std::hex << rawhex;
            int32_t val;
            convert >> val;
            std::cerr << (*(float*)(&val)) << "\n";
        }
        else
        {
            std::ostringstream os;
            os << "Not enough values in LUT data. Found " << i;
            os << ". Expected " << num_vals;
            std::cerr << os.str() << std::endl;
            throw std::exception();
        }
    }
}
(compiles on OS X 10.7/gcc-4.2.1, with a simple g++ blah.cpp)
Particularly, I'd like to get rid of the BIG_ENDIAN macro stuff, as I'm sure there is a nicer way to do this, as this post discusses.
A few other random details - I can't use Boost (too large a dependency for the project). The string will usually contain between 1536 (8^3 * 3) and 98304 (32^3 * 3) float values, at most 786432 (64^3 * 3).
(edit2: added another value, 00000080 == -0.0)

The following is your updated code modified to remove the #ifdef BIG_ENDIAN block. It uses a read technique that should be host byte order independent. It does this by reading the hex bytes (which are little endian in your source string) into a big endian string format compatible with the iostream std::hex operator. Once in this format it should not matter what the host byte order is.
Additionally, it fixes a bug in that rawhex needs to be zero terminated to be inserted into convert without trailing garbage in some cases.
I do not have a big endian system to test on, so please verify on your platform. This was compiled and tested under Cygwin.
#include <sstream>
#include <iostream>
#include <cstdint> // for uint32_t
int main()
{
    // The hex-encoded string, and number of values are loaded from a file.
    // The num_vals might be wrong, so some basic error checking is needed.
    std::string inputstring = "0000003F0000803FAD10753F00000080";
    int num_vals = 4;
    std::istringstream ss(inputstring);
    size_t const k_DataSize = sizeof(float);
    size_t const k_HexOctetLen = 2;
    for (int i = 0; i < num_vals; ++i)
    {
        char rawhex[k_DataSize * k_HexOctetLen + 1];
        // read little endian string into memory array
        for (uint32_t j = k_DataSize; (j > 0) && ss.good(); --j)
        {
            ss.read(rawhex + ((j - 1) * k_HexOctetLen), k_HexOctetLen);
        }
        // terminate the string (needed for safe conversion)
        rawhex[k_DataSize * k_HexOctetLen] = 0;
        if (ss.good())
        {
            std::stringstream convert;
            convert << std::hex << rawhex;
            uint32_t val;
            convert >> val;
            std::cerr << (*(float*)(&val)) << "\n";
        }
        else
        {
            std::ostringstream os;
            os << "Not enough values in LUT data. Found " << i;
            os << ". Expected " << num_vals;
            std::cerr << os.str() << std::endl;
            throw std::exception();
        }
    }
}
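A side note on the (*(float*)(&val)) line that both versions share: casting an integer's address to float* breaks the strict aliasing rules, and an optimizing compiler may legally mishandle the load. A memcpy-based bit copy says the same thing safely (my sketch, not part of the original answer; compilers typically optimize the memcpy away entirely):
#include <cstdint>
#include <cstring>
// Bit-for-bit reinterpretation of a 32-bit value as a float,
// without undefined behavior.
static float bits_to_float(uint32_t val)
{
    float f;
    std::memcpy(&f, &val, sizeof f);
    return f;
}
With that helper, the print becomes std::cerr << bits_to_float(val) << "\n";.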

I think the whole istringstream business is overkill. It's much easier to parse this yourself, one digit at a time.
First, create a function to convert a hex digit into an integer:
signed char htod(char c)
{
    c = tolower(c);
    if(isdigit(c))
        return c - '0';
    if(c >= 'a' && c <= 'f')
        return c - 'a' + 10;
    return -1;
}
Then simply convert the string into an integer. The code below doesn't check for errors and assumes big endianness -- but you should be able to fill in the details.
unsigned long t = 0;
for(int i = 0; i < s.length(); ++i)
    t = (t << 4) | htod(s[i]);
Then your float is
float f = * (float *) &t;
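Filling in those details, and replacing the pointer cast with a memcpy so the bit copy stays well-defined, the digit-at-a-time approach could look like the sketch below. It handles the little-endian chunk order explicitly, so it should behave the same on any host (my elaboration, not the answer author's code):
#include <cstdint>
#include <cstring>
// Decode 8 hex chars that store a float's bytes in little-endian order,
// reusing the htod() helper above. Returns false on a bad digit.
bool hex8_to_float(const char* s, float& out)
{
    uint32_t t = 0;
    for (int byte = 3; byte >= 0; --byte) // last hex pair is the most significant byte
    {
        signed char hi = htod(s[byte * 2]);
        signed char lo = htod(s[byte * 2 + 1]);
        if (hi < 0 || lo < 0)
            return false;
        t = (t << 8) | static_cast<uint32_t>((hi << 4) | lo);
    }
    std::memcpy(&out, &t, sizeof out); // well-defined bit copy
    return true;
}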

This is what we ended up with, in OpenColorIO/src/core/FileFormatIridasLook.cpp:
(Amardeep's answer with the uint32_t fix would likely work as well.)
// convert hex ascii to int
// return true on success, false on failure
bool hexasciitoint(char& ival, char character)
{
    if(character>=48 && character<=57) // [0-9]
    {
        ival = static_cast<char>(character-48);
        return true;
    }
    else if(character>=65 && character<=70) // [A-F]
    {
        ival = static_cast<char>(10+character-65);
        return true;
    }
    else if(character>=97 && character<=102) // [a-f]
    {
        ival = static_cast<char>(10+character-97);
        return true;
    }
    ival = 0;
    return false;
}
// convert array of 8 hex ascii to f32
// The input hexascii is required to be a little-endian representation
// as used in the iridas file format
// "AD10753F" -> 0.9572857022285461f on ALL architectures
bool hexasciitofloat(float& fval, const char * ascii)
{
    // Convert all ASCII numbers to their numerical representations
    char asciinums[8];
    for(unsigned int i=0; i<8; ++i)
    {
        if(!hexasciitoint(asciinums[i], ascii[i]))
        {
            return false;
        }
    }
    unsigned char * fvalbytes = reinterpret_cast<unsigned char *>(&fval);
#if OCIO_LITTLE_ENDIAN
    // Since incoming values are little endian, and we're on little endian
    // preserve the byte order
    fvalbytes[0] = (unsigned char) (asciinums[1] | (asciinums[0] << 4));
    fvalbytes[1] = (unsigned char) (asciinums[3] | (asciinums[2] << 4));
    fvalbytes[2] = (unsigned char) (asciinums[5] | (asciinums[4] << 4));
    fvalbytes[3] = (unsigned char) (asciinums[7] | (asciinums[6] << 4));
#else
    // Since incoming values are little endian, and we're on big endian
    // flip the byte order
    fvalbytes[3] = (unsigned char) (asciinums[1] | (asciinums[0] << 4));
    fvalbytes[2] = (unsigned char) (asciinums[3] | (asciinums[2] << 4));
    fvalbytes[1] = (unsigned char) (asciinums[5] | (asciinums[4] << 4));
    fvalbytes[0] = (unsigned char) (asciinums[7] | (asciinums[6] << 4));
#endif
    return true;
}
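For completeness, a small driver showing how these two functions might be wired up against the question's input (my sketch; in the real file OCIO_LITTLE_ENDIAN is defined by the build system):
#include <cstdio>
int main()
{
    const char* input = "0000003F" "0000803F" "AD10753F" "00000080";
    const int num_vals = 4;
    for (int i = 0; i < num_vals; ++i)
    {
        float f = 0.0f;
        // each value occupies 8 hex characters
        if (!hexasciitofloat(f, input + i * 8))
        {
            std::fprintf(stderr, "Invalid hex at value %d\n", i);
            return 1;
        }
        std::printf("%g\n", f);
    }
    return 0;
}
This prints 0.5, 1, 0.957286, -0, matching the Python output above.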

Related

Using Python ctypes, errors occur when matching the types of pointer and array arguments to a C library function

The following errors occurred while checking whether the hash generated by Python on the PC matches the hash generated by the board, using the md5.c file of the embedded board I am using.
TypeError: byref() argument must be a ctypes instance, not 'list'
OSError: exception: access violation reading 0x033D2000
import os
from ctypes import *
import subprocess
class MD5_CTX(Structure):
    _pack_ = 1
    _fields_ = [("i", c_int * 2),
                ("buf", c_int * 4),
                ("in", c_short * 64),
                ("digest", c_short * 16)]
current_machine_id = subprocess.check_output('wmic csproduct get uuid').decode().split('\n')[1].strip()
current_machine_id = current_machine_id.replace('-','')
print(current_machine_id)
Hash = create_string_buffer(16)
Init_read_byte = CDLL('md5lib').MD5Init
Init_read_byte.argtypes = [POINTER(MD5_CTX)]
Update_read_byte = CDLL('md5lib').MD5Update
Update_read_byte.argtypes = [POINTER(MD5_CTX), POINTER(c_short), c_int]
Final_read_byte = CDLL('md5lib').MD5Update
Final_read_byte.argtypes = [c_char_p, POINTER(MD5_CTX)]
context = MD5_CTX()
Init_read_byte(byref(context))
#Update_read_byte(byref(context), byref(current_machine_id), 16)  # !! First Error
Final_read_byte(Hash, byref(context))  # !! Second Error
print(sizeof(Hash), repr(Hash.raw))
The resulting values are as follows:
123456789ABCDEF123456789ABCDEFFF (example)
First Error : TypeError: byref() argument must be a ctypes instance, not 'str'
Second Error : OSError: exception: access violation reading 0x03BB1000
The following is the md5.c file's functions.
typedef unsigned int u32_t;
/* Data structure for MD5 (Message-Digest) computation */
typedef struct
{
    u32_t i[2];                /* number of _bits_ handled mod 2^64 */
    u32_t buf[4];              /* scratch buffer */
    unsigned short in[64];     /* input buffer */
    unsigned short digest[16]; /* actual digest after MD5Final call */
} MD5_CTX;
__declspec(dllexport) void
MD5Init(MD5_CTX *mdContext)
{
    mdContext->i[0] = mdContext->i[1] = (u32_t)0;
    /* Load magic initialization constants. */
    mdContext->buf[0] = (u32_t)0x67452301UL;
    mdContext->buf[1] = (u32_t)0xefcdab89UL;
    mdContext->buf[2] = (u32_t)0x98badcfeUL;
    mdContext->buf[3] = (u32_t)0x10325476UL;
}
__declspec(dllexport) void
MD5Update(MD5_CTX *mdContext, unsigned short *inBuf, unsigned short inLen)
{
    u32_t in[16];
    short mdi;
    unsigned short i, ii;
    /* compute number of bytes mod 64 */
    mdi = (short)((mdContext->i[0] >> 3) & 0x3F);
    /* update number of bits */
    if ((mdContext->i[0] + ((u32_t)inLen << 3)) < mdContext->i[0])
    {
        mdContext->i[1]++;
    }
    mdContext->i[0] += ((u32_t)inLen << 3);
    mdContext->i[1] += ((u32_t)inLen >> 29);
    while (inLen--)
    {
        /* add new character to buffer, increment mdi */
        mdContext->in[mdi++] = *inBuf++;
        /* transform if necessary */
        if (mdi == 0x40)
        {
            for (i = 0, ii = 0; i < 16; i++, ii += 4)
            {
                in[i] = (((u32_t)mdContext->in[ii + 3]) << 24) |
                        (((u32_t)mdContext->in[ii + 2]) << 16) |
                        (((u32_t)mdContext->in[ii + 1]) << 8) |
                        ((u32_t)mdContext->in[ii]);
            }
            Transform(mdContext->buf, in);
            mdi = 0;
        }
    }
}
__declspec(dllexport) void
MD5Final(unsigned short hash[], MD5_CTX *mdContext)
{
    u32_t in[16];
    short mdi;
    unsigned short i, ii;
    unsigned short padLen;
    /* save number of bits */
    in[14] = mdContext->i[0];
    in[15] = mdContext->i[1];
    /* compute number of bytes mod 64 */
    mdi = (short)((mdContext->i[0] >> 3) & 0x3F);
    /* pad out to 56 mod 64 */
    padLen = (mdi < 56) ? (56 - mdi) : (120 - mdi);
    MD5Update(mdContext, PADDING, padLen);
    /* append length in bits and transform */
    for (i = 0, ii = 0; i < 14; i++, ii += 4)
    {
        in[i] = (((u32_t)mdContext->in[ii + 3]) << 24) |
                (((u32_t)mdContext->in[ii + 2]) << 16) |
                (((u32_t)mdContext->in[ii + 1]) << 8) |
                ((u32_t)mdContext->in[ii]);
    }
    Transform(mdContext->buf, in);
    /* store buffer in digest */
    for (i = 0, ii = 0; i < 4; i++, ii += 4)
    {
        mdContext->digest[ii] = (unsigned short)(mdContext->buf[i] & 0xFF);
        mdContext->digest[ii + 1] =
            (unsigned short)((mdContext->buf[i] >> 8) & 0xFF);
        mdContext->digest[ii + 2] =
            (unsigned short)((mdContext->buf[i] >> 16) & 0xFF);
        mdContext->digest[ii + 3] =
            (unsigned short)((mdContext->buf[i] >> 24) & 0xFF);
    }
    memcpy(hash, mdContext->digest, 32);
}
What I want to print out in C is as follows.
INT8U ID[16] = {0, };
INT8U Hash[16] = {0, };
MD5_CTX context;
MD5Init(&context);
MD5Update(&context, ID, 16); // current machine id instead of ID
MD5Final(Hash, &context);
for (i = 0; i < 16; i++)
    printf("%02x", Hash[i]);
printf("\n");
I have been trying to match the parameters of MD5Update and MD5Final, but I am not familiar with Python, which is why I am asking here. Thank you for any help.

Python-like slicing operation in C++

I have the following code, where Snap.JPG is an RGB image:
import cv2
img = cv2.imread("./Snap.JPG")
img[:,:,:2] = 255
cv2.imshow("Img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
I want to convert this code into C++. What is the fastest way to implement the img[:,:,:2] = 255 part of the code? Channel splitting and merging is one of the options I know, but is there any smarter way to do slicing in C++?
Edit:
Apologies, I should have mentioned what I want in the output. I need a fading effect, because I want to overlay a drawing on top of it.
This is an example of how to change pixels:
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
int main(int argc, char** argv) {
    cv::Mat src_image = cv::imread("image.jpg", CV_LOAD_IMAGE_COLOR);
    if(!src_image.data) {
        std::cout << "Error: the image wasn't correctly loaded." << std::endl;
        return -1;
    }
    cv::Mat image = src_image.clone();
    // We iterate over all pixels of the image
    for(int r = 0; r < image.rows; r++) {
        // We obtain a pointer to the beginning of row r
        cv::Vec3b* ptr = image.ptr<cv::Vec3b>(r);
        for(int c = 0; c < image.cols; c++) {
            ptr[c] = cv::Vec3b(255, 255, ptr[c][2]);
        }
    }
    cv::imshow("Inverted Image", image);
    cv::waitKey();
    return 0;
}
Thanks for @Manuel's reply, which works quite well, but I could achieve the same result with faster speed. I have added my code snippets inline with your code.
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <iomanip>
#include <ctime> // for clock()
int main(int argc, char** argv) {
    clock_t start, end;
    cv::Mat src_image = cv::imread("Snap.JPG", CV_LOAD_IMAGE_COLOR);
    if(!src_image.data) {
        std::cout << "Error: the image wasn't correctly loaded." << std::endl;
        return -1;
    }
    /* 1st method */
    cv::Mat image = src_image.clone();
    start = clock();
    // We iterate over all pixels of the image
    for(int r = 0; r < image.rows; r++) {
        // We obtain a pointer to the beginning of row r
        cv::Vec3b* ptr = image.ptr<cv::Vec3b>(r);
        for(int c = 0; c < image.cols; c++) {
            ptr[c] = cv::Vec3b(255, 255, ptr[c][2]);
        }
    }
    end = clock();
    double time_taken = double(end - start) / double(CLOCKS_PER_SEC);
    std::cout << "Time taken by 1st method : " << std::fixed << time_taken << std::setprecision(5);
    std::cout << " sec " << std::endl;
    /* 2nd Method */
    start = clock();
    src_image = src_image | cv::Scalar(255, 255, 0);
    end = clock();
    time_taken = double(end - start) / double(CLOCKS_PER_SEC);
    std::cout << "Time taken by 2nd method : " << std::fixed << time_taken << std::setprecision(5);
    std::cout << " sec " << std::endl;
    bool isEqual = (cv::sum(src_image != image) == cv::Scalar(0, 0, 0, 0));
    if (isEqual)
    {
        std::cout << "\nIdentical Mats !" << std::endl;
    }
    cv::imshow("Inverted Image", image);
    cv::waitKey();
    return 0;
}
output is following:
Time taken by 1st method : 0.001765 sec
Time taken by 2nd method : 0.00011 sec
Identical Mats !
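For comparison, the channel split/merge option mentioned in the question would look roughly like the sketch below (my illustration, not from either answer). It allocates temporary per-channel Mats, so the single-pass cv::Scalar OR above is usually still the faster choice:
#include <opencv2/core.hpp>
// img[:, :, :2] = 255 via split/merge (a sketch). OpenCV stores color
// images as BGR, so channels 0 and 1 are blue and green.
cv::Mat whiten_blue_green(const cv::Mat& src)
{
    cv::Mat ch[3];
    cv::split(src, ch);
    ch[0].setTo(255); // blue
    ch[1].setTo(255); // green
    cv::Mat out;
    cv::merge(ch, 3, out); // red passes through unchanged
    return out;
}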

Tensorflow frozen graph protobuf does not predict using c api

I have trained a model for semantic segmentation using this repo, got good results, and tried to use this net in a small library written with the TensorFlow C API. I turned my Keras model into a protobuf file using this repo and ran a session using this code:
typedef struct model_t {
    TF_Graph* graph;
    TF_Session* session;
    TF_Status* status;
    TF_Output input, target, output;
    TF_Operation *init_op, *train_op, *save_op, *restore_op;
    TF_Output checkpoint_file;
} model_t;
typedef struct NetProperties {
    int width;
    int height;
    int border;
    int classes;
    int inputSize;
} NetProperties;
static model_t * model;
static NetProperties * properties;
extern "C" EXPORT int ModelCreate(const char* nnFilename, const char* inputName, const char* outputName, int pictureWidth, int pictureHeight, int border, int classes) {
    ModelDestroy();
    model = (model_t*)malloc(sizeof(model_t));
    model->status = TF_NewStatus();
    model->graph = TF_NewGraph();
    properties = (NetProperties*)malloc(sizeof(NetProperties));
    properties->width = pictureWidth;
    properties->height = pictureHeight;
    properties->border = border;
    properties->classes = classes;
    properties->inputSize = (pictureWidth + border * 2) * (pictureHeight + border * 2) * 3;
    {
        // Create the session.
        TF_SessionOptions* opts = TF_NewSessionOptions();
        model->session = TF_NewSession(model->graph, opts, model->status);
        TF_DeleteSessionOptions(opts);
        if (!Okay(model->status)) return 0;
    }
    TF_Graph* g = model->graph;
    {
        // Import the graph.
        TF_Buffer* graph_def = read_file(nnFilename);
        if (graph_def == NULL) return 0;
        printf("Read GraphDef of %zu bytes\n", graph_def->length);
        TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
        TF_GraphImportGraphDef(g, graph_def, opts, model->status);
        TF_DeleteImportGraphDefOptions(opts);
        TF_DeleteBuffer(graph_def);
        if (!Okay(model->status)) return 0;
    }
    // Handles to the interesting operations in the graph.
    model->input.oper = TF_GraphOperationByName(g, inputName);
    model->input.index = 0;
    model->target.oper = TF_GraphOperationByName(g, "target");
    model->target.index = 0;
    model->output.oper = TF_GraphOperationByName(g, outputName);
    model->output.index = 0;
    model->init_op = TF_GraphOperationByName(g, "init");
    model->train_op = TF_GraphOperationByName(g, "train");
    model->save_op = TF_GraphOperationByName(g, "save/control_dependency");
    model->restore_op = TF_GraphOperationByName(g, "save/restore_all");
    model->checkpoint_file.oper = TF_GraphOperationByName(g, "save/Const");
    model->checkpoint_file.index = 0;
    // first prediction is slow
    unsigned char * randomData = (unsigned char*)malloc(properties->inputSize * sizeof(unsigned char));
    for (int i = 0; i < properties->inputSize; i++) {
        randomData[i] = (unsigned char)100;
    }
    ModelPredict(randomData);
    free(randomData);
    return 1;
}
extern "C" EXPORT void ModelDestroy() {
    if (model == nullptr) return;
    TF_DeleteSession(model->session, model->status);
    Okay(model->status);
    TF_DeleteGraph(model->graph);
    TF_DeleteStatus(model->status);
    free(model);
}
extern "C" EXPORT unsigned char* ModelPredict(unsigned char * batch1) {
    if (model == NULL) return NULL;
    const int64_t dims[4] = { 1, properties->height + properties->border * 2, properties->width + properties->border * 2, 3 };
    size_t nbytes = properties->inputSize;
    // can be faster
    float * arrayOfFloats = (float*)malloc(nbytes * sizeof(float));
    //float sumUp = 0;
    for (int i = 0; i < properties->inputSize; i++) {
        arrayOfFloats[i] = batch1[i] * (1.f / 255.f);
        //sumUp += arrayOfFloats[i];
    }
    //std::cout << sumUp << std::endl;
    // removed due to jdehesa answer
    //float ** inputFloats = (float**)malloc(nbytes * sizeof(float*));
    //inputFloats[0] = arrayOfFloats;
    // Optionally, you can check that your input_op and input tensors are correct
    // by using some of the functions provided by the C API.
    //std::cout << "Input op info: " << TF_OperationNumOutputs(input_op) << "\n";
    //std::cout << "Input data info: " << TF_Dim(input, 0) << "\n";
    std::vector<TF_Output> inputs;
    std::vector<TF_Tensor*> input_values;
    TF_Operation* input_op = model->input.oper;
    TF_Output input_opout = { input_op, 0 };
    inputs.push_back(input_opout);
    // reworked due to jdehesa answer
    //TF_Tensor* input = TF_NewTensor(TF_FLOAT, dims, 4, (void*)inputFloats, nbytes * sizeof(float), &Deallocator, NULL);
    TF_Tensor* input = TF_NewTensor(TF_FLOAT, dims, 4, (void*)arrayOfFloats, nbytes * sizeof(float), &Deallocator, NULL);
    input_values.push_back(input);
    int outputSize = properties->width * properties->height * properties->classes;
    int64_t out_dims[] = { 1, properties->height, properties->width, properties->classes };
    // Create vector to store graph output operations
    std::vector<TF_Output> outputs;
    TF_Operation* output_op = model->output.oper;
    TF_Output output_opout = { output_op, 0 };
    outputs.push_back(output_opout);
    // Create TF_Tensor* vector
    //std::vector<TF_Tensor*> output_values(outputs.size(), nullptr);
    // Similar to creating the input tensor, however here we don't yet have the
    // output values, so we use TF_AllocateTensor()
    TF_Tensor* output_value = TF_AllocateTensor(TF_FLOAT, out_dims, 4, outputSize * sizeof(float));
    //output_values.push_back(output_value);
    // As with inputs, check the values for the output operation and output tensor
    //std::cout << "Output: " << TF_OperationName(output_op) << "\n";
    //std::cout << "Output info: " << TF_Dim(output_value, 0) << "\n";
    TF_SessionRun(model->session, NULL,
                  &inputs[0], &input_values[0], inputs.size(),
                  &outputs[0], &output_value, outputs.size(),
                  /* No target operations to run */
                  NULL, 0, NULL, model->status);
    if (!Okay(model->status)) return NULL;
    TF_DeleteTensor(input_values[0]);
    // memory allocations take place here
    float* prediction = (float*)TF_TensorData(output_value);
    //float* prediction = (float*)malloc(sizeof(float) * properties->inputSize / 3 * properties->classes);
    //memcpy(prediction, TF_TensorData(output_value), sizeof(float) * properties->inputSize / 3 * properties->classes);
    unsigned char * charPrediction = new unsigned char[outputSize * sizeof(unsigned char)];
    //sumUp = 0;
    for (int i = 0; i < outputSize; i++) {
        charPrediction[i] = (unsigned char)(prediction[i] * 255);
        //sumUp += prediction[i];
    }
    //std::cout << sumUp << std::endl << std::endl;
    //free(prediction);
    TF_DeleteTensor(output_value);
    return charPrediction;
}
The problem is that the prediction result is always the same. I tried passing random data and real images, but the result was equal. Different trained models give different prediction results, but for each model it's always the same. As you can see in the code snippet, I checked this by passing different data and getting the same prediction every time:
// first is float sum of passed picture, second is the float sum of answer
724306
22982.6
692004
22982.6
718490
22982.6
692004
22982.6
720861
22982.6
692004
22982.6
I tried to write my own keras to tensorflow .pb converter but result was the same.
import os, argparse
import tensorflow as tf
from tensorflow.keras.utils import get_custom_objects
from segmentation_models.losses import bce_dice_loss,dice_loss,cce_dice_loss
from segmentation_models.metrics import iou_score
# some custom functions from segmentation_models
get_custom_objects().update({
    'dice_loss': dice_loss,
    'bce_dice_loss': bce_dice_loss,
    'cce_dice_loss': cce_dice_loss,
    'iou_score': iou_score,
})
def freeze_keras(model_name):
    tf.keras.backend.set_learning_phase(0)
    model = tf.keras.models.load_model(model_name)
    sess = tf.keras.backend.get_session()
    constant_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), [out.op.name for out in model.outputs])
    tf.train.write_graph(constant_graph, './', 'saved_model.pb', as_text=False)
freeze_keras('best-weights.hdf5')
Help me find out how to fix the prediction result in the C API.
UPDATE 1: Reworked input array as jdehesa suggested
UPDATE 2: Added definition of model and NetProperties
I think you are not setting the input data correctly. Let's see.
float * arrayOfFloats1 = (float*)malloc(nbytes * sizeof(float));
float sumUp = 0;
Here you create arrayOfFloats1 to hold all the image data.
for (int i = 0; i < properties->inputSize; i++) {
    arrayOfFloats1[i] = batch1[i] * (1.f / 255.f);
    sumUp += arrayOfFloats1[i];
}
std::cout << sumUp << std::endl;
Here you set arrayOfFloats1 to the image data. This is all fine.
But then:
float ** inputFloats = (float**)malloc(nbytes * sizeof(float*));
Here you have inputFloats, which has space for nbytes float pointers. First, you probably would want to allocate space for float values, not float pointers (which probably do not have the same size). And then:
inputFloats[0] = arrayOfFloats1;
Here you are setting the first of those nbytes pointers to the pointer arrayOfFloats1. And then inputFloats is used as input to the model. But the remaining nbytes - 1 pointers have not been set to anything. Although not required, they are probably set all to zero.
If you just want to make an "array of arrays of floats" with arrayOfFloats1 you don't need to allocate any memory, you can simply do:
float ** inputFloats = &arrayOfFloats1;
But then you actually use inputFloats like this:
TF_Tensor* input = TF_NewTensor(
    TF_FLOAT, dims, 4, (void*)inputFloats, nbytes * sizeof(float), &Deallocator, NULL);
So here you are saying that input is made up of the data in inputFloats, which will be a pointer to arrayOfFloats1 and then uninitialized memory. Probably you actually want something like:
TF_Tensor* input = TF_NewTensor(
    TF_FLOAT, dims, 4, (void*)arrayOfFloats1, nbytes * sizeof(float), &Deallocator, NULL);
Which means input will be a tensor made up of the data in arrayOfFloats1 that you copied before. In fact, I don't think your code needs inputFloats at all.
Otherwise, from what I can tell the rest of the code seems correct. You should ensure that all allocated memory is properly freed in all cases (e.g. when you do if (!Okay(model->status)) return NULL; you should probably delete the input and output tensors before returning), but that is a different issue.
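One detail worth spelling out: the snippets reference a Deallocator that never appears in the thread. TF_NewTensor takes ownership of the buffer and invokes that callback once the tensor is deleted, so for malloc'd data a minimal version (my sketch, matching the C API's expected callback signature) would be:
#include <cstdlib> // free
#include <cstddef> // size_t
// Called by TensorFlow when the tensor owning this buffer is deleted.
// The data here was allocated with malloc, so release it with free.
extern "C" void Deallocator(void* data, size_t /*len*/, void* /*arg*/)
{
    free(data);
}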
The issue was in the model. I had trained it on non-normalized image data (pixel values between 0.0 and 255.0) but tried to run inference on normalized data (I divided each pixel value by 255 with arrayOfFloats[i] = batch1[i] * (1.f / 255.f);, giving values between 0.0 and 1.0), so the model effectively saw near-black images every time and gave similar answers. Once I removed the normalization, the model started to predict.

Inputting, splitting, and sorting in C

I am a Python programmer. My girlfriend is taking a C class. This frustrates me: something so simple, yet I can't find it online nor figure it out. Let's cut to the chase. I have a simple Python program that I need help translating to C.
lst = input("Enter a list of numbers with a space in between each number\n")
newList = lst.split(" ")
#selection sort has been pre defined
x = newList.selectSort()
print(x)
Sorry this was done on my phone.
Her assignment isn't just this. It's adding multiple functions that work together. I just need to know how this works in order to pull the full program together.
First of all, you have to define the number of items in the list; then you can input them. Then you store them in an array and do the sorting process manually. I've done the sorting process without defining a function; if you want to use a function, just pass the array and return the sorted array (see the sketch after the code below).
#include <stdio.h>
int main()
{
    int n, c, d, position, swap;
    printf("Enter number of elements\n");
    scanf("%d", &n);
    int array[n];
    printf("Enter %d integers\n", n);
    for ( c = 0 ; c < n ; c++ )
        scanf("%d", &array[c]);
    for ( c = 0 ; c < ( n - 1 ) ; c++ )
    {
        position = c;
        for ( d = c + 1 ; d < n ; d++ )
        {
            if ( array[position] > array[d] )
                position = d;
        }
        if ( position != c )
        {
            swap = array[c];
            array[c] = array[position];
            array[position] = swap;
        }
    }
    printf("Sorted list in ascending order:\n");
    for ( c = 0 ; c < n ; c++ )
        printf("%d\n", array[c]);
    return 0;
}
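As mentioned above, turning this into a function just means passing the array and its length (arrays decay to pointers in C, so the length cannot be recovered from the pointer alone). A sketch of that refactoring:
// Selection sort factored into a reusable function; sorts in place.
void selection_sort(int *array, int n)
{
    for (int c = 0; c < n - 1; c++)
    {
        int position = c; // index of the smallest remaining element
        for (int d = c + 1; d < n; d++)
        {
            if (array[position] > array[d])
                position = d;
        }
        if (position != c)
        {
            int swap = array[c];
            array[c] = array[position];
            array[position] = swap;
        }
    }
}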
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
// Macro for sorting
#define sort(name, data_set, len, comparator, inverse) \
    name##_sort(data_set, len, comparator, inverse)
#define SORT_DEFINE(name, data_type) \
\
/* Sort data set
   #data_set   data set to sort
   #len        length of data set
   #comparator comparator to compare two elements, return positive value when first element is bigger
   #inverse    whether the result should be inversed
*/\
void name##_sort(data_type *data_set, int len, int (*comparator)(data_type, data_type), bool inverse) \
{ \
    int i; \
    int j; \
    bool change = true; \
    int ret; \
    data_type tmp; \
\
    for (i = 0; change && i < len - 1; i++) \
    { \
        change = false; \
        for (j = 0; j < len - 1 - i; j++) \
        { \
            ret = comparator(data_set[j], data_set[j + 1]); \
            if ((!inverse && ret > 0) || (inverse && ret < 0)) \
            { \
                change = true; \
                tmp = data_set[j]; \
                data_set[j] = data_set[j + 1]; \
                data_set[j + 1] = tmp; \
            } \
        } \
    } \
}
/* Split string
   #content origin string content
   #delim   delimiter for splitting
   #psize   pointer pointing at the variable to store token size
   #return  tokens after splitting
*/
const char **split(char *content, const char *delim, int *psize)
{
    char *token;
    const char **tokens;
    int capacity;
    int size = 0;
    token = strtok(content, delim);
    if (!token)
    {
        return NULL;
    }
    // Initialize tokens
    tokens = malloc(sizeof(char *) * 64);
    if (!tokens)
    {
        exit(-1);
    }
    capacity = 64;
    tokens[size++] = token;
    while ((token = strtok(NULL, delim)))
    {
        if (size >= capacity)
        {
            tokens = realloc(tokens, sizeof(char *) * capacity * 2);
            if (!tokens)
            {
                exit(-1);
            }
            capacity *= 2;
        }
        tokens[size++] = token;
    }
    *psize = size;
    return tokens;
}
// Define sort function for data_type = const char *
SORT_DEFINE(str, const char *);
// Define sort function for data_type = int
SORT_DEFINE(int, int)
int intcmp(int v1, int v2)
{
    return v1 - v2;
}
int main(int argc, char *argv[])
{
    char buff[128];
    const char **tokens;
    int size;
    int i;
    int *ints;
    // Get input from stdin
    fgets(buff, 128, stdin);
    // Split string
    tokens = split(buff, " \t\n", &size);
    ints = malloc(sizeof(int) * size);
    // Sort strings [min -> max]
    sort(str, tokens, size, strcmp, false);
    // Print strings and transfer them to integers
    for (i = 0; i < size; i++)
    {
        printf("[%02d]: <%s>\n", i, tokens[i]);
        ints[i] = atoi(tokens[i]);
    }
    // Sort integers [max -> min]
    sort(int, ints, size, intcmp, true);
    // Print integers
    for (i = 0; i < size; i++)
    {
        printf("[%02d]: <%d>\n", i, ints[i]);
    }
    free(ints);
    free(tokens);
    return 0;
}
Use the macros SORT_DEFINE() and sort(), and the function split(), to do your own job. The main() function is just a demo showing how to use them.

Using a named (fifo) pipe to transport arrays (images) between python and c++

I need to send an array (representing an image) through a named FIFO pipe from a Python process to a C++ process, and then back the other way (on a Linux system).
The below code works great when using named pipes between two Python processes. It uses numpy's tostring() and fromstring() functions:
Send frames over named pipe (Python)
import cv2
import numpy as np
from time import sleep
##########################################################
FIFO_Images = "./../pipes/images.fifo"
videoName = "./../../videos/videoName.avi"
delim = "break"
##########################################################
def sendImage(h, w, d, pixelarray):
    imageString = pixelarray.tostring()
    with open(FIFO_Images, "w") as f:
        f.write(str(h) + delim + str(w) + delim + str(d) + delim + imageString)
    sleep(.01)
    return
##########################################################
cap = cv2.VideoCapture(videoName)
while(cap.isOpened()):
    ret, frame_rgb = cap.read()
    h, w, d = frame_rgb.shape
    sendImage(h, w, d, frame_rgb)
cap.release()
cv2.destroyAllWindows()
Read frames over named pipe (Python)
import cv2
import numpy as np
##########################################################
FIFO_Images = "./../pipes/images.fifo"
delim = "break"
##########################################################
def getFrame():
    with open(FIFO_Images, "r") as f:
        data = f.read().split(delim)
    # parse incoming string, which has format (height, width, depth, imageData)
    h = int(data[0])
    w = int(data[1])
    d = int(data[2])
    imageString = data[3]
    # convert array string into numpy array
    array = np.fromstring(imageString, dtype=np.uint8)
    # reshape numpy array into the required dimensions
    frame = array.reshape((h, w, d))
    return frame
##########################################################
while(True):
    frame = getFrame()
    cv2.imshow('frame', frame)
    cv2.waitKey(1) & 0xFF
However, I couldn't figure out how to read the entire image from the pipe on the C++ side, since the read automatically stops at "\n", which it takes as a delimiter.
My workaround was to do a base64 encoding on the tostring() image, then send that over the pipe. This works, but the base64 decoding on the other side is much too slow for real-time applications (~0.2 seconds per frame). Code:
Send base64-encoded images over named pipe (Python)
import cv2
import numpy as np
from time import time
from time import sleep
import base64
##########################################################
FIFO_Images = "./../pipes/images.fifo"
videoName = "./../../videos/videoName.avi"
delim = ";;"
##########################################################
def sendImage(h, w, d, pixelarray):
    flat = pixelarray.flatten()
    imageString = base64.b64encode(pixelarray.tostring())
    fullString = str(h) + delim + str(w) + delim + str(d) + delim + imageString + delim + "\n"
    with open(FIFO_Images, "w") as f:
        f.write(fullString)
    return
##########################################################
cap = cv2.VideoCapture(videoName)
count = 0
while(cap.isOpened()):
    ret, frame_rgb = cap.read()
    h, w, d = frame_rgb.shape
    frame_gbr = cv2.cvtColor(frame_rgb, cv2.COLOR_RGB2BGR)
    sendImage(h, w, d, frame_rgb)
cap.release()
cv2.destroyAllWindows()
Read base64-encoded images over named pipe (C++)
#include "opencv2/opencv.hpp"
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <linux/stat.h>
#include <ctime>
using namespace std;
using namespace cv;
#define FIFO_FILE "./../../../pipes/images.fifo"
#define MAX_BUF 10000000
FILE *fp;
char readbuf[MAX_BUF + 1]; // add 1 to the expected size to accommodate the mysterious "extra byte", which I think signals the end of the line.
/************************BASE64 Decoding*********************************************/
std::string base64_encode(unsigned char const* , unsigned int len);
std::string base64_decode(std::string const& s);
static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";
static inline bool is_base64(unsigned char c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}
std::string base64_encode(unsigned char const* bytes_to_encode, unsigned int in_len) {
    std::string ret;
    int i = 0;
    int j = 0;
    unsigned char char_array_3[3];
    unsigned char char_array_4[4];
    while (in_len--) {
        char_array_3[i++] = *(bytes_to_encode++);
        if (i == 3) {
            char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
            char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
            char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
            char_array_4[3] = char_array_3[2] & 0x3f;
            for(i = 0; (i < 4) ; i++)
                ret += base64_chars[char_array_4[i]];
            i = 0;
        }
    }
    if (i)
    {
        for(j = i; j < 3; j++)
            char_array_3[j] = '\0';
        char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
        char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
        char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
        char_array_4[3] = char_array_3[2] & 0x3f;
        for (j = 0; (j < i + 1); j++)
            ret += base64_chars[char_array_4[j]];
        while((i++ < 3))
            ret += '=';
    }
    return ret;
}
std::string base64_decode(std::string const& encoded_string) {
    int in_len = encoded_string.size();
    int i = 0;
    int j = 0;
    int in_ = 0;
    unsigned char char_array_4[4], char_array_3[3];
    std::string ret;
    while (in_len-- && ( encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++)
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];
            for (i = 0; (i < 3); i++)
                ret += char_array_3[i];
            i = 0;
        }
    }
    if (i) {
        for (j = i; j < 4; j++)
            char_array_4[j] = 0;
        for (j = 0; j < 4; j++)
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];
        for (j = 0; (j < i - 1); j++) ret += char_array_3[j];
    }
    return ret;
}
/*********************************************************************/
int stringToInt(string str)
{
    int num;
    if (!(istringstream(str) >> num)) num = 0;
    return num;
}
/*********************************************************************/
bool timerOn = 0;
clock_t timerStart;
void Timer(string process)
{
    if (!timerOn)
    {
        timerStart = clock();
        timerOn = true;
    }
    else if (timerOn)
    {
        double duration = (clock() - timerStart) / (double) CLOCKS_PER_SEC;
        cout << "Time to complete: ";
        printf("%.2f", duration);
        cout << ": " << process << endl;
        timerOn = false;
    }
}
/*********************************************************************/
void getFrame()
{
    string fullString;
    string delimiter = ";;";
    size_t pos = 0;
    string token;
    int h;
    int w;
    int d;
    string imgString;
    int fifo;
    bool cont(true);
    /***************************
      Read from the pipe
      www.tldp.org/LDP/lpg/node18.html
    ***************************/
    Timer("Read from pipe");
    fp = fopen(FIFO_FILE, "r");
    fgets(readbuf, MAX_BUF + 1, fp); // Stops when MAX_BUF characters are read, the newline character ("\n") is read, or the EOF (end of file) is reached
    string line(readbuf);
    fclose(fp);
    Timer("Read from pipe");
    ////// parse the string into components
    Timer("Parse string");
    int counter = 0;
    while ((pos = line.find(delimiter)) != string::npos)
    {
        token = line.substr(0, pos);
        if (counter == 0)
        {
            h = stringToInt(token);
        }
        else if (counter == 1)
        {
            w = stringToInt(token);
        }
        else if (counter == 2)
        {
            d = stringToInt(token);
        }
        else if (counter == 3)
        {
            imgString = token;
            //cout << imgString[0] << endl;
        }
        else
        {
            cout << "ERROR: Too many parameters passed" << endl;
            return;
        }
        line.erase(0, pos + delimiter.length());
        counter++;
    }
    if (counter == 3)
    {
        imgString = token;
    }
    if (counter < 3)
    {
        cout << "ERROR: Not enough parameters passed: " << counter << endl;
        //return;
    }
    Timer("Parse string");
    /***************************
      Convert from Base64
    ***************************/
    Timer("Decode Base64");
    std::string decoded = base64_decode(imgString);
    Timer("Decode Base64");
    /***************************
      Convert to vector of ints
    ***************************/
    Timer("Convert to vector of ints");
    std::vector<uchar> imgVector;
    for (int i = 0; i < decoded.length(); i = i + 1) // + 4)
    {
        int temp = (char(decoded[i]));
        imgVector.push_back(temp);
    }
    Timer("Convert to vector of ints");
    ////// convert the vector into a matrix
    Mat frame = Mat(imgVector).reshape(d, h);
    namedWindow("Frame", WINDOW_AUTOSIZE);
    imshow("Frame", frame);
    waitKey(1);
}
int main()
{
    /* Create the FIFO if it does not exist */
    umask(0);
    mknod(FIFO_FILE, S_IFIFO|0666, 0);
    while(1)
    {
        getFrame();
    }
    return 0;
}
There must be a more efficient way to accomplish this. Can anyone make a recommendation? While I'm happy to hear suggestions for other approaches, I am constrained to using named pipes for now.
This is overcomplicated. If you need to send binary data, send its length first, then a newline (\n), and then the data (raw, no base64). Receive it on the other side by reading a line, parsing the number, and then just reading a block of data of the given length.
Example - writing binary data to a FIFO (or file) in Python:
#!/usr/bin/env python3
import os
fifo_name = 'fifo'
def main():
    data = b'blob\n\x00 123'
    try:
        os.mkfifo(fifo_name)
    except FileExistsError:
        pass
    with open(fifo_name, 'wb') as f:
        # b for binary mode
        f.write('{}\n'.format(len(data)).encode())
        f.write(data)
if __name__ == '__main__':
    main()
Reading binary data from FIFO in C++:
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sys/stat.h>
int main(int argc, char *argv[]) {
    const char *fifo_name = "fifo";
    mknod(fifo_name, S_IFIFO | 0666, 0);
    std::ifstream f(fifo_name);
    std::string line;
    getline(f, line);
    auto data_size = std::stoi(line);
    std::cout << "Size: " << data_size << std::endl;
    std::string data;
    {
        std::vector<char> buf(data_size);
        f.read(buf.data(), data_size);
        // writing into vector data is valid since C++11
        data.assign(buf.data(), buf.size());
    }
    if (!f.good()) {
        std::cerr << "Read failed" << std::endl;
    }
    std::cout << "Data size: " << data.size() << " content: " << data << std::endl;
}
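Applied back to the original image use case, the same length-prefix idea extends naturally: have the Python side write a header such as "h w d nbytes\n" followed by the raw tostring() bytes, and read it on the C++ side straight into a cv::Mat. A sketch of such a reader, under that assumed header format:
#include <opencv2/core.hpp>
#include <fstream>
#include <sstream>
#include <string>
// Read one frame sent as "h w d nbytes\n" + raw bytes from an open FIFO
// stream (a sketch; assumes d == 3 with 8-bit channels). Returns an
// empty Mat on any error.
cv::Mat read_frame(std::ifstream& f)
{
    std::string header;
    if (!std::getline(f, header)) return cv::Mat();
    std::istringstream hs(header);
    int h, w, d;
    std::size_t nbytes;
    if (!(hs >> h >> w >> d >> nbytes) || d != 3) return cv::Mat();
    cv::Mat frame(h, w, CV_8UC3);
    if (nbytes != frame.total() * frame.elemSize()) return cv::Mat();
    f.read(reinterpret_cast<char*>(frame.data), static_cast<std::streamsize>(nbytes));
    return f.good() ? frame : cv::Mat();
}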
