I am not experienced with audio processing. I am trying to record audio from my microphone and stream the audio data to a Flask server. I have set up the JavaScript for the recording. The catch is that it feeds a speech recognition engine, which needs the audio stream as 16-bit, 16 kHz, mono WAV. I was trying to use recorder.js (https://github.com/mattdiamond/Recorderjs/blob/master/dist/recorder.js), but context.createScriptProcessor is deprecated, so I switched to an AudioWorklet processor (https://developer.mozilla.org/en-US/docs/Web/API/AudioWorklet). I took the WAV-encoding code from recorder.js plus some other code that downsamples the default 44100 Hz to 16 kHz, i.e. 16000 samples per second. The problems are:
1. The bytes I am receiving on the Flask server are corrupted (I tried writing them to a WAV file, and the result is a WAV that is zero seconds long).
2. I am not sure why, but I think the problem is in the JavaScript code (or the Flask code, I don't know).
If anyone knows where I went wrong, or better still how I can achieve streaming in 16-bit, 16 kHz, mono WAV format, I would really appreciate it. The code is below.
JavaScript code using the AudioWorklet:
let dataArray = [];
var recording = true;

const main = async () => {
  const context = new AudioContext()
  const microphone = await navigator.mediaDevices.getUserMedia({
    audio: true
  })
  let sampleRate = 16000
  let numOfChannels = 1
  const source = context.createMediaStreamSource(microphone)
  await context.audioWorklet.addModule('js/recorderWorkletProcessor.js')
  const recorder = new AudioWorkletNode(context, "recorder.worklet")
  source.connect(recorder).connect(context.destination)
  recorder.port.onmessage = (e) => {
    // downsample from the context's native rate (e.g. 44100 Hz) to 16 kHz
    const downSampledData = downsampleBuffer(e.data, sampleRate, context.sampleRate)
    // convert to audio/wav format
    let dataView = encodeWAV(downSampledData, sampleRate)
    dataArray.push(e.data)
    // create a blob from the WAV bytes
    let blob = new Blob([dataView], { type: 'audio/wav' });
    // send to the server
    upload(blob)
    if (!recording) {
      console.log("RECORDING STOPPED");
      recorder.disconnect(context.destination);
      source.disconnect(recorder);
    }
  }
};
// note: this helper is unused; floatTo16BitPCM() below does the conversion
function convertFloat32To16BitPCM(input) {
  const output = new Int16Array(input.length)
  for (let i = 0; i < input.length; i++) {
    const s = Math.max(-1, Math.min(1, input[i]))
    output[i] = s < 0 ? s * 0x8000 : s * 0x7fff
  }
  return output
}
function startRec () {
  // start the recording
  main()
}

function stopRec () {
  // stop the recording
  console.log('stopped')
  recording = false
}
// convert float samples to 16-bit PCM, little-endian
function floatTo16BitPCM(output, offset, input) {
  for (var i = 0; i < input.length; i++, offset += 2) {
    var s = Math.max(-1, Math.min(1, input[i]));
    output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
  }
}

function writeString(view, offset, string) {
  for (var i = 0; i < string.length; i++) {
    view.setUint8(offset + i, string.charCodeAt(i));
  }
}
// wrap the samples in a WAV (RIFF) container
function encodeWAV(samples, sampleRate) {
  let buffer = new ArrayBuffer(44 + samples.length * 2);
  let view = new DataView(buffer);
  /* RIFF identifier */
  writeString(view, 0, 'RIFF');
  /* RIFF chunk length */
  view.setUint32(4, 36 + samples.length * 2, true);
  /* RIFF type */
  writeString(view, 8, 'WAVE');
  /* format chunk identifier */
  writeString(view, 12, 'fmt ');
  /* format chunk length */
  view.setUint32(16, 16, true);
  /* sample format (raw PCM) */
  view.setUint16(20, 1, true);
  /* channel count */
  view.setUint16(22, 1, true);
  /* sample rate */
  view.setUint32(24, sampleRate, true);
  /* byte rate (sample rate * block align); 2 bytes per sample for 16-bit mono */
  view.setUint32(28, sampleRate * 2, true);
  /* block align (channel count * bytes per sample) */
  view.setUint16(32, 1 * 2, true);
  /* bits per sample */
  view.setUint16(34, 16, true);
  /* data chunk identifier */
  writeString(view, 36, 'data');
  /* data chunk length */
  view.setUint32(40, samples.length * 2, true);
  floatTo16BitPCM(view, 44, samples);
  return view;
}
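For debugging on the server side, those 44 header bytes can be parsed back to confirm the fields match what encodeWAV wrote; a minimal Python sketch (offsets follow the standard RIFF/WAVE layout used above):

import struct

def check_wav_header(data: bytes):
    # unpack the canonical 44-byte WAV header written by encodeWAV above
    riff, chunk_size, wave_id = struct.unpack_from('<4sI4s', data, 0)
    audio_fmt, channels, rate = struct.unpack_from('<HHI', data, 20)
    bits, = struct.unpack_from('<H', data, 34)
    data_len, = struct.unpack_from('<I', data, 40)
    print(riff, wave_id, audio_fmt, channels, rate, bits, data_len)
    # expect b'RIFF' b'WAVE' 1 1 16000 16 and a nonzero data length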
const blobToBase64 = (blob) => {
  // convert blob to a base64 data URL
  return new Promise((resolve) => {
    const reader = new FileReader();
    reader.readAsDataURL(blob);
    reader.onloadend = function () {
      resolve(reader.result);
    };
  });
};

const upload = async (audioData) => {
  // send the blob containing the audio bytes to the Flask server
  var AjaxURL = 'http://127.0.0.1:5000/media';
  const b64 = await blobToBase64(audioData);
  const jsonString = JSON.stringify({blob: b64});
  console.log(jsonString);
  $.ajax({
    type: "POST",
    url: AjaxURL,
    data: jsonString,
    contentType: 'application/json;charset=UTF-8',
    success: function(result) {
      window.console.log(result.response);
    }
  });
}
function downsampleBuffer(buffer, rate, sampleRate) {
  if (rate == sampleRate) {
    return buffer;
  }
  if (rate > sampleRate) {
    throw "downsampling rate should be smaller than original sample rate";
  }
  var sampleRateRatio = sampleRate / rate;
  var newLength = Math.round(buffer.length / sampleRateRatio);
  var result = new Float32Array(newLength);
  var offsetResult = 0;
  var offsetBuffer = 0;
  while (offsetResult < result.length) {
    var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
    // use the average value of the skipped samples
    var accum = 0, count = 0;
    for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
      accum += buffer[i];
      count++;
    }
    result[offsetResult] = accum / count;
    // or simply drop the skipped samples:
    // result[offsetResult] = buffer[nextOffsetBuffer];
    offsetResult++;
    offsetBuffer = nextOffsetBuffer;
  }
  return result;
}
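The same averaging scheme is easy to mirror server-side when checking received audio; a numpy sketch of the equivalent (not part of the original pipeline; expects a 1-D numpy array of float samples):

import numpy as np

def downsample_buffer(buffer, rate, sample_rate):
    # average-pooling downsample, mirroring the JS helper above
    if rate == sample_rate:
        return np.asarray(buffer, dtype=np.float32)
    ratio = sample_rate / rate
    n = int(round(len(buffer) / ratio))
    edges = np.minimum(np.round(np.arange(n + 1) * ratio).astype(int), len(buffer))
    return np.array([buffer[a:b].mean() if b > a else 0.0
                     for a, b in zip(edges[:-1], edges[1:])],
                    dtype=np.float32)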
recorderWorkletProcessor.js:
class RecorderProcessor extends AudioWorkletProcessor {
  bufferSize = 4096
  _bytesWritten = 0
  _buffer = new Float32Array(this.bufferSize)

  constructor () {
    super()
    this.initBuffer()
  }

  initBuffer() {
    this._bytesWritten = 0
  }

  isBufferEmpty() {
    return this._bytesWritten === 0
  }

  isBufferFull() {
    return this._bytesWritten === this.bufferSize
  }

  process(inputs, outputs, parameters) {
    this.append(inputs[0][0])
    return true
  }

  append(channelData) {
    if (this.isBufferFull()) {
      this.flush()
    }
    if (!channelData) return
    for (let i = 0; i < channelData.length; i++) {
      this._buffer[this._bytesWritten++] = channelData[i]
    }
  }

  flush () {
    // post a copy of only the samples actually written
    this.port.postMessage(
      this._bytesWritten < this.bufferSize ? this._buffer.slice(0, this._bytesWritten) : this._buffer
    )
    this.initBuffer()
  }
}

registerProcessor('recorder.worklet', RecorderProcessor)
Finally, my Flask server code.
NOTE: The endpoint has to be an HTTP endpoint, which is why I am using the Ajax call in the JS (not sockets). There is a speech recognition engine server running on websockets, which is why there is a websocket call inside the async view. That socket server receives BYTES of audio data.
#!/usr/bin/env python
# encoding: utf-8
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import numpy as np
import soundfile as sf
import json
import logging
import base64
import asyncio
import websockets
import sys
import wave

app = Flask(__name__)
app.secret_key = "stream"
CORS(app, supports_credentials=True)

def get_byte_string(string):
    # strip the data-URL prefix, keeping only the base64 payload
    delimiter = ';base64,'
    splitted_string = string.split(delimiter)
    return splitted_string[1]

@app.route('/media', methods=['POST'])
async def echo():
    # async views require Flask 2.0+ installed with the async extra
    app.logger.info('Connection accepted')
    has_seen_media = False
    message_count = 0
    chunk = None
    data = json.loads(request.data)
    if data is None:
        app.logger.info('No message received')
    else:
        app.logger.info("Media message received")
        blob = data['blob']
        byte_str = get_byte_string(blob)
        byte_str = bytes(byte_str, 'utf-8')
        chunk = base64.decodebytes(byte_str)
        has_seen_media = True
    if has_seen_media:
        app.logger.info("Payload received: {} bytes".format(len(chunk)))
        # forward the raw bytes to the recognition engine over a websocket
        async with websockets.connect('ws://localhost:2700') as websocket:
            await websocket.send(chunk)
            print(await websocket.recv())
            await websocket.send('{"eof" : 1}')
            print(await websocket.recv())
        message_count += 1
    return jsonify({'response': ''})

if __name__ == '__main__':
    app.logger.setLevel(logging.DEBUG)
    app.run(debug=True)
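To narrow down where the corruption happens, each decoded chunk can be validated on the server before it is forwarded; a small sketch using the standard library's wave module:

import io
import wave

def validate_chunk(chunk: bytes):
    # open the decoded bytes as a WAV stream and report its parameters
    with wave.open(io.BytesIO(chunk), 'rb') as wav:
        frames = wav.getnframes()
        rate = wav.getframerate()
        print(wav.getnchannels(), wav.getsampwidth(), rate,
              frames, frames / float(rate), 'seconds')
    # expect 1 channel, 2-byte samples, 16000 Hz, and a nonzero duration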
I am working on an MNIST digit classification Android application.
Reference: https://www.tensorflow.org/lite/examples/on_device_training/overview
I converted the model into a TensorFlow Lite file and integrated the .tflite file into the Android app, doing everything as described in the TensorFlow guide. Please help me fix the error I am facing:
java.lang.IllegalArgumentException: Cannot copy to a TensorFlowLite tensor (train_y:0) with 40 bytes from a Java Buffer with 8 byte
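For context, the byte counts in this error are the tensor's element count times the dtype size: a float32 train_y with 10 elements needs 10 * 4 = 40 bytes, while 8 bytes holds only 2 floats. The shapes each signature expects can be read from the .tflite file; a short Python sketch (model path assumed):

import tensorflow as tf

# list the signatures, then inspect the expected shape/dtype of each input
interpreter = tf.lite.Interpreter(model_path='model.tflite')
print(interpreter.get_signature_list())
train_runner = interpreter.get_signature_runner('train')
print(train_runner.get_input_details())  # compare against the Java buffers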
Java code:
import android.content.ClipData;
import android.content.Intent;
import android.content.res.AssetFileDescriptor;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.ColorMatrix;
import android.graphics.ColorMatrixColorFilter;
import android.graphics.Matrix;
import android.graphics.Paint;
import android.net.Uri;
import android.provider.MediaStore;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;
import androidx.appcompat.app.AppCompatActivity;
import org.tensorflow.lite.Interpreter;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class MainActivity extends AppCompatActivity {
private ImageView imageView;
private TextView textView;
private Button selectImageButton, ProcessImage;
private Button trainModelButton, Updateweights;
private Button predictButton;
private Bitmap image;
private static final int NUM_EPOCHS = 100;
private static final int BATCH_SIZE = 10;
private static final int IMG_HEIGHT = 28;
private static final int IMG_WIDTH = 28;
private static final int NUM_TRAININGS = 60000;
private static final int NUM_BATCHES = NUM_TRAININGS / BATCH_SIZE;
private static final int NUM_IMAGES = 1;
private static final int REQUEST_CODE_GALLERY = 1;
private List<Bitmap> selectedImages;
private List<FloatBuffer> trainImageBatches;
private List<FloatBuffer> trainLabelBatches;
private Button SelectImagesBtn, TrainModelBtn;
// ByteBuffer modelBuffer;
Interpreter modelBuffer;
Bitmap bitmap;
private static final int IMAGE_PICK_REQUEST_CODE = 1;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
imageView = findViewById(R.id.selected_image_view);
textView = findViewById(R.id.improved_learning_rate_text_view);
selectImageButton = findViewById(R.id.select_image_button);
trainModelButton = findViewById(R.id.train_model_button);
predictButton = findViewById(R.id.predict_number_button);
ProcessImage = findViewById(R.id.process_image_button);
try {
modelBuffer = new Interpreter(loadModelFile());
} catch (Exception e) {
Log.e("MainActivity", "Error loading TFLite model", e);
}
selectImageButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
selectImagesFromGallery();
}
});
trainModelButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
trainModel();
Toast.makeText(getApplicationContext()," Train button is clicked",Toast.LENGTH_SHORT).show();
}
});
predictButton.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
// LoadOndevicetrainedmodel();
// predictNumber();
Toast.makeText(getApplicationContext()," predict button is clicked",Toast.LENGTH_SHORT).show();
}
});
ProcessImage.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
prepareTrainingBatches();
Toast.makeText(getApplicationContext()," process button is clicked",Toast.LENGTH_SHORT).show();
}
});
}
// Method to select images from the gallery
private void selectImagesFromGallery() {
// Use an Intent to pick images from the gallery
Intent intent = new Intent(Intent.ACTION_PICK, MediaStore.Images.Media.EXTERNAL_CONTENT_URI);
intent.setType("image/*");
intent.putExtra(Intent.EXTRA_ALLOW_MULTIPLE, true);
intent.setAction(Intent.ACTION_GET_CONTENT);
startActivityForResult(Intent.createChooser(intent, "Select Images"), REQUEST_CODE_GALLERY);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (requestCode == REQUEST_CODE_GALLERY && resultCode == RESULT_OK) {
ClipData clipData = data.getClipData();
if (clipData != null) {
selectedImages = new ArrayList<>();
int count = clipData.getItemCount();
count = Math.min(count, NUM_IMAGES);
for (int i = 0; i < count; i++) {
Uri imageUri = clipData.getItemAt(i).getUri();
try {
bitmap = MediaStore.Images.Media.getBitmap(this.getContentResolver(), imageUri);
// selectedImages.add(bitmap);
imageView.setImageBitmap(bitmap);
bitmap = resizeImage(bitmap);
Toast.makeText(getApplicationContext(),"image converted to bitmap",Toast.LENGTH_LONG).show();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
}
// Method to prepare training batches using the selected images
private void prepareTrainingBatches() {
try {
trainImageBatches = new ArrayList<>(NUM_BATCHES);
trainLabelBatches = new ArrayList<>(NUM_BATCHES);
// Iterate over the selected images
for (int i = 0; i < NUM_IMAGES; i++) {
// Allocate a direct buffer to store the image data
// ByteBuffer byteBuffer = ByteBuffer.allocateDirect(IMG_HEIGHT * IMG_WIDTH * BATCH_SIZE).order(ByteOrder.nativeOrder());
FloatBuffer trainImages = convertBitmapToFloatBuffer(bitmap);
// Convert the resized image to grayscale
Bitmap grayscaleImage = toGrayscale(bitmap);
// Convert the grayscale image to a float buffer
FloatBuffer floatBuffer = convertBitmapToFloatBuffer(grayscaleImage);
// Add the float buffer to trainImageBatches
trainImageBatches.add(floatBuffer);
// Allocate a direct buffer to store the label data
ByteBuffer labelBuffer = ByteBuffer.allocateDirect(10 * BATCH_SIZE).order(ByteOrder.nativeOrder());
FloatBuffer trainLabels = labelBuffer.asFloatBuffer();
// Fill the image and label data for the current batch
// trainImageBatches.add((FloatBuffer) trainImages.rewind());
trainLabelBatches.add((FloatBuffer) trainLabels.rewind());
Toast.makeText(getApplicationContext(), "prepareTrainingBatches is done", Toast.LENGTH_LONG).show();
}
} catch (Exception e) {
e.printStackTrace();
Toast.makeText(getApplicationContext(), "Error :"+ e, Toast.LENGTH_LONG).show();
}
}
public void trainModel(){
try {
// Run training for a few steps.
float[] losses = new float[NUM_EPOCHS];
for (int epoch = 0; epoch < NUM_EPOCHS; ++epoch) {
for (int batchIdx = 0; batchIdx < NUM_BATCHES; ++batchIdx) {
Map<String, Object> inputs = new HashMap<>();
inputs.put("x", trainImageBatches.get(batchIdx));
inputs.put("y", trainLabelBatches.get(batchIdx));
Map<String, Object> outputs = new HashMap<>();
FloatBuffer loss = FloatBuffer.allocate(1);
outputs.put("loss", loss);
modelBuffer.runSignature(inputs, outputs, "train");
// Record the last loss.
if (batchIdx == NUM_BATCHES - 1) losses[epoch] = loss.get(0);
}
// Print the loss output for every 10 epochs.
if ((epoch + 1) % 10 == 0) {
System.out.println(
"Finished " + (epoch + 1) + " epochs, current loss: " + losses[epoch]);
textView.setText("Finished " + (epoch + 1) + " epochs, current loss: " + losses[epoch]);
}
}
// ...
File outputFile = new File(getFilesDir(), "checkpoint.ckpt");
Map<String, Object> inputs = new HashMap<>();
inputs.put("checkpoint_path", outputFile.getAbsolutePath());
Map<String, Object> outputs = new HashMap<>();
modelBuffer.runSignature(inputs, outputs, "save");
}
catch (Exception e){
e.printStackTrace();
Log.d("TRAIN MODEL:", String.valueOf(e));
Toast.makeText(getApplicationContext(),"Error:"+e,Toast.LENGTH_LONG).show();
}
}
private MappedByteBuffer loadModelFile() throws IOException {
// Load the TensorFlow Lite model from a file
AssetFileDescriptor fileDescriptor = getAssets().openFd("model.tflite");
FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
FileChannel fileChannel = inputStream.getChannel();
long startOffset = fileDescriptor.getStartOffset();
long declaredLength = fileDescriptor.getDeclaredLength();
return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
}
private Bitmap resizeImage(Bitmap originalImage){
int width = originalImage.getWidth();
int height = originalImage.getHeight();
int newWidth = 28;
int newHeight = 28;
float scaleWidth = ((float) newWidth) / width;
float scaleHeight = ((float) newHeight) / height;
Matrix matrix = new Matrix();
matrix.postScale(scaleWidth, scaleHeight);
// Bitmap resizedImage = Bitmap.createBitmap(originalImage, 0, 0, width, height, matrix, false);
Bitmap resizedImage = Bitmap.createScaledBitmap(originalImage,newWidth,newHeight,true);
return resizedImage;
}
// The toGrayscale() and convertBitmapToFloatBuffer() methods are defined as follows:
public static Bitmap toGrayscale(Bitmap bmpOriginal) {
int width, height;
height = bmpOriginal.getHeight();
width = bmpOriginal.getWidth();
Bitmap bmpGrayscale = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
Canvas c = new Canvas(bmpGrayscale);
Paint paint = new Paint();
ColorMatrix cm = new ColorMatrix();
cm.setSaturation(0);
ColorMatrixColorFilter f = new ColorMatrixColorFilter(cm);
paint.setColorFilter(f);
c.drawBitmap(bmpOriginal, 0, 0, paint);
return bmpGrayscale;
}
public static FloatBuffer convertBitmapToFloatBuffer(Bitmap bitmap) {
int width = bitmap.getWidth();
int height = bitmap.getHeight();
float[] floatValues = new float[width * height];
for (int i = 0; i < height; ++i) {
for (int j = 0; j < width; ++j) {
int pixelValue = bitmap.getPixel(j, i);
floatValues[i * width + j] = (float)(pixelValue & 0xff) / 255.0f;
}
}
FloatBuffer floatBuffer = FloatBuffer.wrap(floatValues);
return floatBuffer;
}
}
How do I create an on-device training (transfer learning) Android application?
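For the conversion step itself, the linked guide exports a model whose train/infer/save/restore functions become TFLite signatures; a minimal sketch of that conversion, assuming a SavedModel produced as in the guide:

import tensorflow as tf

# convert a SavedModel that exposes train/infer/save/restore signatures
# ('saved_model_dir' is an assumed path)
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_dir')
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # regular TFLite kernels
    tf.lite.OpsSet.SELECT_TF_OPS,    # TF ops needed by the training graph
]
converter.experimental_enable_resource_variables = True
with open('model.tflite', 'wb') as f:
    f.write(converter.convert())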
I'm trying to send an image from C# to Python via UDP. I split the image into 1024-byte chunks and send those chunks; on the Python side I receive and merge them. The problem is speed: an image weighing about 200 KB takes about 7 seconds to send. I have read some questions about similar problems with UDP, but nothing has helped. What can I do to speed up this connection? Thanks!
Python side:
import time
import threading
import socket
import traceback
import warnings

class ListenPort:
    def __init__(self, port: int, is_camera: bool = False):
        self.__port = port
        self.__is_camera = is_camera
        self.thread = None
        self.__stop_thread = False
        self.out_string = ""
        self.out_bytes = b""
        self.ip_end_point = ('127.0.0.1', self.__port)
        self.sct = None

    def start_listening(self):
        self.thread = threading.Thread(target=self.listening, args=())
        self.thread.start()

    def listening(self):
        self.sct = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        print("connected: " + str(self.__port))
        while not self.__stop_thread:
            try:
                if self.__is_camera:
                    self.sct.sendto("Wait for size".encode('utf-16-le'), self.ip_end_point)
                    image_size, _ = self.sct.recvfrom(4)
                    print(len(image_size))
                    if len(image_size) < 4:
                        continue
                    # little-endian 32-bit length; same as int.from_bytes(image_size, 'little')
                    buffer_size = (image_size[3] & 0xff) << 24 | (image_size[2] & 0xff) << 16 | \
                                  (image_size[1] & 0xff) << 8 | (image_size[0] & 0xff)
                    self.sct.sendto("Wait for image".encode('utf-16-le'), self.ip_end_point)
                    local_bytes = b""
                    check_iters = 0
                    for i in range(0, buffer_size // 1024):
                        local_bytes += self.sct.recvfrom(1024)[0]
                        self.sct.sendto("Got data".encode('utf-16-le'), self.ip_end_point)
                        check_iters += 1
                        print(check_iters)
                    print(check_iters)
                    if buffer_size % 1024 > 0:
                        local_bytes += self.sct.recvfrom(buffer_size % 1024)[0]
                    self.out_bytes = local_bytes
                else:
                    self.sct.sendto("Wait for data".encode('utf-16-le'), self.ip_end_point)
                    self.out_bytes, _ = self.sct.recvfrom(1024)
                    self.out_string = self.out_bytes.decode('utf-16-le')
            except OSError:
                break
            except (Exception, EOFError):
                traceback.print_exc()
        print("disconnected: " + str(self.__port))

    def reset_out(self):
        self.out_string = ""
        self.out_bytes = b""

    def stop_listening(self):
        self.__stop_thread = True
        self.reset_out()
        if self.sct is not None:
            self.sct.shutdown(socket.SHUT_RDWR)
        if self.thread is not None:
            st_time = time.time()
            while self.thread.is_alive():
                if time.time() - st_time > 2:
                    warnings.warn("Something went wrong. Rude disconnection on port " + str(self.__port))
                    self.sct.close()
                    st_time = time.time()

listen = ListenPort(63213, True)
listen.start_listening()
st_time = time.time()
while True:
    if len(listen.out_bytes) == 218669:
        print("got image")
        break
print(time.time() - st_time)
listen.stop_listening()
# the output of print(time.time() - st_time) is 7.35678505897522
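That timing is consistent with a stop-and-wait pattern: each 1024-byte chunk waits for its "Got data" acknowledgement before the next one is sent, so total time is roughly the chunk count times one round trip. A back-of-envelope sketch (the per-exchange latency is an assumed value, not a measurement):

chunks = 218669 // 1024 + 1          # about 214 request/ack exchanges
assumed_rtt = 0.03                   # seconds per exchange (assumed)
print(chunks, chunks * assumed_rtt)  # ~214 chunks, ~6.4 s: same order as the 7.36 s above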
C# side:
public struct Received
{
public IPEndPoint Sender;
public string Message;
}
public abstract class UdpBase
{
protected UdpClient Client;
protected UdpBase()
{
Client = new UdpClient();
}
public async Task<Received> Receive()
{
var result = await Client.ReceiveAsync();
return new Received()
{
Message = Encoding.Unicode.GetString(result.Buffer, 0, result.Buffer.Length),
Sender = result.RemoteEndPoint
};
}
}
public class TalkPortUdp : UdpBase
{
private bool stopTask = false;
private IPEndPoint _talkOn;
private string outString = "";
private byte[] outBytes = new byte[10];
public IPEndPoint sender;
public Task task;
public TalkPortUdp(IPEndPoint endpoint)
{
_talkOn = endpoint;
}
public void SetString(string data)
{
outString = data;
}
public void SetBytes(byte[] data)
{
outBytes = data;
}
public void Send(string message, IPEndPoint endpoint)
{
var datagram = Encoding.Unicode.GetBytes(message);
Client.Send(datagram, datagram.Length, endpoint);
}
public void SendBytes(byte[] message, IPEndPoint endpoint)
{
Client.Send(message, message.Length, endpoint);
}
public void StartTalking()
{
Client = new UdpClient(_talkOn);
stopTask = false;
task = Task.Run(() => {
while (!stopTask)
{
try
{
if (this.Client.Available > 0)
{
var received = this.Receive().GetAwaiter().GetResult();
string clientTask = received.Message;
sender = received.Sender;
if (clientTask.Contains("Wait for size"))
{
byte[] intBytes = BitConverter.GetBytes(outBytes.Length);
this.SendBytes(intBytes, received.Sender);
}
else if (clientTask.Contains("Wait for image"))
{
// note: this bound skips the last full chunk when Length is an exact multiple of 1024
for (int i = 0; i < outBytes.Length - 1024; i += 1024)
{
byte[] second = new byte[1024];
Buffer.BlockCopy(outBytes, i, second, 0, 1024);
Console.WriteLine(i);
this.SendBytes(second, received.Sender);
received = this.Receive().GetAwaiter().GetResult();
}
int lastt = outBytes.Length % 1024;
if (lastt > 0)
{
byte[] lasttBytes = new byte[lastt];
Buffer.BlockCopy(outBytes, outBytes.Length - lastt, lasttBytes, 0, lastt);
this.SendBytes(lasttBytes, received.Sender);
}
}
else if (clientTask.Contains("Wait for data"))
{
this.Send(outString, received.Sender);
}
}
}
catch (Exception ex)
{
Console.WriteLine(ex.ToString());
}
}
Console.WriteLine("Stopped");
});
}
public bool IsAlive()
{
if (task != null)
return task.Status.Equals(TaskStatus.Running);
return false;
}
public void StopTalking()
{
stopTask = true;
Client.Dispose();
Client.Close();
}
}
internal class Program
{
static void Main(string[] args)
{
IPEndPoint ipPoint = new IPEndPoint(IPAddress.Any, 63213);
TalkPortUdp talk = new TalkPortUdp(ipPoint);
talk.StartTalking();
while (true)
{
// Load file meta data with FileInfo
FileInfo fileInfo = new FileInfo(@"D:\Downloads\test_img.png");
// The byte[] to save the data in
byte[] data = new byte[fileInfo.Length];
// Console.WriteLine(fileInfo.Length);
// Load a filestream and put its content into the byte[]
using (FileStream fs = fileInfo.OpenRead())
{
fs.Read(data, 0, data.Length);
}
talk.SetBytes(data);
Thread.Sleep(1000);
}
}
}
I've tried several ways to parse CSV. I have a CSV file, and I want to obtain arrays from the data.
Pandas equivalent:
pd.read_csv('csv_file.csv').values # returns [100, 14] dim array
I've tried Papa Parse for parsing the CSV file:
let parsed_data = papa.parse(file,
{
header: true ,
newline: '\n',
dynamicTyping: true,
complete:function(results)
{
data = results.data;
}}
);
This returns a [100, 1] dim array.
I tried tf.data.csv and it doesn't seem to work:
async function parse_data(){
csvDataset = tf.data.csv(data_path,
{
hasHeader: true
}
);
console.log(csvDataset);
};
console.log returns Object { size: null, input: {…}
I want to perform inference, something like this (Python equivalent):
model.predict(tf.tensor(pd.read_csv('csv').values))
tf.data.csv returns a tf.data.CSVDataset, which is an async iterator. The data can be retrieved from it to create a tensor. A similar question has been asked here.
const csvUrl =
'https://storage.googleapis.com/tfjs-examples/multivariate-linear-regression/data/boston-housing-train.csv';
async function run() {
const csvDataset = tf.data.csv(
csvUrl, {
columnConfigs: {
medv: {
isLabel: true
}
}
});
const numOfFeatures = (await csvDataset.columnNames()).length - 1;
// Prepare the Dataset for training.
const flattenedDataset =
csvDataset
.map(({xs, ys}) =>
{
// Convert xs(features) and ys(labels) from object form (keyed by
// column name) to array form.
return {xs:Object.values(xs), ys:Object.values(ys)};
})
//.batch(10);
const it = await flattenedDataset.iterator()
const xs = []
const ys = []
// read only the data for the first 5 rows
// all the data need not to be read once
// since it will consume a lot of memory
for (let i = 0; i < 5; i++) {
let e = await it.next()
xs.push(e.value.xs)
ys.push(e.value.ys)
}
const features = tf.tensor(xs)
const labels = tf.tensor(ys)
console.log(features.shape)
console.log(labels.shape)
}
run();
Help needed converting C# code to Python. Specifically, I need help converting the using statement in the GetCodes method (public Dictionary<string, string> GetCodes) in the example below.
public class Class1
{
public static string GetUrlStartingAtPath(string url)
{
Regex pathEx = new Regex(@"(?in)^https?:\/{0,3}[0-9.\-A-Za-z]+(:\d+)?(?<content>.+)$", RegexOptions.Compiled);
// parse out the path
return pathEx.Match(url).Groups[1].ToString();
}
public static long GetUnixEpochTime()
{
// UNIX epoch
var epoch = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
var milliseconds = (long)(DateTime.Now.ToUniversalTime() - epoch).TotalMilliseconds;
return milliseconds;
}
public Dictionary<string, string> GetCodes(string url)
{
const string SBO_KEY_1 = "1";
const string SBO_KEY_TEXT_1 = "PQ/OZW8SZCU/wUwm2u+Os6oyAmiFfif6QGVAhCLUahh36ui7BJfwymytCgULDZ6G111ud6SuySii544A6Uw+Tw==";
Dictionary<string, byte[]> sboKeys = new Dictionary<string, byte[]>();
var b64 = Convert.FromBase64String(SBO_KEY_TEXT_1);
sboKeys.Add(SBO_KEY_1, Convert.FromBase64String(SBO_KEY_TEXT_1));
string scheme = "1";
long unixEpochTime = GetUnixEpochTime();
var sboKey = sboKeys[scheme];
// store the headers we'll return
var headers = new Dictionary<string, string>();
// parse out the path
string urlFromPath = GetUrlStartingAtPath(Uri.UnescapeDataString(url));
// create base message
var baseMessage = String.Format(System.Globalization.CultureInfo.CurrentCulture, "{0}:{1}", unixEpochTime, urlFromPath);
// create signable message
var signable = ASCIIEncoding.ASCII.GetBytes(baseMessage);
// create crypto class
using (var hmacsha1 = new System.Security.Cryptography.HMACSHA1(sboKey))
{
// create hash
var hash = hmacsha1.ComputeHash(signable);
// add headers
headers.Add("SNL-Request-Time", unixEpochTime.ToString(System.Globalization.CultureInfo.CurrentCulture));
headers.Add("SNL-Request-Client", String.Format(System.Globalization.CultureInfo.CurrentCulture, "{0}:{1}", scheme, Convert.ToBase64String(hash)));
}
// done
return headers;
}
public void main()
{
string url = "http://localhost/SNL.Services.Data.Api.Service/v2/Internal/General/SecurityIndexs?$select=KeyIndex,IndexShortNameDisplay&$expand=PricingMRIndexs($select=IndexPriceChange,IndexPriceChangeActual,IndexValue,PricingAsOf),SecurityIndexValues($select=IndexValue,PricingAsOf;$filter=PricingAsOf+gt+2019-01-12;$expand=IndexValueChanges($select=IndexPriceChange,IndexPriceChangeActual;$filter=KeyPricePeriod+eq+1);$orderby=PricingAsOf)&$filter=KeyIndex+eq+1+or+KeyIndex+eq+2+or+KeyIndex+eq+4+or+KeyIndex+eq+196+or+KeyIndex+eq+339&$orderby=KeyIndex&cache=3600";
var Codes = GetCodes(url);
foreach (var k in Codes.Keys)
{
Console.WriteLine(k + " = " + Codes[k]);
}
}
}
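As a sketch of the Python equivalent (regex and header names carried over from the C# above; not verified against the original service): C#'s using block only guarantees disposal of the HMACSHA1 instance, and Python's hmac.new() holds no unmanaged resources, so the block collapses into a plain call rather than a with statement.

import base64
import hashlib
import hmac
import re
import time
from urllib.parse import unquote

SBO_KEYS = {
    '1': base64.b64decode(
        'PQ/OZW8SZCU/wUwm2u+Os6oyAmiFfif6QGVAhCLUahh36ui7BJfwymytCgULDZ6G111ud6SuySii544A6Uw+Tw=='),
}

def get_url_starting_at_path(url):
    # parse out everything after scheme://host[:port]
    m = re.match(r'(?i)^https?:/{0,3}[0-9.\-A-Za-z]+(?::\d+)?(?P<content>.+)$', url)
    return m.group('content') if m else ''

def get_unix_epoch_time():
    # milliseconds since the UNIX epoch
    return int(time.time() * 1000)

def get_codes(url, scheme='1'):
    epoch_ms = get_unix_epoch_time()
    base_message = '{}:{}'.format(epoch_ms, get_url_starting_at_path(unquote(url)))
    # the C# using block becomes a plain hmac.new() call
    digest = hmac.new(SBO_KEYS[scheme], base_message.encode('ascii'), hashlib.sha1).digest()
    return {
        'SNL-Request-Time': str(epoch_ms),
        'SNL-Request-Client': '{}:{}'.format(scheme, base64.b64encode(digest).decode('ascii')),
    }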
I'm trying to make a Python client communicate with an Arduino server. The Python client asks the server to take a measurement from the sonar; the server sends a confirmation message and then takes the measurement.
The client:
client.py
import socket
import time
while True:
    try:
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client_socket.connect(("192.168.0.250", 10220))
        data = "GET\nSONAR\n\n"
        print('send to server: ' + data)
        client_socket.sendall(data.encode())
        receive = client_socket.recv(2048)
        print(receive.decode())
        client_socket.close()
        time.sleep(0.1)
    except Exception as msg:
        print(msg)
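As an aside, a small sketch to time one full request/response cycle from the client (same host and port as above) helps separate network cost from server-side delay:

import socket
import time

def timed_request(payload=b"GET\nSONAR\n\n"):
    # one connect/send/receive/close cycle, timed
    start = time.time()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("192.168.0.250", 10220))
    s.sendall(payload)
    reply = s.recv(2048)
    s.close()
    return reply, time.time() - start

reply, elapsed = timed_request()
print(reply, elapsed)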
and the server:
server.ino
#include <avr/wdt.h>
#include <SPI.h>
#include <Ethernet.h>
#include <SoftwareSerial.h>
//localClient parameters, for sending data to the server.
byte mac[] = {0x90, 0xA2, 0xDA, 0x0F, 0x03, 0x58};
byte ip[] = {192,168,0,250};
byte server[] = {192, 168, 0, 100};
int serverPort = 8220;
//Server parameters, for acting like a server.
int pollPort = serverPort + 2000;
EthernetServer pollServer = EthernetServer(pollPort);
//char inString[32]; // string for incoming serial data
//sonar stuff
String content_string;
int NUM_SONARS = 1;
int sonarPin[] = {2, 3, 4, 5};
int triggerPin = 6;
int sonarThreshold = 12.0;
int sonarState[] = {0, 0, 0, 0};
long pulse;
int numPulses = 3;
int pulseArray[] = {0,0,0,0,0};
int filteredMode = 0;
float time;
void setup() {
Serial.begin(9600);
Ethernet.begin(mac, ip);
wdt_enable(WDTO_8S);
pollServer.begin();
for(int i = 0; i < NUM_SONARS; i++) {
pinMode(sonarPin[i], INPUT);
}
pinMode(triggerPin, OUTPUT);
digitalWrite(triggerPin, LOW);
time = 0;
}
void loop() {
wdt_reset();
time = millis();
EthernetClient pollClient = pollServer.available();
if (pollClient) {
boolean currentLineIsBlank = true;
String receivingString = "";
while (pollClient.connected()) {
//while the socket is open
if(pollClient.available()) {
//and there is something to read on the port, then read the available characters
char c = pollClient.read();
receivingString += c;
if (c == '\n' && currentLineIsBlank) {
Serial.print("String received -- ");
Serial.print(receivingString);
Serial.print("at ");
Serial.println(time);
pollClient.println("Received message.");
break;
}
if (c == '\n') {
// you're starting a new line
currentLineIsBlank = true;
}
else if (c != '\r') {
// you've gotten a character on the current line
currentLineIsBlank = false;
}
}
}
// give the web browser time to receive the data
delay(1);
// parse the incoming data
String command = split(receivingString,'\n',0);
String payload = split(receivingString,'\n',1);
String key = split(payload,'=',0);
String value = split(payload,'=',1);
//PARSE THE KEY AND VALUE NOW
if(command == "GET") {
//if I received a GET command, send a response to the client.
if(key == "SONAR") {
pingSonars();
}
}
}
String split(String data, char delimiter, int index) {
int found = 0;
int strIndex[] = {0, -1};
int maxIndex = data.length()-1;
for(int i=0; i<=maxIndex && found<=index; i++){
if(data.charAt(i)==delimiter || i==maxIndex){
found++;
strIndex[0] = strIndex[1]+1;
strIndex[1] = (i == maxIndex) ? i+1 : i;
}
}
return found>index ? data.substring(strIndex[0], strIndex[1]) : "";
}
void isort(int *a, int n) {
for (int i = 1; i < n; ++i) {
int j = a[i];
int k;
for (k = i - 1; (k >= 0) && (j < a[k]); k--) {
a[k + 1] = a[k];
}
a[k + 1] = j;
}
}
int mode(int *x,int n){
int i = 0;
int count = 0;
int maxCount = 0;
int mode = 0;
int bimodal;
int prevCount = 0;
while(i<(n-1)){
prevCount=count;
count=0;
while(x[i]==x[i+1]){
count++;
i++;
}
if(count>prevCount && count>maxCount){
mode=x[i];
maxCount=count;
bimodal=0;
}
if(count==0){
i++;
}
if(count==maxCount){//If the dataset has 2 or more modes.
bimodal=1;
}
if(mode==0||bimodal==1){//Return the median if there is no mode.
mode=x[(n/2)];
}
}
return mode;
}
void printArray(int *a, int n) {
for (int i = 0; i < n; i++)
{
Serial.print(a[i], DEC);
Serial.print(' ');
}
Serial.println();
}
void pingSonars() {
digitalWrite(6, HIGH);
for(int i = 0; i < NUM_SONARS; i++) {
for(int j = 0; j < numPulses; j++) {
pulse = pulseIn(sonarPin[i], HIGH);
pulseArray[j] = pulse/147; //convert to inches -- 147 µs per inch
delay(5);
}
isort(pulseArray, numPulses);
filteredMode = mode(pulseArray,numPulses);
//printArray(pulseArray,numPulses);
Serial.print("Filtered distance for Sonar ");
Serial.print(i);
Serial.print(": ");
Serial.println(filteredMode);
if((filteredMode < sonarThreshold) && !sonarState[i]) {
//if we are closer than the threshold and previously were not, this is a rising edge:
Serial.print("Sonar ");
Serial.print(i);
Serial.println(" triggered!");
sonarState[i] = 1;
}
else if (filteredMode > sonarThreshold && sonarState[i]) {
//if we are greater than the threshold and previously were, this is a falling edge:
Serial.print("Sonar ");
Serial.print(i);
Serial.println(" falling!");
sonarState[i] = 0;
}
}
}
The client sends requests at about a 100-millisecond interval, but the server seems to respond much more slowly than that, maybe 2-3 times per second. What's limiting the rate of communication? Is it possible that the serial printouts are actually limiting it? Or does it have to do with how I'm opening, closing, and listening to the port in the server code?
Thanks for your help!