How to extract thermal frame from FLIR video export file? - python

I have an IR camera video file and I want to extract n frames from it. I followed the normal OpenCV method to extract frames from a video, like below:
import cv2

vidcap = cv2.VideoCapture('3.mp4')
success, image = vidcap.read()
count = 0
while success:
    cv2.imwrite("frame%d.jpg" % count, image)  # save frame as JPEG file
    success, image = vidcap.read()
    print('Read a new frame: ', success)
    count += 1
This extracts each frame as a normal image instead of a thermal image.
I then tried the code below.
import flirimageextractor
from matplotlib import cm
from glob import glob

flir = flirimageextractor.FlirImageExtractor(palettes=[cm.jet, cm.bwr, cm.gist_ncar])
for file_ in glob("images/*.jpg"):
    flir.process_image(file_)
    flir.save_images()
    flir.plot()
It throws KeyError: 'RawThermalImageType'.
Full stack trace:
Traceback (most recent call last):
  File "thermal_camera.py", line 8, in <module>
    flir.process_image(file_)
  File "/usr/local/lib/python3.5/dist-packages/flirimageextractor/flirimageextractor.py", line 101, in process_image
    if self.get_image_type().upper().strip() == "TIFF":
  File "/usr/local/lib/python3.5/dist-packages/flirimageextractor/flirimageextractor.py", line 133, in get_image_type
    return json.loads(meta_json.decode())[0]["RawThermalImageType"]
KeyError: 'RawThermalImageType'
But the above code works well for sample thermal images, which means I am not extracting the frames from the video properly.
How do I extract frames from a FLIR video without losing the thermal (raw) information?

Just export the movie from the FLIR research software as a .wmv file; then it will work just fine.
import cv2

vidcap = cv2.VideoCapture('3.wmv')
success, image = vidcap.read()
count = 0
while success:
    cv2.imwrite("frame%d.jpg" % count, image)  # save frame as JPEG file
    success, image = vidcap.read()
    print('Read a new frame: ', success)
    count += 1
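
To check whether an extracted frame actually carries the embedded FLIR metadata that flirimageextractor needs (the missing RawThermalImageType tag is exactly what raised the KeyError above), here is a minimal sketch that queries the file with exiftool, assuming exiftool is installed and on your PATH:

import json
import subprocess

def has_raw_thermal(path):
    # Ask exiftool for just the RawThermalImageType tag, in JSON form
    out = subprocess.check_output(["exiftool", "-json", "-RawThermalImageType", path])
    meta = json.loads(out.decode())[0]
    return "RawThermalImageType" in meta

print(has_raw_thermal("frame0.jpg"))

If this prints False, the frame was saved without the FLIR radiometric metadata and flirimageextractor will raise the same KeyError.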

Related

Read and extract keypoints from folder of videos opencv

I have a list of videos (10 sec each) in a folder and I'm trying to loop through each action video to extract keypoints and save them as json files.
path = "path to video folder"
for file in os.listdir(path):
cap = cv2.VideoCapture(path+file)
while cap.isOpened():
try:
ret, frame = cap.read()
I ran into a problem where the extracted data contains keypoints from other videos. I just want the loop to run until a video is done, stop, and then start on the next video. How can I correct this?
If you want to process multiple videos in turn, you can check the ret (success) value of cap.read() to detect the end of each file. Here is a basic example you can start with:
import os
import cv2

path = "videos"
for file in os.listdir(path):
    print(file)
    cap = cv2.VideoCapture(path + '/' + file)
    count = 0
    while True:
        ret, frame = cap.read()
        # check for end of file
        if not ret:
            break
        # process frame
        count += 1
    print(f"{count} frames read")
    cap.release()
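
If you also want to keep the keypoints from different videos separate, one option is to reset a per-video list inside the loop and write one JSON file per video. This is only a sketch: extract_keypoints is a hypothetical placeholder for whatever model you actually run on each frame.

import os
import json
import cv2

def extract_keypoints(frame):
    # hypothetical placeholder: replace with your real keypoint extractor;
    # here we only record the frame dimensions so the sketch runs
    return {"height": int(frame.shape[0]), "width": int(frame.shape[1])}

path = "videos"
for file in os.listdir(path):
    cap = cv2.VideoCapture(os.path.join(path, file))
    keypoints = []  # reset for every video so results never mix
    while True:
        ret, frame = cap.read()
        if not ret:  # end of this video
            break
        keypoints.append(extract_keypoints(frame))
    cap.release()
    # one JSON file per video, named after the source file
    with open(os.path.splitext(file)[0] + ".json", "w") as f:
        json.dump(keypoints, f)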

How can I capture detected image by Yolov4

I want to capture the boxes recognized while using YOLOv4 webcam recognition, so I used this code:
import cv2
import detect as dt
from darknet import Darknet
from PIL import Image

vidcap = cv2.VideoCapture(0)
success, image = vidcap.read()
count = 0
m = Darknet('darknet/data/yolo-obj.cfg')
m.load_weights('darknet/backup/yolo-obj_30000.weights')
use_cuda = 1
m.cuda()
while success:
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    im_pil = Image.fromarray(image)
    im_pil = im_pil.resize((m.width, m.height))
    boxes = dt.do_detect(m, im_pil, 0.5, 0.4, use_cuda)
    result = open('Desktop/captureyolobox/capture%04d.jpg' % (count), 'w')
    for i in range(len(boxes)):
        result.write(boxes[i])
    count = count + 1
    success, image = vidcap.read()
    result.close()
I've encountered this problem. I searched the web for a solution but couldn't find one. Can you help me?
Traceback (most recent call last):
  File "yoloshort.py", line 2, in <module>
    import detect as dt
ImportError: No module named detect
Do you mean detect_image in darknet.py? Check darknet.py to see whether it already has the function you want.
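
If the goal is to save the detected regions themselves rather than write the raw box list to a file, one possible approach is to crop every box out of the frame and save it with cv2.imwrite. This is only a sketch: it assumes each box is (x1, y1, x2, y2, ...) with coordinates normalized to [0, 1], which is what some YOLOv4 do_detect implementations return; adjust the unpacking if yours are pixel values.

import os
import cv2

def save_detected_boxes(frame, boxes, count, out_dir="captureyolobox"):
    # Crop each detected box out of the frame and save it as a JPEG.
    # Assumes boxes hold normalized (x1, y1, x2, y2, ...) coordinates.
    os.makedirs(out_dir, exist_ok=True)
    h, w = frame.shape[:2]
    for i, box in enumerate(boxes):
        x1, y1, x2, y2 = box[:4]
        # convert normalized coordinates to pixel indices
        x1, x2 = int(x1 * w), int(x2 * w)
        y1, y2 = int(y1 * h), int(y2 * h)
        crop = frame[max(y1, 0):y2, max(x1, 0):x2]
        if crop.size:  # skip empty or degenerate boxes
            cv2.imwrite(os.path.join(out_dir, "capture%04d_%d.jpg" % (count, i)), crop)

Inside the while loop above it would be called as save_detected_boxes(image, boxes, count) instead of opening the .jpg path as a text file.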

Is there any method to extract still frames from a live video

I am working on a sign language project and I just wanted to know how I can extract still frames from a live video. Is there a built-in method, or do I have to code the logic for it myself?
OpenCV lets you grab each frame from the video source, and you can analyse, modify and/or save that frame as you want.
A simple example using Python might be (based on the tutorial here: https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html):
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
i = 0
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    # Here we just save the frame to a file
    cv2.imwrite('frame_' + str(i) + '.jpg', frame)
    i += 1
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
For a more complex example, this time on Android, the following code will save the frame to an image file if it detects a particular color (based on the OpenCV Color Blob detection example):
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    if (mIsColorSelected) {
        mDetector.process(mRgba);
        List<MatOfPoint> contours = mDetector.getContours();
        Log.e(TAG, "Contours count: " + contours.size());
        Imgproc.drawContours(mRgba, contours, -1, CONTOUR_COLOR);
        Mat colorLabel = mRgba.submat(4, 68, 4, 68);
        colorLabel.setTo(mBlobColorRgba);
        Mat spectrumLabel = mRgba.submat(4, 4 + mSpectrum.rows(), 70, 70 + mSpectrum.cols());
        mSpectrum.copyTo(spectrumLabel);
        File path = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES);
        String testFilename = "TestOCVwrite.png";
        File testFile = new File(path, testFilename);
        String testFullfileName = testFile.toString();
        Log.i(TAG, "Writing to filename: " + testFilename);
        Boolean writeResult = Imgcodecs.imwrite(testFullfileName, mRgba);
        if (!writeResult) {
            Log.i(TAG, "Failed to write to filename: " + testFilename);
        }
    }
    return mRgba;
}

Get images of a streaming video url with Python

I have this URL:
https://www.earthcam.com/js/video/embed.php?type=h264&vid=AbbeyRoadHD1.flv
I want to get frames from the stream and save them in Python. Is this possible? I looked into the streamlink library, but I'm not sure if it will work.
Sorry for my bad English, thanks.
streams = streamlink.streams("https://www.earthcam.com/js/video/embed.php?type=h264&vid=AbbeyRoadHD1.flv")
Copied from this question: Python - Extracting and Saving Video Frames
Try this:
import cv2

vidcap = cv2.VideoCapture('https://www.earthcam.com/js/video/embed.php?type=h264&vid=AbbeyRoadHD1.flv')
success, image = vidcap.read()
count = 0
while success:
    cv2.imwrite("frame%d.jpg" % count, image)  # save frame as JPEG file
    success, image = vidcap.read()
    print('Read a new frame: ', success)
    count += 1
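
If OpenCV cannot open the page URL directly, another option is to let streamlink resolve the actual stream URL and hand that to cv2.VideoCapture. This is a sketch under the assumption that the site exposes an HTTP/HLS stream that streamlink can find and that FFmpeg (which OpenCV uses) can open:

import cv2
import streamlink

page = "https://www.earthcam.com/js/video/embed.php?type=h264&vid=AbbeyRoadHD1.flv"
streams = streamlink.streams(page)
# "best" is the highest-quality stream streamlink found; .url is the
# underlying stream URL that OpenCV can usually open via FFmpeg
vidcap = cv2.VideoCapture(streams["best"].url)

count = 0
success, image = vidcap.read()
while success and count < 100:  # stop after 100 frames for this sketch
    cv2.imwrite("frame%d.jpg" % count, image)
    success, image = vidcap.read()
    count += 1
vidcap.release()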

How to print 1 in every thousand frames using opencv and python?

I am trying to save one frame in every thousand frames of a video. Below is the code I am currently using:
import cv2
import numpy as np
import os

# Playing video from file:
cap = cv2.VideoCapture('D:/01 Projects/AMAZON CATALYST PROJECT/Surgery1.mpg')
try:
    if not os.path.exists('D:/01 Projects/AMAZON CATALYST PROJECT/data_surg1'):
        os.makedirs('D:/01 Projects/AMAZON CATALYST PROJECT/data_surg1')
except OSError:
    print('Error: Creating directory of data_surg1')

currentFrame = 0
while(True):
    # Capture frame-by-frame
    if currentFrame > 0:
        cap.set(cv2.CAP_PROP_POS_MSEC, currentFrame)
    ret, frame = cap.read()
    # Saves image of the current frame in jpg file
    name = 'D:/01 Projects/AMAZON CATALYST PROJECT/data_surg1/frame' + str(currentFrame/1000) + '.jpg'
    print('Creating...' + name)
    cv2.imwrite(name, frame)
    # To stop duplicate images
    currentFrame += 1000

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
However, I am not sure if this is saving the frames correctly. When I look at the frames in the file explorer, the numbers are initially very high and then drop to form a sequential numbering compared to the previous image. I am using Python 2.7 and OpenCV 3.3.
To seek by frame index instead of by time, the following script works:
import cv2
import numpy as np
import os

# Playing video from file:
cap = cv2.VideoCapture('D:/01 Projects/AMAZON CATALYST PROJECT/Surgery1.mpg')
try:
    if not os.path.exists('D:/01 Projects/AMAZON CATALYST PROJECT/data_surg1'):
        os.makedirs('D:/01 Projects/AMAZON CATALYST PROJECT/data_surg1')
except OSError:
    print('Error: Creating directory of data_surg1')

currentFrame = 0
while(True):
    # Capture frame-by-frame, seeking by frame index rather than by time
    if currentFrame > 0:
        cap.set(cv2.CAP_PROP_POS_FRAMES, currentFrame)
    ret, frame = cap.read()
    # Stop when the video has no more frames
    if not ret:
        break
    # Saves image of the current frame in jpg file
    name = 'D:/01 Projects/AMAZON CATALYST PROJECT/data_surg1/frame' + str(currentFrame/1000) + '.jpg'
    print('Creating...' + name)
    cv2.imwrite(name, frame)
    # Jump ahead 1000 frames so only one frame in every thousand is saved
    currentFrame += 1000

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
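
If seeking with cap.set is not needed, a simpler variant is to read the video sequentially and save only every 1000th frame. This is a sketch with a placeholder input path:

import cv2

cap = cv2.VideoCapture("Surgery1.mpg")  # placeholder path, adjust to your file
frame_idx = 0
saved = 0
while True:
    ret, frame = cap.read()
    if not ret:  # end of video
        break
    if frame_idx % 1000 == 0:  # keep one frame in every thousand
        cv2.imwrite("frame%d.jpg" % saved, frame)
        saved += 1
    frame_idx += 1
cap.release()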
