I am trying to read a Python script's output continuously from WPF C# (in Python I am tracking a colored ball and printing its x, y and radius).
I tried socket communication (ZeroMQ):
using (var requester = new ZSocket(ZSocketType.REQ))
{
    // Connect
    requester.Connect("tcp://127.0.0.1:5555");
    for (; ; )
    {
        string requestText = "Hello";
        Console.Write("Sending {0}...", requestText);
        // Send
        requester.Send(new ZFrame(requestText));
        // Receive
        using (ZFrame reply = requester.ReceiveFrame())
        {
            Console.WriteLine(" Received: {0} {1}!", requestText, reply.ReadString());
        }
    }
}
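For reference, a minimal sketch of the matching Python REP side (my assumption of what the server should look like; the zmq calls in the script further below are commented out). REQ/REP sockets work in lockstep: every recv() on the Python side must be answered by a send() before the C# REQ socket may issue its next request, otherwise both ends block.

import zmq

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")
while True:
    message = socket.recv()           # wait for the C# "Hello"
    socket.send_string("x,y,radius")  # reply with the latest tracking data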
I tried calling Python from a C# Process:
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    string python = @"C:\Desktop\test\venv\Scripts\python.exe";
    // python app to call
    string myPythonApp = @"C:\Users\Desktop\test\main.py";

    // Create new process start info
    ProcessStartInfo myProcessStartInfo = new ProcessStartInfo(python);
    // make sure we can read the output from stdout
    myProcessStartInfo.UseShellExecute = false;
    myProcessStartInfo.RedirectStandardOutput = true;
    myProcessStartInfo.Arguments = myPythonApp;

    Process myProcess = new Process();
    // assign start information to the process
    myProcess.StartInfo = myProcessStartInfo;
    // start the process
    myProcess.OutputDataReceived += MyProcess_OutputDataReceived;
    myProcess.Start();
    myProcess.BeginOutputReadLine();

    // Read the standard output of the app we called.
    // in order to avoid deadlock we will read output first
    // and then wait for process terminate:
    //StreamReader myStreamReader = myProcess.StandardOutput;
    //string myString = myStreamReader.ReadLine();
    //await myStreamReader.ReadAsync(result, 0, (int)myStreamReader.BaseStream.Length);

    // wait exit signal from the app we called and then close it.
    //myProcess.WaitForExit();
    //myProcess.Close();

    // write the output we got from python app
    //Console.WriteLine("Value received from script: " + myString);
}

private void MyProcess_OutputDataReceived(object sender, DataReceivedEventArgs e)
{
    Console.WriteLine("++++");
}
Python writes the data to a text file, and C# reads from that file:
Console.WriteLine("hi");
for (; ; )
{
if (File.Exists(textFile))
{
using (StreamReader file = new StreamReader(textFile))
{
int counter = 0;
string ln;
while ((ln = file.ReadLine()) != null)
{
Console.WriteLine(ln);
counter++;
}
file.Close();
}
}
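One pitfall with this approach (my note, not from the original post): if the Python writer keeps the file open, or C# reads while a write is in progress, the reader sees a locked or half-written file. A hedged Python-side sketch that publishes each value atomically, assuming both processes share the same working directory:

import os

def write_position(x, y, radius, path="myfile.txt"):
    # write to a temporary file first, then rename; os.replace is atomic
    # on both Windows and POSIX, so the reader sees old or new, never partial
    tmp = path + ".tmp"
    with open(tmp, "w") as f:
        f.write("{},{},{}".format(x, y, radius))
    os.replace(tmp, path)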
In all the cases, Python is holding the resource and not allowing C# to access it. I am now working on method 2, i.e. getting data from the Process, but MyProcess_OutputDataReceived is not triggering. Here is the Python script:
import sys
from collections import deque
from PIL import Image, ImageOps, ImageDraw
import numpy as np
import argparse
import imutils
import cv2
import time
import pandas as pd
import matplotlib.pyplot as plt
#import zmq
#context = zmq.Context()
#socket = context.socket(zmq.REP)
#socket.bind("tcp://*:5555")
#object tracking def start
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])
Data_Features = ['x', 'y', 'time']
Data_Points = pd.DataFrame(data=None, columns=Data_Features, dtype=float)
start = time.time()
#object tracking def end
#file1 = open("myfile.txt","w")
#L = ["This is Delhi \n","This is Paris \n","This is London \n"]
#file1.write("Hello \n")
#file1.writelines(L)
#file1.close()
while True:
    # Wait for next request from client
    #message = socket.recv()
    #print("Received request: %s" % message)

    # Do some 'work'
    #time.sleep(1)

    # Send reply back to client
    #socket.send("World")

    #object track vids start
    (grabbed, frame) = camera.read()
    current_time = time.time() - start
    if args.get("video") and not grabbed:
        break
    frame = imutils.resize(frame, width=1600)
    # frame2 = imutils.resize
    #blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        # only proceed if the radius meets a minimum size
        if (radius < 300) and (radius > 10):
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            sys.stdout.write(str(x) + '\n')
            #print(str(x))
            #file1 = open("myfile.txt", "w")
            #file1.write(str(x))
            #file1.close()
            #time.sleep(1)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            # Save The Data Points
            Data_Points.loc[Data_Points.size / 3] = [x, y, current_time]
    pts.appendleft(center)
    for i in range(1, len(pts)):
        # if either of the tracked points are None, ignore
        # them
        if pts[i - 1] is None or pts[i] is None:
            continue
        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
#object tracking vids end
h = 0.2
X0 = -3
Y0 = 20
time0 = 0
theta0 = 0.3
# Applying the correction terms to obtain actual experimental data
Data_Points['x'] = Data_Points['x'] - X0
Data_Points['y'] = Data_Points['y'] - Y0
Data_Points['time'] = Data_Points['time'] - time0
# Calculation of theta value
Data_Points['theta'] = 2 * np.arctan(
    Data_Points['y'] * 0.0000762 / h)  # the factor corresponds to pixel length in real life
Data_Points['theta'] = Data_Points['theta'] - theta0
# Creating the 'Theta' vs 'Time' plot
plt.plot(Data_Points['theta'], Data_Points['time'])
plt.xlabel('Theta')
plt.ylabel('Time')
# Export The Data Points As csv File and plot
Data_Points.to_csv('Data_Set.csv', sep=",")
plt.savefig('Time_vs_Theta_Graph.svg', transparent=True)
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
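A likely reason MyProcess_OutputDataReceived never fires (my observation, not part of the original post): when stdout is redirected to a pipe instead of a console, Python block-buffers it, so short writes such as str(x) sit in the buffer and never reach the C# side. A minimal sketch of the fix for the script above:

# flush after every write so the redirected pipe delivers the line
# to the C# OutputDataReceived handler immediately
sys.stdout.write(str(x) + '\n')
sys.stdout.flush()

Alternatively, start the interpreter unbuffered by passing -u in the ProcessStartInfo arguments.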
I've tried to follow several solutions on the internet, including this one, but no luck. I'm in the process of implementing an object detection system. I am on Windows 10, using PyCharm with Python 3.8.
I am getting errors for the packages. I've tried to add them through the package installer and through the terminal, with no luck. Here's the error:
Traceback (most recent call last):
File "D:/CabaleGame/runner.py", line 2, in <module>
import processCards
File "D:\CabaleGame\processCards.py", line 1, in <module>
import gtk.gdk
ModuleNotFoundError: No module named 'gtk'
Process finished with exit code 1
I've downloaded the package from here and here; which one is correct?
My program file:
import gtk.gdk
import cv
import time
import os
import string
def takeScreenCapture(screenShotNum = ""):
    time.sleep(1)

    w = gtk.gdk.get_default_root_window()
    sz = w.get_size()
    #print "The size of the window is %d x %d" % sz
    pb = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,sz[0],sz[1])
    pb = pb.get_from_drawable(w,w.get_colormap(),0,0,0,0,sz[0],sz[1])

    # Convert gtk.PixelBuf to a NumPy array
    array = pb.get_pixels_array()
    # Convert NumPy array to CvMat
    mat = cv.fromarray(array)
    # Convert RGB to BGR
    cv.CvtColor(mat, mat, cv.CV_RGB2BGR)

    #cv.ShowImage("win",mat)
    #cv.WaitKey(0)

    return mat
def getMeaningFromCards(cards):
    """
    This takes a dictionary of the form:
        (x, y) : Card image
    and returns a dictionary of the form:
        (x, y) : (number, suit)

    (x, y) are the coordinates of the top left of the card
    """
    imgdir = "LibraryImages"
    templatesNums = os.listdir(os.path.join(imgdir,"Numbers"))
    templatesSuits = os.listdir(os.path.join(imgdir,"Suits"))
    #templates = filter(lambda s: s[-4:] == ".png", templates)
    templatesNums = map(lambda s: os.path.join(imgdir,"Numbers", s), templatesNums)
    templatesSuits = map(lambda s: os.path.join(imgdir, "Suits", s), templatesSuits)

    for k in cards.keys():
        card = cards[k]
        cardImg = cv.CreateImageHeader((card.width, card.height), 8, 3)
        cv.SetData(cardImg, card.tostring())
        numAndSuit3 = cv.GetSubRect(cardImg, (0,0,30,80))
        numAndSuit1 = cv.CreateImage((numAndSuit3.width, numAndSuit3.height), 8, 1)
        cv.CvtColor(numAndSuit3, numAndSuit1, cv.CV_RGB2GRAY)
        # Convert the 1 channel grayscale to 3 channel grayscale
        # (GRAY2RGB doesn't actually introduce color)
        cv.CvtColor(numAndSuit1, numAndSuit3, cv.CV_GRAY2RGB)

        num = findBestTemplateMatch(templatesNums, numAndSuit3)
        suit = findBestTemplateMatch(templatesSuits, numAndSuit3)
        #print num, suit

        # If this image was recognized as a card, but didn't match
        # any template, it shouldn't be in the list in the first place
        if num == None or suit == None:
            del cards[k]
            continue

        num = string.split(os.path.basename(num), '.')[0]
        suit = string.split(os.path.basename(suit), '.')[0]

        # The alternate file names have underscores
        # after their names
        if num[-1] == '_':
            num = num[:-1]
        if suit[-1] == '_':
            suit = suit[:-1]

        cards[k] = (num, suit)

        #cv.ShowImage("NumandSuit", numAndSuit)
        #cv.WaitKey(0)

    print cards
    return cards
def findBestTemplateMatch(tplList, img):
    """
    Compares img against a list of templates.
    tplList is a list of string filenames of template images
    Returns a tuple (num, suit) if a template is suitably matched
    or None if not
    """
    minTpl = 200  # arbitrarily large number
    tString = None
    for t in tplList:
        tpl = cv.LoadImage(t)
        w = img.width - tpl.width + 1
        h = img.height - tpl.height + 1
        result = cv.CreateImage((w,h), 32, 1)
        cv.MatchTemplate(img, tpl, result, cv.CV_TM_SQDIFF_NORMED)

        (minVal, maxVal, minLoc, maxLoc) = cv.MinMaxLoc(result)
        #print t
        #print (minVal, maxVal, minLoc, maxLoc)

        # 0.2 found by experiment (the non-card images end up being around
        # 0.25 - 0.28, and all the card images were much around 0.08 and less
        if minVal < minTpl and minVal < 0.2:
            minTpl = minVal
            tString = t

    #print minTpl, tString
    #cv.ShowImage("win", img)
    #cv.ShowImage("win2", result)
    #cv.WaitKey(0)
    return tString
def extractCards(fileName = None):
    """
    Given an image, this will extract the cards from it.

    This takes a filename as an optional argument
    This filename should be the name of an image file.

    This returns a dictionary of the form:
        (x, y) : Card image
    It is likely that the output from this will go to the
    getMeaningFromCards() function.
    """
    if fileName == None:
        mat = takeScreenCapture()
    else:
        mat = cv.LoadImage(fileName)

    # First crop the image: but only crop out the bottom.
    # It is useful to have all dimensions accurate to the screen
    # because otherwise they will throw off the mouse moving and clicking.
    # Cropping out the bottom does not change anything in terms of the mouse.
    unnec_top_distance = 130
    unnec_bottom_distance = 40
    margin = 50
    submat = cv.GetSubRect(mat, (0,0,mat.width, mat.height - unnec_bottom_distance))
    subImg = cv.CreateImageHeader((submat.width, submat.height), 8, 3)
    cv.SetData(subImg, submat.tostring())

    gray = cv.CreateImage((submat.width, submat.height), 8, 1)
    cv.CvtColor(submat, gray, cv.CV_RGB2GRAY)

    thresh = 250
    max_value = 255
    cv.Threshold(gray, gray, thresh, max_value, cv.CV_THRESH_BINARY)
    cv.Not(gray,gray)

    #cv.ShowImage("sub", submat)
    #cv.WaitKey(0)

    storage = cv.CreateMemStorage(0)
    cpy = cv.CloneImage(gray)
    contours = cv.FindContours(cpy, storage, cv.CV_RETR_LIST, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))
    #contours = cv.ApproxPoly(contours, cv.CreateMemStorage(), cv.CV_POLY_APPROX_DP, 3, 1)

    bboxes = []
    if contours:
        while contours:
            area = cv.ContourArea(contours)
            # It turns out that all the cards are about 44000 in area...
            # It would definitely be nice to have a better way to do this:
            # ie, find the size of the card programmatically and use it then
            if area > 44000 and area < submat.width*submat.height*2/3:
                bb = cv.BoundingRect(contours)
                bboxes.append(bb)
            contours = contours.h_next()

    #drawBoundingBoxes(bboxes, submat)

    # cards is a dictionary of the form:
    #   (x, y) : card
    cards = {}
    for box in bboxes:
        card = cv.GetSubRect(subImg, box)
        #cv.ShowImage("card", card)
        #cv.WaitKey(0)
        cards[(box[0], box[1])] = card

    return cards
def drawBoundingBoxes(bb, img):
    for b in bb:
        x = b[0]
        y = b[1]
        width = b[2]
        height = b[3]
        cv.Rectangle(img, (x,y), (x+width, y+height), (0,255,0,0))
    cv.ShowImage("bb", img)
    cv.WaitKey(0)
def drawSquares(listWithPoints, img):
    for l in listWithPoints:
        for p in range(len(l)-1):
            cv.Line(img, l[p], l[p+1], (0,0,255,0), 2)
        cv.Line(img, l[-1], l[0], (0,0,255,0), 2)
    #cv.ShowImage("sub", img)
    #cv.WaitKey(0)

def contourToPointList(contour):
    plist = []
    for (x,y) in contour:
        plist.append((x,y))
    return plist
if __name__ == '__main__':
    cards = extractCards('CardImages/4_heart.jpg')
    print cards
    #c = cards[cards.keys()[0]]
    #print c
Is it possible to add it manually through the folders? Would it work if I put the package here:
C:\Users\User1\AppData\Local\Programs\Python\Python38\include
From the Python documentation, replace import gtk.gdk with:
import gi
gi.require_version("Gtk", "insert your gtk version")
from gi.repository import Gtk
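For the screen capture itself, a minimal GTK 3 sketch of the old gtk.gdk code (assuming PyGObject with GDK 3 is installed; the helper name is mine, not from the question):

import gi
gi.require_version("Gdk", "3.0")
from gi.repository import Gdk

def take_screen_capture():
    # the root window, as gtk.gdk.get_default_root_window() in GTK 2
    win = Gdk.get_default_root_window()
    w, h = win.get_width(), win.get_height()
    # Gdk.pixbuf_get_from_window replaces the old Pixbuf.get_from_drawable
    return Gdk.pixbuf_get_from_window(win, 0, 0, w, h)

Note also that the old cv module is the long-removed OpenCV 1.x API; on Python 3.8 the code would need the cv2 module (opencv-python) instead.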
I'm trying to create trails and a map of trails of all IDs, like in this video: https://www.youtube.com/watch?v=tq0BgncuMhs
So far I haven't been able to. I'm currently using this repo from bendidi, https://github.com/bendidi/Tracking-with-darkflow, which I modified to also show the trails.
I did try using cv2.line with the points extracted from track.to_tlbr(), but right now the result looks like this:
Here's the code that I modified to get the current result:
darkflow/darkflow/net/yolov2/predict.py
from collections import deque
import numpy as np
import math
import cv2
import os
import json
#from scipy.special import expit
#from utils.box import BoundBox, box_iou, prob_compare
#from utils.box import prob_compare2, box_intersection
from ...utils.box import BoundBox
from ...cython_utils.cy_yolo2_findboxes import box_constructor
ds = True
try:
    from deep_sort.application_util import preprocessing as prep
    from deep_sort.application_util import visualization
    from deep_sort.deep_sort.detection import Detection
except:
    ds = False

def expit(x):
    return 1. / (1. + np.exp(-x))

def _softmax(x):
    e_x = np.exp(x - np.max(x))
    out = e_x / e_x.sum()
    return out
def findboxes(self, net_out):
    # meta
    meta = self.meta
    boxes = list()
    boxes = box_constructor(meta, net_out)
    return boxes

def extract_boxes(self, new_im):
    cont = []
    new_im = new_im.astype(np.uint8)
    ret, thresh = cv2.threshold(new_im, 127, 255, 0)
    p, contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(0, len(contours)):
        cnt = contours[i]
        x, y, w, h = cv2.boundingRect(cnt)
        if w*h > 30**2 and ((w < new_im.shape[0] and h <= new_im.shape[1]) or (w <= new_im.shape[0] and h < new_im.shape[1])):
            if self.FLAGS.tracker == "sort":
                cont.append([x, y, x+w, y+h])
            else:
                cont.append([x, y, w, h])
    return cont
def postprocess(self, net_out, im, frame_id=0, csv_file=None, csv=None, mask=None, encoder=None, tracker=None):
    """
    Takes net output, draw net_out, save to disk
    """
    boxes = self.findboxes(net_out)

    # meta
    meta = self.meta
    nms_max_overlap = 0.1
    threshold = meta['thresh']
    colors = meta['colors']
    labels = meta['labels']
    if type(im) is not np.ndarray:
        imgcv = cv2.imread(im)
    else:
        imgcv = im
    h, w, _ = imgcv.shape
    thick = int((h + w) // 300)
    resultsForJSON = []

    if not self.FLAGS.track:
        for b in boxes:
            boxResults = self.process_box(b, h, w, threshold)
            if boxResults is None:
                continue
            left, right, top, bot, mess, max_indx, confidence = boxResults
            if self.FLAGS.json:
                resultsForJSON.append({"label": mess, "confidence": float('%.2f' % confidence), "topleft": {"x": left, "y": top}, "bottomright": {"x": right, "y": bot}})
                continue
            if self.FLAGS.display or self.FLAGS.saveVideo:
                cv2.rectangle(imgcv,
                              (left, top), (right, bot),
                              colors[max_indx], thick)
                cv2.putText(imgcv, mess, (left, top - 12),
                            0, 1e-3 * h, colors[max_indx], thick//3)
    else:
        if not ds:
            print("ERROR : deep sort or sort submodules not found for tracking please run :")
            print("\tgit submodule update --init --recursive")
            print("ENDING")
            exit(1)
        detections = []
        scores = []
        lines = deque(maxlen=64)
        for b in boxes:
            boxResults = self.process_box(b, h, w, threshold)
            if boxResults is None:
                continue
            left, right, top, bot, mess, max_indx, confidence = boxResults
            if mess not in self.FLAGS.trackObj:
                continue
            if self.FLAGS.tracker == "deep_sort":
                detections.append(np.array([left, top, right-left, bot-top]).astype(np.float64))
                scores.append(confidence)
            elif self.FLAGS.tracker == "sort":
                detections.append(np.array([left, top, right, bot]).astype(np.float64))
        if len(detections) < 3 and self.FLAGS.BK_MOG:
            detections = detections + extract_boxes(self, mask)

        detections = np.array(detections)
        if detections.shape[0] == 0:
            return imgcv
        if self.FLAGS.tracker == "deep_sort":
            scores = np.array(scores)
            features = encoder(imgcv, detections.copy())
            detections = [
                Detection(bbox, score, feature) for bbox, score, feature in
                zip(detections, scores, features)]
            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = prep.non_max_suppression(boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]
            tracker.predict()
            tracker.update(detections)
            trackers = tracker.tracks
        elif self.FLAGS.tracker == "sort":
            trackers = tracker.update(detections)

        for track in trackers:
            if self.FLAGS.tracker == "deep_sort":
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2))  # X + Width / 2, Y + Height / 2
                lines.appendleft(center)
                id_num = str(track.track_id)
            elif self.FLAGS.tracker == "sort":
                bbox = [int(track[0]), int(track[1]), int(track[2]), int(track[3])]
                id_num = str(int(track[4]))
            if self.FLAGS.csv:
                csv.writerow([frame_id, id_num, int(bbox[0]), int(bbox[1]), int(bbox[2])-int(bbox[0]), int(bbox[3])-int(bbox[1])])
                csv_file.flush()
            if self.FLAGS.display or self.FLAGS.saveVideo:
                cv2.rectangle(imgcv, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                              (255, 255, 255), thick//3)
                cv2.putText(imgcv, id_num, (int(bbox[0]), int(bbox[1]) - 12), 0, 1e-3 * h, (255, 255, 255), thick//6)
                for i in range(1, len(lines)):
                    cv2.line(imgcv, lines[i - 1], lines[i], (255, 255, 255), thick//3)
    return imgcv
Or just the code that I added:
...
lines = deque(maxlen=64)
...
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
...
for i in range(1, len(lines)):
    cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
Could someone help me out on this? Or should I do something with the data first instead of plugging it straight into cv2.line? If you also have any suggestions for using external software rather than Python, those are welcome too (I have frame_id, track_id, x, y, w, h data).
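One observation on the snippet above (mine, not from the thread): lines is a single deque re-created on every postprocess() call, so the centers of all track IDs are interleaved into one trail and the history is thrown away each frame. A hedged sketch of keeping one persistent trail per ID instead (update_trail is a hypothetical helper; it assumes postprocess() runs once per frame):

from collections import defaultdict, deque
import cv2

# module-level history: one deque per track ID, surviving across frames
track_history = defaultdict(lambda: deque(maxlen=64))

def update_trail(imgcv, id_num, center, thick):
    # append the newest center for this ID, then draw its polyline
    pts = track_history[id_num]
    pts.appendleft(center)
    for i in range(1, len(pts)):
        cv2.line(imgcv, pts[i - 1], pts[i], (255, 255, 255), max(1, thick // 3))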
Just four little lines are causing a problem with the alien rain program that I ported from the OpenGL SuperBible. It seems I am having issues trying to write to memory after using the function glMapBufferRange.
Update: Excellent code by Rabbid76 has solved the problem and provided a valuable explanation. Thank you.
Required files: ktxloader.py, aliens.ktx
Source code of alienrain.py:
#!/usr/bin/python3
import sys
import time

sys.path.append("./shared")

#from sbmloader import SBMObject  # location of sbm file format loader
from ktxloader import KTXObject  # location of ktx file format loader
#from sbmath import m3dDegToRad, m3dRadToDeg, m3dTranslateMatrix44, m3dRotationMatrix44, m3dMultiply, m3dOrtho, m3dPerspective, rotation_matrix, translate, m3dScaleMatrix44

fullscreen = True

import numpy.matlib
import numpy as np
import math

try:
    from OpenGL.GLUT import *
    from OpenGL.GL import *
    from OpenGL.GLU import *
    from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays, glBindVertexArray
except:
    print('''
    ERROR: PyOpenGL not installed properly.
    ''')
    sys.exit()
identityMatrix = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]

render_prog = GLuint(0)
render_vao = GLuint(0)

tex_alien_array = GLuint(0)
rain_buffer = GLuint(0)

droplet_x_offset = []
droplet_rot_speed = []
droplet_fall_speed = []

seed = 0x13371337

import random
import ctypes
random.seed(0x13371337)

def random_float():
    # global seed
    # res = 0.0
    # tmp = 0
    # seed *= 16807;
    # tmp = seed ^ (seed >> 4) ^ (seed << 15);
    # res = (tmp >> 9) | 0x3F800000;
    # return (res - 1.0);
    return (random.random() - 1.0)
class Scene:
    def __init__(self, width, height):
        global render_prog
        global render_vao
        global tex_alien_array
        global rain_buffer
        global droplet_x_offset, droplet_rot_speed, droplet_fall_speed

        self.width = width
        self.height = height

        vs = GLuint(0)
        fs = GLuint(0)

        vs_source = '''
#version 410 core

layout (location = 0) in int alien_index;

out VS_OUT
{
    flat int alien;
    vec2 tc;
} vs_out;

struct droplet_t
{
    float x_offset;
    float y_offset;
    float orientation;
    float unused;
};

layout (std140) uniform droplets
{
    droplet_t droplet[256];
};

void main(void)
{
    const vec2[4] position = vec2[4](vec2(-0.5, -0.5),
                                     vec2( 0.5, -0.5),
                                     vec2(-0.5,  0.5),
                                     vec2( 0.5,  0.5));
    vs_out.tc = position[gl_VertexID].xy + vec2(0.5);
    float co = cos(droplet[alien_index].orientation);
    float so = sin(droplet[alien_index].orientation);
    mat2 rot = mat2(vec2(co, so),
                    vec2(-so, co));
    vec2 pos = 0.25 * rot * position[gl_VertexID];
    gl_Position = vec4(pos.x + droplet[alien_index].x_offset,
                       pos.y + droplet[alien_index].y_offset,
                       0.5, 1.0);
    vs_out.alien = alien_index % 64;
}
'''
        fs_source = '''
#version 410 core

layout (location = 0) out vec4 color;

in VS_OUT
{
    flat int alien;
    vec2 tc;
} fs_in;

uniform sampler2DArray tex_aliens;

void main(void)
{
    color = texture(tex_aliens, vec3(fs_in.tc, float(fs_in.alien)));
}
'''
        vs = glCreateShader(GL_VERTEX_SHADER)
        glShaderSource(vs, vs_source)
        glCompileShader(vs)
        glGetShaderInfoLog(vs)

        fs = glCreateShader(GL_FRAGMENT_SHADER)
        glShaderSource(fs, fs_source)
        glCompileShader(fs)
        glGetShaderInfoLog(fs)

        render_prog = glCreateProgram()
        glAttachShader(render_prog, vs)
        glAttachShader(render_prog, fs)
        glLinkProgram(render_prog)
        glDeleteShader(vs)
        glDeleteShader(fs)
        glGetProgramInfoLog(render_prog)

        glGenVertexArrays(1, render_vao)
        glBindVertexArray(render_vao)

        ktxobj = KTXObject()
        tex_alien_array = ktxobj.ktx_load("aliens.ktx")
        glBindTexture(GL_TEXTURE_2D_ARRAY, tex_alien_array)
        glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)

        glGenBuffers(1, rain_buffer)
        glBindBuffer(GL_UNIFORM_BUFFER, rain_buffer)
        glBufferData(GL_UNIFORM_BUFFER, 256*4*4, None, GL_DYNAMIC_DRAW)

        for i in range(0, 256):
            droplet_x_offset.append(random_float() * 2.0 - 1.0)
            droplet_rot_speed.append((random_float() + 0.5) * (-3.0 if (i & 1) else 3.0))
            droplet_fall_speed.append(random_float() + 0.2)

        glBindVertexArray(render_vao)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
    def display(self):
        global rain_buffer

        green = [0.0, 0.1, 0.0, 0.0]
        currentTime = time.time()
        t = currentTime

        glViewport(0, 0, self.width, self.height)
        glClearBufferfv(GL_COLOR, 0, green)
        glUseProgram(render_prog)

        glBindBufferBase(GL_UNIFORM_BUFFER, 0, rain_buffer)
        droplet = glMapBufferRange(GL_UNIFORM_BUFFER, 0, 256*4*4, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)
        float_array = ((ctypes.c_float * 4) * 256).from_address(droplet)
        for i in range(0, 256):
            float_array[i][0] = droplet_x_offset[i] + 2
            float_array[i][1] = 2.0 - math.fmod((t + float(i)) * droplet_fall_speed[i], 4.31) * random_float()
            float_array[i][2] = droplet_rot_speed[i] * t * random_float() * math.pi
            float_array[i][3] = 0.0
        glUnmapBuffer(GL_UNIFORM_BUFFER)

        for alien_index in range(0, 256):
            glVertexAttribI1i(0, alien_index)
            glDrawArrays(GL_TRIANGLE_STRIP, 0, 4)

        glutSwapBuffers()
    def reshape(self, width, height):
        self.width = width
        self.height = height

    def keyboard(self, key, x, y):
        global fullscreen
        global many_cubes

        print('key:', key)
        if key == b'\x1b':  # ESC
            sys.exit()
        elif key == b'f' or key == b'F':  # fullscreen toggle
            if fullscreen == True:
                glutReshapeWindow(512, 512)
                glutPositionWindow(int((1360/2)-(512/2)), int((768/2)-(512/2)))
                fullscreen = False
            else:
                glutFullScreen()
                fullscreen = True
        print('done')

    def init(self):
        pass

    def timer(self, blah):
        glutPostRedisplay()
        glutTimerFunc(int(1/60), self.timer, 0)
        time.sleep(1/60.0)
if __name__ == '__main__':
    start = time.time()

    glutInit()
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(512, 512)
    w1 = glutCreateWindow('OpenGL SuperBible - Alien Rain')
    glutInitWindowPosition(int((1360/2)-(512/2)), int((768/2)-(512/2)))
    fullscreen = False
    many_cubes = False
    #glutFullScreen()

    scene = Scene(512, 512)
    glutReshapeFunc(scene.reshape)
    glutDisplayFunc(scene.display)
    glutKeyboardFunc(scene.keyboard)
    glutIdleFunc(scene.display)
    #glutTimerFunc(int(1/60), scene.timer, 0)
    scene.init()

    glutMainLoop()
Current output is:
Update: the output is now very similar to the expected output, except that it runs very fast and the rotation of each alien is not as smooth as in the expected output. If anyone wants to tinker with the values to get there, please do. Otherwise this question is answered.
The output should be:
Here's the alienrain.cpp that was used to create alienrain.py.
First of all, the 2nd parameter of glBufferData is the size of the buffer data in bytes.
Since the GLSL data structure is
struct droplet_t
{
float x_offset;
float y_offset;
float orientation;
float unused;
};
layout (std140) uniform droplets
{
droplet_t droplet[256];
};
the buffer size is 4*4*256, because the size of a float is 4, the structure has 4 elements of type float, and the array has 256 elements:
glBufferData(GL_UNIFORM_BUFFER, 256*4*4, None, GL_DYNAMIC_DRAW)
The instruction
droplet = glMapBufferRange(GL_UNIFORM_BUFFER, 0, 256*4*4, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)
returns a pointer to an allocated memory region of the proper size. You have to fill this memory with data.
The easiest solution is to use the built-in Python library ctypes, which has the handy function .from_address():
This method returns a ctypes type instance using the memory specified by address.
so the instruction
float_array = ((ctypes.c_float * 4) * 256).from_address(droplet)
"wraps" a 2 dimensional array to the memory addressed by droplet, with 256 elements and each element has 4 elements of type float.
The values can then be set in the array by simple assignment statements, e.g.:
import random
import math
import ctypes
glBindBufferBase(GL_UNIFORM_BUFFER, 0, rain_buffer);
droplet = glMapBufferRange(GL_UNIFORM_BUFFER, 0, 256*4*4, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT)
float_array = ((ctypes.c_float * 4) * 256).from_address(droplet)
for i in range(0, 256):
    float_array[i][0] = random.random() * 2 - 1
    float_array[i][1] = random.random() * 2 - 1
    float_array[i][2] = random.random() * math.pi * 2
    float_array[i][3] = 0.0
I'm experimenting with the fontforge Python library/wrapper and I don't seem to get multiple-character substitution to work.
Maybe you gals and guys could help to point out what I'm doing wrong?
Basically, I'm trying to replace "ABC" with the character representation of "a", but I'm going to extend that with proper substitutions once this is working.
from random import randint
from datetime import datetime

def add_pixel(pen, x, y, scale = 100):
    pen.moveTo(((x + 0) * scale, (y + 0) * scale))
    pen.lineTo(((x + 0) * scale, (y + 1) * scale))
    pen.lineTo(((x + 1) * scale, (y + 1) * scale))
    pen.lineTo(((x + 1) * scale, (y + 0) * scale))
    pen.closePath()

def add_character(font, code, name):
    if not name in list(font):
        font.createChar(code, name)
    pen = font[code].glyphPen()
    for i in range(1, 15):
        add_pixel(pen, randint(0, 15), randint(0, 15), 100 * 10 / 15)

try:
    import fontforge
except Exception, e:
    raise
else:
    font = fontforge.font()
    font.familyname = "font"
    font.fontname = "font x15"
    font.fullname = "font x15"
    font.version = datetime.now().strftime("%Y-%m-%d %H:%M")

    # lower
    for c in range(0x61, 0x61 + 26):
        add_character(font, c, unichr(c))

    # upper
    for c in range(0x41, 0x41 + 26):
        add_character(font, c, unichr(c))

    font.addLookup("gsub", "gsub_multiple", (), (("dlig",(("latn",("dflt")),)),))
    font.addLookupSubtable("gsub", "gsub_n")

    glyph = font["a"]
    glyph.addPosSub("gsub_n", ("A", "B", "C"))

    # font.save("font.x15.sfd")
    font.generate("font.x15.otf", flags=("PfEd-lookups", "opentype"))
finally:
    pass
I think your lookup type doesn't match the feature.
# Try "gsub_ligature" as type.
font.addLookup("gsub", "gsub_ligature", (), (("dlig",(("latn",("dflt")),)),))
Tip: You can inspect your features by generating a feature file:
font.generateFeatureFile("features.fea")
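If the lookup is wired up correctly, the generated file should contain a ligature rule along these lines (a sketch of the expected AFDKO feature syntax; exact names and formatting may differ):

feature dlig {
    sub A B C by a;
} dlig;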