How to correctly draw boxes in pyprocessing? - python
I'm trying to write a very basic box drawing program using pyprocessing,
but a condition to check if the mouse is within a box fails, even though the logic looks ok:
#!/usr/bin/env python
from pyprocessing import *
S = 20
W = 5
H = 5
data = [[0] * W] * H
def setup():
size(W*(S+5),H*(S+5))
def draw():
background(0)
for y in xrange(H):
for x in xrange(W):
fill(data[x][y] * 255)
rect(x*S,y*S,S,S)
def mouseDragged():
for y in xrange(H):
for x in xrange(W):
xs = x * S
ys = y * S
# this doesn't behave as expected: it should draw a single box if the condition is met, not the whole row
if (mouse.x >= xs) and (mouse.x <= (xs+S)) and (mouse.y >= ys and mouse.y <= (ys+S)):
if key.pressed:
data[x][y] = 0
else:
data[x][y] = 1
run()
I've tried the same approach using the Java version of Processing and it works as expected:
int S = 20;
int W = 5;
int H = 5;
int[][] data = new int[W][H];
void setup(){
size(100,100);
noStroke();
}
void draw(){
background(0);
for (int y = 0 ; y < H; y++){
for (int x = 0 ; x < W; x++){
fill(data[x][y] * 255);
rect(x*S,y*S,S,S);
}
}
}
void mouseDragged(){
for (int y = 0 ; y < H; y++){
for (int x = 0 ; x < W; x++){
int xs = x * S;
int ys = y * S;
if ((mouseX > xs) && (mouseX < (xs+S)) && (mouseY >= ys && mouseY <= (ys+S))){
data[x][y] = 1;
}
}
}
}
Similar behaviour in JS:
var S = 20;
var W = 5;
var H = 5;
var data = new Array(W);
function setup(){
createCanvas(100,100);
noStroke();
for (var i = 0 ; i < H; i++) data[i] = [0,0,0,0,0];
}
function draw(){
background(0);
for (var y = 0 ; y < H; y++){
for (var x = 0 ; x < W; x++){
fill(data[x][y] * 255);
rect(x*S,y*S,S,S);
}
}
}
function mouseDragged(){
for (var y = 0 ; y < H; y++){
for (var x = 0 ; x < W; x++){
var xs = x * S;
var ys = y * S;
if ((mouseX > xs) && (mouseX < (xs+S)) && (mouseY >= ys && mouseY <= (ys+S))){
data[x][y] = 1;
}
}
}
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/0.4.23/p5.min.js"></script>
Am I writing the box bounds condition correctly in Python? If so, is there a bug with pyprocessing? How can I get past it?
I'm using pyprocessing version '0.1.3.22' (as reported by pyprocessing.__version__).
Trying to be lazy is the issue:
data = [[0] * W] * H
This doesn't simply create a nested list: it creates H references to the same inner list ([0] * W), so when I modify one value through any row, the change shows up in every row.
Since I'm not super experienced with python, I've initialised the array in a probably non-pythonic way:
data = []
for y in xrange(H):
data.append([])
for x in xrange(W):
data[y].append(0)
So the full working code is:
#!/usr/bin/env python
from pyprocessing import *
S = 20
W = 5
H = 5
# data = [[0] * W] * H #trouble
data = []
for y in xrange(H):
data.append([])
for x in xrange(W):
data[y].append(0)
def setup():
size(W*(S),H*(S))
def draw():
background(0)
for y in xrange(H):
for x in xrange(W):
fill(data[x][y] * 255)
rect(x*S,y*S,S,S)
def mouseDragged():
for y in xrange(H):
for x in xrange(W):
xs = x * S
ys = y * S
if (mouse.x >= xs) and (mouse.x <= (xs+S)) and (mouse.y >= ys and mouse.y <= (ys+S)):
if key.pressed:
data[x][y] = 0
else:
data[x][y] = 1
run()
Related
How can I add a colour gradient feature to my bresenham line drawing algorithm?
I've found this thread on StackOverflow but my python understanding isn't that good to properly translate it to C, I'm trying to add that gradient feature to this line drawing algorithm: #define sign(x) ((x > 0)? 1 : ((x < 0)? -1: 0)) x = x1; y = y1; dx = abs(x2 - x1); dy = abs(y2 - y1); s1 = sign(x2 - x1); s2 = sign(y2 - y1); swap = 0; if (dy > dx) { temp = dx; dx = dy; dy = temp; swap = 1; } D = 2*dy - dx; for (i = 0; i < dx; i++) { display_pixel (x, y); while (D >= 0) { D = D - 2*dx; if (swap) x += s1; else y += s2; } D = D + 2*dy; if (swap) y += s2; else x += s1; } I feel bad for asking such a trivial task but I really can't understand what is going on on the python side nor how the colours are represented (mine are int(0xttrrggbb))
Figured it out: #define GAMMA 0.43 //Returns a linear value in the range [0,1] //for sRGB input in [0,255]. double ChannelInvCompanding(int c) { double y; c = c & 0xFF; y = (double) c; y = y / 255.0; if (c <= 0.04045) y = y / 12.92; else y = pow(((y + 0.055) / 1.055), 2.4); return (y); } //Convert color from 0..255 to 0..1 //Inverse Srgb Companding for //Red, Green, and Blue double *InverseSrgbCompanding(int c) { double *r = malloc(4 * sizeof(double)); r[0] = (double) get_t(c); r[1] = ChannelInvCompanding(get_r(c)); r[2] = ChannelInvCompanding(get_g(c)); r[3] = ChannelInvCompanding(get_b(c)); return (r); } //Apply companding to Red, Green, and Blue double ChannelCompanding(double c) { double x; if (c <= 0.0031308) x = 12.92 * c; else x = (1.055 * pow(c, (1/2.4))) - 0.055; return (x); } //return new color. Convert 0..1 back into 0..255 //Srgb Companding for Red, Green, and Blue int SrgbCompanding(double *c) { int t; int r; int g; int b; t = (int)c[0]; r = (int)(ChannelCompanding(c[1]) * 255); g = (int)(ChannelCompanding(c[2]) * 255); b = (int)(ChannelCompanding(c[3]) * 255); free(c); return (create_trgb(t, r, g, b)); } //sums channels //does not include transperancy double sumChannels(double *c) { double x = c[1] + c[2] + c[3]; return (x); } //Lerping see //https://en.wikipedia.org/wiki/Linear_interpolation //#Programming_language_support double lerp_int(double c1, double c2, double t) { return (c1 * (1 - t) + c2 * t); //return ((1 - t) * c1 + t * c2); } double *lerp(double *c1, double *c2, double t) { double *r = malloc(4 * sizeof(double)); //r[1] = ((1 - t) * c1[1] + t * c2[1]); //r[2] = ((1 - t) * c1[2] + t * c2[2]); //r[3] = ((1 - t) * c1[3] + t * c2[3]); r[1] = (c1[1] * (1 - t)) + c2[1] * t; r[2] = (c1[2] * (1 - t)) + c2[2] * t; r[3] = (c1[3] * (1 - t)) + c2[3] * t; return (r); } typedef struct s_bresvars { int x; int y; int dx; int dy; int s1; int s2; int swap; int temp; int d; int i; } t_bresvars; int sign(int x) { if (x > 0) return (1); else if (x < 0) 
return (-1); else return (0); } void bresenhams_alg(int x1, int y1, int x2, int y2, int scolor, int ecolor, t_vars *vars) { double step; double *color; double intensity; double total; int temp; int d; int clr; double *color1_lin = InverseSrgbCompanding(scolor); double bright1 = pow(sumChannels(c.color1_lin), GAMMA); double *color2_lin = InverseSrgbCompanding(ecolor); double bright2 = pow(sumChannels(c.color2_lin), GAMMA); int x = x1; int y = y1; int dx = abs(x2 - x1); int dy = abs(y2 - y1); int s1 = sign(x2 - x1); int s2 = sign(y2 - y1); int swap = 0; int i = 0; double step_c = 0; if (dy > dx) { temp = dx; dx = dy; dy = temp; swap = 1; } d = 2*dy - dx; step = (1.0 / dx); while (i < dx) { step_c += step; intensity = pow(lerp_int(bright1, bright2, step), (1 / GAMMA)); color = lerp(color1_lin, color2_lin, step); total = sumChannels(color); if (total != 0) c[1] = (c[1] * intensity / total); c[2] = (c[2] * intensity / total); c[3] = (c[3] * intensity / total); clr = SrgbCompanding(color); pixel_put(x, y, clr); while (v.d >= 0) { v.d = v.d - 2 * v.dx; if (v.swap) v.x += v.s1; else v.y += v.s2; } v.d = v.d + 2 * v.dy; if (v.swap) v.y += v.s2; else v.x += v.s1; v.i++; } free(color1_lin); free(color2_lin); }
Creating a snowflake in Python
I am trying to create a program in Python that creates a snowflake based on the input of a number. Below is my code: n = int(input()) a = [["."] * n] * n temp = n/2 start_point = 0 mid_point = int(temp) end_point = n - 1 for i in range(n): if i > mid_point + 1: start_point -= 1 end_point += 1 for j in range(n): if (j == start_point) or (j == mid_point) or (j == end_point) or (i == mid_point): a[i][j] = "*" else: a[i][j] = "." if i < mid_point - 1: start_point += 1 end_point -= 1 for row in a: print(' '.join([str(elem) for elem in row])) For example, if the input is '5' the output should look like: * . * . * . * * * . * * * * * . * * * . * . * . * However, my output looks like: . * * * . . * * * . . * * * . . * * * . . * * * . I was sure that my code was correct so I rewrote it in Java as: public class Snowflake { public static void createSnowflake(int n) { String[][] array = new String[n][n]; float temp = (float) (n/2); System.out.println(temp); int start_point = 0; int mid_point = (int) (temp); System.out.println(mid_point); int end_point = n - 1; for(int i = 0; i < n; i++) { if(i > mid_point+1) { start_point--; end_point++; } for(int j = 0; j < n; j++) { if((j == start_point) || (j == mid_point) || (j == end_point) || (i == mid_point)) { array[i][j] = "*"; } else { array[i][j] = "."; } } if(i < mid_point-1) { start_point++; end_point--; } } for(int i = 0; i < n; i++) { for(int j = 0; j < n; j++) { System.out.print(array[i][j]); } System.out.print("\n"); } } public static void main(String[] args) { createSnowflake(5); } } And it worked as expected. To my eyes the underlying logic is exactly the same, and yet the Java code works and the Python code doesn't. Could someone help me find where I've made a mistake in the Python syntax or how my Java code somehow differs from it?
If you change the creation of a to: a = [["." for j in range(n)] for i in range(n)] it should fix it. This has to do with the way python copies lists. Check the question linked in the comments to your question. Enjoyed this question, I feel like it could only be here during this time of the year.
How to sort points along a Hilbert curve without using Hilbert indices?
I'm trying to implement the algorithm described in the paper Fast Hilbert Sort Algorithm Without Using Hilbert Indices (https://www.researchgate.net/profile/Takeshi_Shinohara/publication/313074453_Fast_Hilbert_Sort_Algorithm_Without_Using_Hilbert_Indices/links/5b8468bd299bf1d5a72b9a0c/Fast-Hilbert-Sort-Algorithm-Without-Using-Hilbert-Indices.pdf?origin=publication_detail), but I can't get the right results. Below is my python code (For bitset and it's member functions flip and test in C++ , please refer to https://en.cppreference.com/w/cpp/utility/bitset): N=9 # 9 points n=2 # 2 dimension m=3 # order of Hilbert curve b=m-1 def BitTest(x,od,maxlen=3): bit=format(x,'b').zfill(maxlen) return int(bit[maxlen-1-od]) def BitFlip(b,pos,): b ^= 1 << pos return b def partition(A,st,en,od,ax,di): i = st j = en while True: while i < j and BitTest(A[i][ax],od)==di: i = i + 1 while i < j and BitTest(A[j][ax],od)!=di: j = j - 1 if i >= j: return i A[i], A[j] = A[j], A[i] def HSort(A,st,en,od,c,e,d,di,cnt): if en<=st: return p =partition(A,st,en,od,(d+c)%n,BitTest(e,(d+c)%n)) if c==n-1: if b==0: return d2= (d+n+n-(di if(di==2) else cnt+2))%n e=BitFlip(e,d2) e=BitFlip(e,(d+c)%n) HSort(A,st,p-1,b-1,0,e,d2,False,0) e=BitFlip(e,(d+c)%n) e=BitFlip(e,d2) d2= (d+n+n-(di if(di==cnt+2) else 2))%n HSort(A,p+1,en,b-1,0,e,d2,False,0) else: HSort(A,st,p-1,b,c+1,e,d,False,(di if(di==1) else cnt+1)) e=BitFlip(e,(d+c)%n) e=BitFlip(e,(d+c+1)%n) HSort(A,p+1,en,b,c+1,e,d,True,(di if(di==cnt+1) else 1)) e=BitFlip(e,(d+c+1)%n) e=BitFlip(e,(d+c)%n) array = [[2,2],[2,4],[3,4],[2,5],[3,5],[1,6],[3,6],[5,6],[3,7]] HSort(array,st=0,en=N-1,od=m-1,c=0,e=0,d=0,di=False,cnt=0) print(array)
That document has a typo, the constant "b" should be replaced with "od". Here is a working code in c++: #include <iostream> #include <vector> #include <array> constexpr std::int32_t m = 3; constexpr std::int32_t n = 2; bool test_bit(std::int32_t value, std::int32_t pos) { const auto result = value & (1 << pos); return result; } void flip_bit(std::int32_t &value, std::int32_t pos) { value ^= 1 << pos; } std::int32_t partition(std::vector<std::array<std::int32_t, 2>> &A, std::size_t st, std::size_t en, std::int32_t od, std::int32_t ax, bool di) { std::int32_t i = st - 1; std::int32_t j = en + 1; while(true) { do i = i + 1; while(i < j && test_bit(A[i][ax], od) == di); do j = j - 1; while(i < j && test_bit(A[j][ax], od) != di); if(j <= i) return i; //partition is complete std::swap(A[i], A[j]); } } void hilbert_sort(std::vector<std::array<std::int32_t, 2>> &A, std::size_t st, std::size_t en, std::int32_t od, std::int32_t c, std::int32_t &e, std::int32_t d, bool di, std::int32_t cnt) { std::int32_t p; std::int32_t d2; if(en <= st) return; p = partition(A, st, en, od, (d + c) % n, test_bit(e, (d + c) % n)); if(c == n - 1) { if(od == 0) return; d2 = (d + n + n - (di ? 2 : cnt + 2)) % n; flip_bit(e, d2); flip_bit(e, (d + c) % n); hilbert_sort(A, st, p - 1, od - 1, 0, e, d2, false, 0); flip_bit(e, (d + c) % n); flip_bit(e, d2); d2 = (d + n + n - (di ? cnt + 2 : 2)) % n; hilbert_sort(A, p, en, od - 1, 0, e, d2, false, 0); } else { hilbert_sort(A, st, p - 1, od, c + 1, e, d, false, di ? 1 : cnt + 1); flip_bit(e, (d + c) % n); flip_bit(e, (d + c + 1) % n); hilbert_sort(A, p, en, od, c + 1, e, d, true, di ? 
cnt + 1 : 1); flip_bit(e, (d + c + 1) % n); flip_bit(e, (d + c) % n); } } int main() { std::vector<std::array<std::int32_t, 2>> points = {{2,2},{2,4},{3,4},{2,5},{3,5},{1,6},{3,6},{5,6},{3,7}}; std::int32_t e = 0; hilbert_sort(points, 0, points.size() - 1, m - 1, 0, e, 0, false , 0); for(const auto &point : points) std::clog << "(" << point[0] << ", " << point[1] << ")\n"; return 0; } You also seems to have a typo "p+1" it should be just "p". Here is a working python code: N=9 # 9 points n=2 # 2 dimension m=3 # order of Hilbert curve def BitTest(x,od): result = x & (1 << od) return int(bool(result)) def BitFlip(b,pos): b ^= 1 << pos return b def partition(A,st,en,od,ax,di): i = st j = en while True: while i < j and BitTest(A[i][ax],od) == di: i = i + 1 while i < j and BitTest(A[j][ax],od) != di: j = j - 1 if j <= i: return i A[i], A[j] = A[j], A[i] def HSort(A,st,en,od,c,e,d,di,cnt): if en<=st: return p = partition(A,st,en,od,(d+c)%n,BitTest(e,(d+c)%n)) if c==n-1: if od==0: return d2= (d+n+n-(2 if di else cnt + 2)) % n e=BitFlip(e,d2) e=BitFlip(e,(d+c)%n) HSort(A,st,p-1,od-1,0,e,d2,False,0) e=BitFlip(e,(d+c)%n) e=BitFlip(e,d2) d2= (d+n+n-(cnt + 2 if di else 2))%n HSort(A,p,en,od-1,0,e,d2,False,0) else: HSort(A,st,p-1,od,c+1,e,d,False,(1 if di else cnt+1)) e=BitFlip(e,(d+c)%n) e=BitFlip(e,(d+c+1)%n) HSort(A,p,en,od,c+1,e,d,True,(cnt+1 if di else 1)) e=BitFlip(e,(d+c+1)%n) e=BitFlip(e,(d+c)%n) array = [[2,2],[2,4],[3,4],[2,5],[3,5],[1,6],[3,6],[5,6],[3,7]] HSort(array,st=0,en=N-1,od=m-1,c=0,e=0,d=0,di=False,cnt=0) print(array)
Generating numbers from normal distribution in Python
I'm trying to test the speed of generating numbers from normal distribution by using Box–Muller transform against Marsaglia polar method. It is said that Marsaglia polar method is suppose to be faster than Box–Muller transform because it does not need to compute sin and cos. However, when I code this in Python, this is not true. Can someone verify this or explain to me why this is happening? def marsaglia_polar(): while True: x = (random.random() * 2) - 1 y = (random.random() * 2) - 1 s = x * x + y * y if s < 1: t = math.sqrt((-2) * math.log(s)/s) return x * t, y * t def box_muller(): u1 = random.random() u2 = random.random() t = math.sqrt((-2) * math.log(u1)) v = 2 * math.pi * u2 return t * math.cos(v), t * math.sin(v)
For "fun", I wrote it up in go. The box_muller function is faster there as well. Also, it's about 10 times faster than the python version. package main import ( "fmt" "math" "math/rand" "time" ) func main() { rand.Seed(time.Now().UnixNano()) now := time.Now() for i := 0; i < 1000000; i++ { marsaglia_polar() } fmt.Println("marsaglia_polar duration = ", time.Since(now)) now = time.Now() for i := 0; i < 1000000; i++ { box_muller() } fmt.Println("box_muller duration = ", time.Since(now)) } func marsaglia_polar() (float64, float64) { for { x := random() * 2 - 1; y := random() * 2 - 1; s := x * x + y * y; if s < 1 { t := math.Sqrt((-2) * math.Log(s)/s); return x * t, y * t } } } func box_muller() (float64, float64) { u1 := random() u2 := random() t := math.Sqrt((-2) * math.Log(u1)) v := 2 * math.Pi * u2 return t * math.Cos(v), t * math.Sin(v) } func random() float64 { return rand.Float64() } Output: marsaglia_polar duration = 104.308126ms box_muller duration = 88.365933ms
Cython: for i from 1 <= i < N
I'm learning Cython and came across this snippit of code: import numpy as np cimport numpy as np def mean(np.ndarray[np.double_t] input): cdef np.double_t cur # Py_ssize_t is numpy's index type cdef Py_ssize_t i cdef Py_ssize_t N = len(input) for i from 0 <= i < N: cur += input[i] return cur / N a=np.array([1,2,3,4], dtype=np.double) Obviously, this returns the mean of a which is 2.5. My question is this: Is the for loop a Python loop, Cython, or C?
Compile it and see: the C code that Cython produces is nicely annotated. /* "cyexample.pyx":11 * cdef Py_ssize_t N = len(input) * * for i from 0 <= i < N: # <<<<<<<<<<<<<< * cur += input[i] * */ __pyx_t_1 = __pyx_v_N; for (__pyx_v_i = 0; __pyx_v_i < __pyx_t_1; __pyx_v_i++) { /* "cyexample.pyx":12 * * for i from 0 <= i < N: * cur += input[i] # <<<<<<<<<<<<<< * * return cur / N */ __pyx_t_2 = __pyx_v_i; __pyx_t_3 = -1; if (__pyx_t_2 < 0) { __pyx_t_2 += __pyx_bshape_0_input; if (unlikely(__pyx_t_2 < 0)) __pyx_t_3 = 0; } else if (unlikely(__pyx_t_2 >= __pyx_bshape_0_input)) __pyx_t_3 = 0; if (unlikely(__pyx_t_3 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 12; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_cur = (__pyx_v_cur + (*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_double_t *, __pyx_bstruct_input.buf, __pyx_t_2, __pyx_bstride_0_input))); } And so the loop itself is successfully turned into C. Note that these days Cython can handle range naturally, so the older "from 0 <= i < N" style isn't necessary. The point of introducing the (non-Python) "for/from" syntax was to signify which loops should be C-ified.
for..from seems to be a Pyrex / Cython loop: http://docs.cython.org/src/userguide/language_basics.html#integer-for-loops