I'm running into difficulties trying to run this image segmentation code.
The idea I have is to take an image such as:
http://imgur.com/a/AmcKq
and extract all the black squigglies and save each individual squiggly as its own image.
It seems like the code is working, but it's not segmenting my images for some reason.
The error I am getting is: ('segments detected:', 0)
This is the code I'm using:
import os, sys
import numpy as np
from scipy import ndimage as ndi
from scipy.misc import imsave
import matplotlib.pyplot as plt
from skimage.filters import sobel, threshold_local
from skimage.morphology import watershed
from skimage import io
def open_image(name):
    filename = os.path.join(os.getcwd(), name)
    return io.imread(filename, as_grey=True)

def adaptive_threshold(image):
    print(type(image))
    print(image)
    block_size = 41
    binary_adaptive = threshold_local(image, block_size, offset=10)
    binary_adaptive = np.asarray(binary_adaptive, dtype=int)
    return np.invert(binary_adaptive) * 1.

def segmentize(image):
    # make segmentation using edge-detection and watershed
    edges = sobel(image)
    markers = np.zeros_like(image)
    foreground, background = 1, 2
    markers[image == 0] = background
    markers[image == 1] = foreground

    ws = watershed(edges, markers)
    return ndi.label(ws == foreground)

def find_segment(segments, index):
    segment = np.where(segments == index)
    shape = segments.shape

    minx, maxx = max(segment[0].min() - 1, 0), min(segment[0].max() + 1, shape[0])
    miny, maxy = max(segment[1].min() - 1, 0), min(segment[1].max() + 1, shape[1])

    im = segments[minx:maxx, miny:maxy] == index
    return (np.sum(im), np.invert(im))

def run(f):
    print('Processing:', f)
    image = open_image(f)
    processed = adaptive_threshold(image)
    segments = segmentize(processed)

    print('Segments detected:', segments[1])

    seg = []
    for s in range(1, segments[1]):
        seg.append(find_segment(segments[0], s))

    seg.sort(key=lambda s: -s[0])

    for i in range(len(seg)):
        imsave('segments/' + f + '_' + str(i) + '.png', seg[i][1])

folder = os.path.join(os.getcwd(), 'segments')
os.path.isfile(folder) and os.remove(folder)
os.path.isdir(folder) or os.mkdir(folder)

for f in sys.argv[1:]:
    run(f)
I'll also mention I'm running this Python script from within Processing 3.3.5 using this as my sketch file:
import deadpixel.command.Command;
static final String BASH =
  platform == WINDOWS? "cmd /C " :
  platform == MACOSX? "open" : "xdg-open";

static final String CD = "cd ", PY_APP = "python ";
static final String AMP = " && ", SPC = " ";

static final String PY_DIR = "scripts/";
//static final String PY_FILE = PY_DIR + "abc.py";
static final String PY_FILE = PY_DIR + "segmenting.py";

static final String PICS_DIR = "images/";
static final String PICS_EXTS = "extensions=,png,jpg,jpeg,gif";

void setup() {
  final String dp = dataPath(""), py = dataPath(PY_FILE);
  final String prompt = BASH + CD + dp + AMP + PY_APP + py;

  final String pd = dataPath(PICS_DIR);
  final String pics = join(listPaths(pd, PICS_EXTS), SPC);

  final Command cmd = new Command(prompt + SPC + pics);
  println(cmd.command, ENTER);

  println("Success:", cmd.run(), ENTER);
  printArray(cmd.getOutput());

  exit();
}
And this in a new tab in processing:
https://github.com/GoToLoop/command/blob/patch-1/src/deadpixel/command/Command.java
A quick investigation reveals the problem: this function here
def adaptive_threshold(image):
    print(type(image))
    print(image)
    block_size = 41
    binary_adaptive = threshold_local(image, block_size, offset=10)
    binary_adaptive = np.asarray(binary_adaptive, dtype=int)
    return np.invert(binary_adaptive) * 1.
is supposed to create a mask of the image by adaptive thresholding - but this goes (very) wrong.
The main reason seems to be a misunderstanding of how threshold_local works: this code expects it to return a binarized segmented version of the input image, when in reality it returns a threshold image, see explanation here.
This is not the only problem, however. For images like the one in your example, offset=10 reduces the threshold produced by threshold_local way too far, so the entire image would be above the threshold.
Here's a working version of the function:
def adaptive_threshold(image):
    # Create threshold image
    # Offset is not desirable for these images
    block_size = 41
    threshold_img = threshold_local(image, block_size)

    # Binarize the image with the threshold image
    binary_adaptive = image < threshold_img

    # Convert the mask (which has dtype bool) to dtype int
    # This is required for the code in `segmentize` (below) to work
    binary_adaptive = binary_adaptive.astype(int)

    # Return the binarized image
    return binary_adaptive
If the code is run with this function (with python; this problem has nothing to do with Processing, as far as I can tell), it returns Segments detected: 108 and it produces a nice segmentation:
plt.imshow(segments[0],interpolation='none')
plt.show()
Side note: based on how you phrased your question, am I correct to assume that you did not write this code yourself and that you perhaps have limited expertise in this field?
If so, you may be interested in learning a bit more about python-based image processing and segmentation. I recently ran a short course on this topic that includes a completely self-explanatory hands-on tutorial of a pipeline similar to the one you are using here. The materials are openly accessible, so feel free to have a look.
Edit:
As per your comment, here is a solution that should allow the program to run with full paths as input.
First, remove all this:
folder = os.path.join(os.getcwd(), 'segments')
os.path.isfile(folder) and os.remove(folder)
os.path.isdir(folder) or os.mkdir(folder)
so that only this remains:
for f in sys.argv[1:]:
    run(f)
Next, replace this:
for i in range(len(seg)):
    imsave('segments/' + f + '_' + str(i) + '.png', seg[i][1])
by this:
# Get the directory name (if a full path is given)
folder = os.path.dirname(f)
# Get the file name
filenm = os.path.basename(f)
# If it doesn't already exist, create a new dir "segments"
# to save the PNGs
segments_folder = os.path.join(folder,"segments")
os.path.isdir(segments_folder) or os.mkdir(segments_folder)
# Save the segments to the "segments" directory
# Save the segments to the "segments" directory
for i in range(len(seg)):
    imsave(os.path.join(segments_folder, filenm + '_' + str(i) + '.png'), seg[i][1])
This solution can handle both file-only input (e.g. 'test.png') and path input (e.g. 'C:\Users\Me\etc\test.png').
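For example, both of these invocations should then work (assuming the script is named segmenting.py, as in your Processing sketch):
python segmenting.py test.png
python segmenting.py C:\Users\Me\etc\test.png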
Edit 2:
For transparency, scipy.misc.imsave allows an alpha layer if arrays are saved as RGBA (MxNx4), see here.
Replace this
imsave(os.path.join(segments_folder, filenm + '_' + str(i) + '.png'), seg[i][1])
by this
# Create an MxNx4 array (RGBA)
seg_rgba = np.zeros((seg[i][1].shape[0],seg[i][1].shape[1],4),dtype=np.bool)
# Fill R, G and B with copies of the image
# Fill R, G and B with copies of the image
for c in range(3):
    seg_rgba[:,:,c] = seg[i][1]
# For A (alpha), use the invert of the image (so background is 0=transparent)
seg_rgba[:,:,3] = ~seg[i][1]
# Save image
imsave(os.path.join(segments_folder, filenm + '_' + str(i) + '.png'), seg_rgba)
Edit 3:
For saving into a different target folder with individual subfolders for each segmented image:
Instead of this line
folder = os.path.dirname(f)
you can specify the target folder, for example
folder = r'C:\Users\Dude\Desktop'
(Note the r'...' formatting, which produces a raw string literal.)
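For example, a quick illustration of the difference (not part of the script):
print(len('C:\test'))    # 6 -- '\t' was interpreted as a tab character
print(len(r'C:\test'))   # 7 -- the backslash is kept as-is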
Next, replace this
segments_folder = os.path.join(folder,"segments")
by this
segments_folder = os.path.join(folder,filenm[:-4]+"_segments")
and to be extra-clean replace this
imsave(os.path.join(segments_folder, filenm + '_' + str(i) + '.png'), seg_rgba)
by this
imsave(os.path.join(segments_folder, filenm[:-4] + '_' + str(i) + '.png'), seg_rgba)
Related
My code below is intended to get a batch of images and convert them to RGB. But I keep getting an error which says to convert to type uint8. I have seen other questions regarding the conversion to uint8, but none directly from an array to uint8. Any advice on how to make that happen is welcome, thank you!
from skimage import io
import numpy as np
import glob, os
from tkinter import Tk
from tkinter.filedialog import askdirectory
import cv2
# wavelength in microns
MWIR = 4.5
R = .692
G = .582
B = .140
rgb_sum = R + G + B;
NRed = R/rgb_sum;
NGreen = G/rgb_sum;
NBlue = B/rgb_sum;
path = askdirectory(title='Select PNG Folder') # shows dialog box and return the path
outpath = askdirectory(title='Select SAVE Folder')
for file in os.listdir(path):
    if file.endswith(".png"):
        imIn = io.imread(os.path.join(path, file))
        imOut = np.zeros(imIn.shape)

        for i in range(imIn.shape[0]):    # Assuming Rayleigh-Jeans law
            for j in range(imIn.shape[1]):
                imOut[i,j,0] = imIn[i,j,0]/((NRed/MWIR)**4)
                imOut[i,j,1] = imIn[i,j,0]/((NGreen/MWIR)**4)
                imOut[i,j,2] = imIn[i,j,0]/((NBlue/MWIR)**4)

        io.imsave(os.path.join(outpath, file) + '_RGB.png', imOut)
The code I am trying to integrate into my own (found in another thread, used to convert to type uint8) is:
info = np.iinfo(data.dtype) # Get the information of the incoming image type
data = data.astype(np.float64) / info.max # normalize the data to 0 - 1
data = 255 * data # Now scale by 255
img = data.astype(np.uint8)
cv2.imshow("Window", img)
thank you!
Normally imIn is of type uint8; after your normalisation, imOut is a float array (np.zeros defaults to float64, and the division produces floats). You must convert back to uint8 before saving to a PNG file:
io.imsave(os.path.join(outpath, file) + '_RGB.png', imOut.astype(np.uint8))
Note that the two loops are not necessary, you can use numpy vector operations instead:
MWIR = 4.5
R = .692
G = .582
B = .140

vector = np.array([R, G, B])
vector = vector / vector.sum()
vector = vector / MWIR
vector = np.power(vector, 4)

for file in os.listdir(path):
    if file.endswith(".png"):
        imgIn = ...
        # Broadcast channel 0 of the input against the three per-channel factors,
        # matching the per-pixel division done in the original loops
        imgOut = imgIn[:, :, :1] / vector
        io.imsave(
            os.path.join(outpath, file) + '_RGB.png',
            imgOut.astype(np.uint8))
I would like to get some help with my Python code. I am a novice to Python.
At a high level: I read a (.png) file given on the command line, put it into an original array, compute the SVD, find the top rank of the SVD based on the command-line argument, multiply with the original array, and then finally write out the file and the array.
My issue: the generated file is distorted and does not look like the real picture I intended to generate.
My question: I have put the snippet of code I am using below; can you please point to what I am doing incorrectly?
import sys
import os
import numpy
import numpy.linalg
import scipy.misc
def getOutputPngName(path, rank):
    filename, ext = os.path.splitext(path)
    return filename + '.' + str(rank) + '.png'

def getOutputNpyName(path, rank):
    filename, ext = os.path.splitext(path)
    return filename + '.' + str(rank) + '.npy'
if len(sys.argv) < 3:
    sys.exit('usage: task1.py <PNG inputFile> <rank>')
inputfile = sys.argv[1]
rank = int(sys.argv[2])
outputpng = getOutputPngName(inputfile, rank)
outputnpy = getOutputNpyName(inputfile, rank)
# Import pic.png into array im as command parameter
img = scipy.misc.imread(inputfile)
# Perform SVD on im and obtain individual matrices
P, D, Q = numpy.linalg.svd(img, full_matrices=False)
# Compute overall SVD matrix based on individual matrices
svd_decomp = numpy.dot(numpy.dot(P, numpy.diag(D)), Q)
# Keep Top entries in svd_decomp
initial = svd_decomp.argsort()
temp = numpy.array(initial)
svd_final = numpy.argpartition(temp,-rank)[-rank:]
# Multiply to obtain the best rank-k approximation of the original array
img = numpy.transpose(img)
final = (numpy.dot(svd_final,img))
#Saving the approximated array as a binary array file(1) and as a PNG file(2)
numpy.save(outputnpy, final)
scipy.misc.imsave(outputpng, final)
The biggest issue is the svd_decomp.argsort(). argsort() just returns the indices that would sort the array (along its last axis by default); it does not give you a rank-k approximation, so it's not what you want to do here.
In fact, you don't need to do any sorting, because linalg's svd() function does it for you. See the documentation.
The singular values for every matrix, sorted in descending order.
So you just have to do the following
import sys
import os
import numpy
import numpy.linalg
import scipy.misc
def getOutputPngName(path, rank):
    filename, ext = os.path.splitext(path)
    return filename + '.' + str(rank) + '.png'

def getOutputNpyName(path, rank):
    filename, ext = os.path.splitext(path)
    return filename + '.' + str(rank) + '.npy'

if len(sys.argv) < 3:
    sys.exit('usage: task1.py <PNG inputFile> <rank>')
inputfile = sys.argv[1]
rank = int(sys.argv[2])
outputpng = getOutputPngName(inputfile, rank)
outputnpy = getOutputNpyName(inputfile, rank)
# Import pic.png into array im as command parameter
img = scipy.misc.imread(inputfile)
# Perform SVD on im and obtain individual matrices
P, D, Q = numpy.linalg.svd(img, full_matrices=True)
# Select top "rank" singular values
svd_decomp = numpy.matrix(P[:, :rank]) * numpy.diag(D[:rank]) * numpy.matrix(Q[:rank, :])
# Save the output
numpy.save(outputnpy, svd_decomp)
scipy.misc.imsave(outputpng, svd_decomp)
Notice that all we do is select "rank" singular values, no need to sort.
Example outputs (images omitted here): the base image, the rank = 1 approximation, and the rank = 10 approximation.
No need to sort. Just compute your matrix from the first rank singular values:
svd_decomp = np.zeros((len(P), len(Q)))
for i in range(rank):
    svd_decomp += D[i] * np.outer(P.T[i], Q[i])
I want to use SimpleITK or medpy to convert a 3D image into 2D images.
In other words, I want to get a three-dimensional matrix and then divide it into a set of two-dimensional matrices.
import SimpleITK as ITK
import numpy as np
#from medpy.io import load
url=r'G:\path\to\my.mha'
image = ITK.ReadImage(url)
image_array = ITK.GetArrayFromImage(image)  # convert the image to a numpy array
frame_num, width, height = image_array.shape
print(frame_num,width,height)
This only gives me: 155 240 240
but I want to get [[1,5,2,3,1...],[54,1,3,5...],[5,8,9,6....]]
Just to add to Dave Chen's answer, as it is unclear if you want to get a set of 2D SimpleITK images or numpy arrays. The following code covers all three available options:
import SimpleITK as sitk
import numpy as np
url = "my_file.mha"
image = sitk.ReadImage(url)
max_index = image.GetDepth() # or image.GetWidth() or image.GetHeight() depending on the axis along which you want to extract
# As list of 2D SimpleITK images
list_of_2D_images = [image[:,:,i] for i in range(max_index)]
# As list of 2D numpy arrays which cannot be modified (no data copied)
list_of_2D_images_np_view = [sitk.GetArrayViewFromImage(image[:,:,i]) for i in range(max_index)]
# As list of 2D numpy arrays (data copied to numpy array)
list_of_2D_images_np = [sitk.GetArrayFromImage(image[:,:,i]) for i in range(max_index)]
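If you want the slices along another axis, the same pattern applies with the index moved to the corresponding position; for example, along the x axis (a small sketch in the same style as above):
# As list of 2D SimpleITK images along the x axis
list_of_2D_images_x = [image[i,:,:] for i in range(image.GetWidth())]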
Also, if you really want to work with URLs and not local files I would suggest looking at the remote download approach used in the SimpleITK notebooks repository, the relevant file is downloaddata.py.
That's not a big deal.
CT images originally have all values in int16, so you don't need to handle float numbers. In this case we can easily go from int16 to uint16 by just removing the negative values in the image (CT images have some negative pixel values). Note that we really need a uint16 or uint8 type so that OpenCV can handle it; since a CT image array covers a wide range of values, the best choice is uint16, so that we don't lose too much precision.
Ok, now you just need to do as follows:
import SimpleITK as sitk
import numpy as np
import cv2
mha = sitk.ReadImage('/mha/directory') #Importing mha file
array = sitk.GetArrayFromImage(mha) #Converting to array int16 (default)
#Translating each slice to the positive side
for m in range(array.shape[0]):
    array[m] = array[m] + abs(np.min(array[m]))
array = np.around(array, decimals=0) #remove any float numbers if exists.. probably not
array = np.asarray(array, dtype='uint16') #From int16 to uint16
After these steps the array is ready to be saved as PNG images using cv2.imwrite:
for i, image in enumerate(array):
    cv2.imwrite('/dir/to/save/' + 'name_image_' + str(i) + '.png', image)  # one file per slice
Note that by default SimpleITK handles .mha files in the axial view. I really don't know how to change that because I've never needed it before, but with some searching you can probably find something.
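One rough, untested idea: since sitk.GetArrayFromImage returns the data indexed as (z, y, x), you could reorder the axes of the numpy array before saving to get the other views:
coronal = np.transpose(array, (1, 0, 2))   # iterate over y instead of z
sagittal = np.transpose(array, (2, 0, 1))  # iterate over x
for i, image in enumerate(coronal):
    cv2.imwrite('/dir/to/save/' + 'coronal_' + str(i) + '.png', image)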
I'm not sure exactly what you want to get. But it's easy to extract a 2d slice from a 3d image in SimpleITK.
To get a Z slice at Z=100 you can do this:
zslice = image[:, :, 100]
To get a Y slice for Y=100:
yslice = image[:, 100, :]
And an X slice for X=100:
xslice = image[100, :, :]
@zivy @Dave Chen
I've solved my problem. In fact, running this code gives me 155 PNG pictures of 240*240, which is what I wanted to get.
# -*- coding:utf-8 -*-
import numpy as np
import subprocess
import random
import progressbar
from glob import glob
from skimage import io
np.random.seed(5) # for reproducibility
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
class BrainPipeline(object):
    '''
    A class for processing brain scans for one patient
    INPUT: (1) filepath 'path': path to directory of one patient. Contains following mha files:
               flair, t1, t1c, t2, ground truth (gt)
           (2) bool 'n4itk': True to use n4itk normed t1 scans (defaults to True)
           (3) bool 'n4itk_apply': True to apply and save n4itk filter to t1 and t1c scans for given patient. This will only work if the
    '''
    def __init__(self, path, n4itk = True, n4itk_apply = False):
        self.path = path
        self.n4itk = n4itk
        self.n4itk_apply = n4itk_apply
        self.modes = ['flair', 't1', 't1c', 't2', 'gt']
        # slices=[[flair x 155], [t1], [t1c], [t2], [gt]], 155 per modality
        self.slices_by_mode, n = self.read_scans()
        # [ [slice1 x 5], [slice2 x 5], ..., [slice155 x 5]]
        self.slices_by_slice = n
        self.normed_slices = self.norm_slices()
    def read_scans(self):
        '''
        goes into each modality in patient directory and loads individual scans.
        transforms scans of same slice into strip of 5 images
        '''
        print('Loading scans...')
        slices_by_mode = np.zeros((5, 155, 240, 240))
        slices_by_slice = np.zeros((155, 5, 240, 240))
        flair = glob(self.path + '/*Flair*/*.mha')
        t2 = glob(self.path + '/*_T2*/*.mha')
        gt = glob(self.path + '/*more*/*.mha')
        t1s = glob(self.path + '/**/*T1*.mha')
        t1_n4 = glob(self.path + '/*T1*/*_n.mha')
        t1 = [scan for scan in t1s if scan not in t1_n4]
        scans = [flair[0], t1[0], t1[1], t2[0], gt[0]]  # directories to each image (5 total)
        if self.n4itk_apply:
            print('-> Applyling bias correction...')
            for t1_path in t1:
                self.n4itk_norm(t1_path)  # normalize files
            scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
        elif self.n4itk:
            scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
        for scan_idx in xrange(5):
            # read each image directory, save to self.slices
            slices_by_mode[scan_idx] = io.imread(scans[scan_idx], plugin='simpleitk').astype(float)
        for mode_ix in xrange(slices_by_mode.shape[0]):  # modes 1 thru 5
            for slice_ix in xrange(slices_by_mode.shape[1]):  # slices 1 thru 155
                slices_by_slice[slice_ix][mode_ix] = slices_by_mode[mode_ix][slice_ix]  # reshape by slice
        return slices_by_mode, slices_by_slice
    def norm_slices(self):
        '''
        normalizes each slice in self.slices_by_slice, excluding gt
        subtracts mean and div by std dev for each slice
        clips top and bottom one percent of pixel intensities
        if n4itk == True, will apply n4itk bias correction to T1 and T1c images
        '''
        print('Normalizing slices...')
        normed_slices = np.zeros((155, 5, 240, 240))
        for slice_ix in xrange(155):
            normed_slices[slice_ix][-1] = self.slices_by_slice[slice_ix][-1]
            for mode_ix in xrange(4):
                normed_slices[slice_ix][mode_ix] = self._normalize(self.slices_by_slice[slice_ix][mode_ix])
        print('Done.')
        return normed_slices
    def _normalize(self, slice):
        '''
        INPUT: (1) a single slice of any given modality (excluding gt)
               (2) index of modality assoc with slice (0=flair, 1=t1, 2=t1c, 3=t2)
        OUTPUT: normalized slice
        '''
        b, t = np.percentile(slice, (0.5, 99.5))
        slice = np.clip(slice, b, t)
        if np.std(slice) == 0:
            return slice
        else:
            return (slice - np.mean(slice)) / np.std(slice)
    def save_patient(self, reg_norm_n4, patient_num):
        '''
        INPUT: (1) int 'patient_num': unique identifier for each patient
               (2) string 'reg_norm_n4': 'reg' for original images, 'norm' normalized images, 'n4' for n4 normalized images
        OUTPUT: saves png in Norm_PNG directory for normed, Training_PNG for reg
        '''
        print('Saving scans for patient {}...'.format(patient_num))
        progress.currval = 0
        if reg_norm_n4 == 'norm':  # save normed slices
            for slice_ix in progress(xrange(155)):  # reshape to strip
                strip = self.normed_slices[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:  # set values < 1
                    strip /= np.max(strip)
                if np.min(strip) <= -1:  # set values > -1
                    strip /= abs(np.min(strip))
                # save as patient_slice.png
                io.imsave('Norm_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
        elif reg_norm_n4 == 'reg':
            for slice_ix in progress(xrange(155)):
                strip = self.slices_by_slice[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:
                    strip /= np.max(strip)
                io.imsave('Training_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
        else:
            for slice_ix in progress(xrange(155)):  # reshape to strip
                strip = self.normed_slices[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:  # set values < 1
                    strip /= np.max(strip)
                if np.min(strip) <= -1:  # set values > -1
                    strip /= abs(np.min(strip))
                # save as patient_slice.png
                io.imsave('n4_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
    def n4itk_norm(self, path, n_dims=3, n_iters='[20,20,10,5]'):
        '''
        INPUT: (1) filepath 'path': path to mha T1 or T1c file
               (2) directory 'parent_dir': parent directory to mha file
        OUTPUT: writes n4itk normalized image to parent_dir under orig_filename_n.mha
        '''
        output_fn = path[:-4] + '_n.mha'
        # run n4_bias_correction.py path n_dim n_iters output_fn
        subprocess.call('python n4_bias_correction.py ' + path + ' ' + str(n_dims) + ' ' + n_iters + ' ' + output_fn, shell = True)
def save_patient_slices(patients, type):
    '''
    INPUT   (1) list 'patients': paths to any directories of patients to save. for example- glob("Training/HGG/**")
            (2) string 'type': options = reg (non-normalized), norm (normalized, but no bias correction), n4 (bias corrected and normalized)
    saves strips of patient slices to appropriate directory (Training_PNG/, Norm_PNG/ or n4_PNG/) as patient-num_slice-num
    '''
    for patient_num, path in enumerate(patients):
        a = BrainPipeline(path)
        a.save_patient(type, patient_num)

def s3_dump(directory, bucket):
    '''
    dump files from a given directory to an s3 bucket
    INPUT   (1) string 'directory': directory containing files to save
            (2) string 'bucket': name of s3 bucket to dump files
    '''
    subprocess.call('aws s3 cp' + ' ' + directory + ' ' + 's3://' + bucket + ' ' + '--recursive')
def save_labels(fns):
    '''
    INPUT list 'fns': filepaths to all labels
    '''
    progress.currval = 0
    for label_idx in progress(range(len(fns))):
        slices = io.imread(fns[label_idx], plugin='simpleitk')
        for slice_idx in range(len(slices)):
            io.imsave(r'{}_{}L.png'.format(label_idx, slice_idx), slices[slice_idx])
if __name__ == '__main__':
    url = r'G:\work\deeplearning\BRATS2015_Training\HGG\brats_2013_pat0005_1\VSD.Brain.XX.O.MR_T1.54537\VSD.Brain.XX.O.MR_T1.54537.mha'
    labels = glob(url)
    save_labels(labels)
    # patients = glob('Training/HGG/**')
    # save_patient_slices(patients, 'reg')
    # save_patient_slices(patients, 'norm')
    # save_patient_slices(patients, 'n4')
    # s3_dump('Graveyard/Training_PNG/', 'orig-training-png')
I have a project that requires me to export a model in the .dae Collada format, and I am struggling to figure out how to do this. I have been trying to use pyCollada, but I have limited experience with 3D modeling and the file structure is confusing me. Why does it have to specify an array of "normal_floats" to build a polygon? Aren't the vertices themselves enough?
I have all the vertices of each face of the object and need to export the data into a Collada format. Is there an easy way to do this, since each face is two-dimensional? Is there an algorithm I can simply feed the vertices into to generate the appropriate faces of the object? Any help would be appreciated.
Additionally, I currently have an algorithm that draws the object in OpenGL. Is there a way I can reuse code from it when generating the export file?
Update this is the tutorial I was attempting to follow to create the object: http://pycollada.github.io/creating.html
#Allows the laminate to get exported as a DAE.
def toDAE(self):
    """
    Exports the current laminate to a DAE file format
    """
    import collada
    mesh = collada.Collada()
    layerdef = self.layerdef
    nodes = []  # Each node of the mesh scene. Typically one per layer.
    for layer in layerdef.layers:
        layer_thickness = layer.thickness
        shapes = self.geoms[layer]
        zvalue = layerdef.z_values[layer]
        height = float(zvalue)  #* 100 #*
        if (len(shapes) == 0):  # In case there are no shapes.
            continue
        for s in shapes:
            geom = self.createDAEFromShape(s, height, mesh, layer_thickness)
            mesh.geometries.append(geom)
            effect = collada.material.Effect("effect", [], "phong", diffuse=(1,0,0), specular=(0,1,0))
            mat = collada.material.Material("material", "mymaterial" + str(s.id), effect)
            matnode = collada.scene.MaterialNode("materialref" + str(s.id), mat, inputs=[])
            mesh.effects.append(effect)
            mesh.materials.append(mat)
            geomnode = collada.scene.GeometryNode(geom, [matnode])
            node = collada.scene.Node("node" + str(s.id), children=[geomnode])
            nodes.append(node)
    myscene = collada.scene.Scene("myscene", nodes)
    mesh.scenes.append(myscene)
    mesh.scene = myscene
    filename = popupcad.exportdir + os.path.sep + str(self.id) + '.dae'  #
    mesh.write(filename)
def createDAEFromShape(self, s, layer_num, mesh, thickness):  # TODO Move this method into the shape class.
    import collada
    vertices = s.extrudeVertices(thickness, z0=layer_num)

    # This scales the vertices properly, so that they are in millimeters.
    vert_floats = [float(x)/(popupcad.SI_length_scaling) for x in vertices]
    vert_src_name = str(self.id) + '|' + str(s.id) + "-array"
    vert_src = collada.source.FloatSource(vert_src_name, numpy.array(vert_floats), ('X', 'Y', 'Z'))
    geom = collada.geometry.Geometry(mesh, "geometry-" + str(s.id), str(self.id), [vert_src])
    input_list = collada.source.InputList()
    input_list.addInput(0, 'VERTEX', "#" + vert_src_name)
    indices = numpy.array(range(0, (len(vertices) // 3)))
    triset = geom.createTriangleSet(indices, input_list, "materialref" + str(s.id))
    triset.generateNormals()
    geom.primitives.append(triset)
    return geom
Apparently you can actually compute it without normals, but the documentation was not clear.
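For reference, here is a minimal sketch of that pattern, following the pycollada "creating" tutorial linked above: only a vertex source and a VERTEX input are supplied, and generateNormals() computes the normals afterwards. The identifiers ("verts-array", "geometry0", etc.) are just placeholders.
import numpy
import collada

mesh = collada.Collada()

# One triangle, defined by vertices only -- no normals supplied
vert_floats = [0, 0, 0,  1, 0, 0,  0, 1, 0]
vert_src = collada.source.FloatSource("verts-array", numpy.array(vert_floats), ('X', 'Y', 'Z'))
geom = collada.geometry.Geometry(mesh, "geometry0", "mytriangle", [vert_src])

input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', "#verts-array")

indices = numpy.array([0, 1, 2])
triset = geom.createTriangleSet(indices, input_list, "materialref")
triset.generateNormals()   # normals are computed from the vertex data
geom.primitives.append(triset)
mesh.geometries.append(geom)

# Minimal scene so that viewers display the geometry
geomnode = collada.scene.GeometryNode(geom, [])
node = collada.scene.Node("node0", children=[geomnode])
myscene = collada.scene.Scene("myscene", [node])
mesh.scenes.append(myscene)
mesh.scene = myscene

mesh.write("triangle.dae")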
I'm jumping right into ArcPy to help out a friend who needs to automate some ArcGIS work. The problem at hand can be reduced to the following pseudocode:
accumulationRaster = SomeZeroedRaster
for each Raster pair, R, S, pertaining to rain and snow:
accumulationRaster += Con(S == 0, Con(R >= 1, 1, 0), 0)
In other words, iterate through all data of rain and snow of each day. Perform a logical operation on each cell pair, so that the corresponding cell in the accumulationRaster gets incremented if no snow (S == 0) and some rain (R >= 1).
Having no luck so far with various attempts, I thought I'd ask here, as the problem should have an easy solution. Full working code with the missing gaps pointed out below. Most of it is just trivial file path set up, and the above pseudocode should suffice:
The missing pieces are numbered 1, 2, and 3:
1. How to initialize a zero raster with the proper dimensions.
2. How to make a copy of this zero raster (if 1 is simple, just do that again instead).
3. How to increment the accumulation raster.
import os
import arcpy
from arcpy import env
from arcpy.sa import *
in_workspace = "C:/Data/ProjectFolder"
os.chdir(in_workspace)
arcpy.env.workspace = in_workspace
maaneder = ["01_januar", "02_februar", "03_mars", "04_april",
            "05_mai", "06_juni", "07_juli", "08_august",
            "09_september", "10_oktober", "11_november", "12_desember"]

################ 1 ###################
baseAccum = #1: ?? (zero Raster w/correct type & dimensions)
######################################

outputPath = "rainDaysInGrowthSeason/"

for year in range(1997, 2015):
    for month in range(1, 13):

        ################ 2 ###################
        monthAccum = #2: ?? (Copy of baseSum)
        ######################################

        for day in range(1, 32):
            # e.g 1997/09_september/
            fpath = str(year) + "/" + maaneder[month-1] + "/"
            # e.g. 1997_09_08.asc
            filesuffix = str(year) + "_" + str(month).zfill(2) + "_" + str(day).zfill(2) + ".asc"
            # e.g. Snoedybde/1997/09_september/sd_1997_09_08.asc
            snoefile = "Snoedybde/" + fpath + "sd_" + filesuffix
            # e.g. rrl/1997/09_september/rrl_1997_09_08.asc
            rrlfile = "rrl/" + fpath + "rrl_" + filesuffix

            hasSnoe = os.path.isfile(snoefile)
            hasRrl = os.path.isfile(rrlfile)
            if (not hasSnoe or not hasRrl):
                continue

            snoeRaster = Raster(snoefile)
            rrlRaster = Raster(rrlfile)

            ############## 3 #####################
            monthAccum = #3: ??
            # something a'la:
            # += Con(snoeRaster == 0, Con(rrlRaster >= 1, 1, 0), 0)
            ######################################

        # e.g. rainDaysInGrowthSeason/1997
        outputDir = outputPath + str(year)
        if (not os.path.isdir(outputDir)):
            os.mkdir(outputDir)
        monthAccum.save(outputDir + "/r_" + str(year) + "_" + str(month).zfill(2))
You can use arcpy.NumPyArrayToRaster to make a zero base raster.
If you wanted the raster to be 50 rows and 50 columns with a 1 meter resolution, you can do this:
import numpy
myArr = numpy.zeros((50, 50))
myZeroRaster = arcpy.NumPyArrayToRaster(myArr, arcpy.Point(355355, 3199277), 1, 1)
You can also convert a raster to a numpy array and then do your accumulations using one numpy array to another.
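For instance, a rough sketch of that idea (reusing the corner point and cell size from the example above; nodata handling omitted):
acc = arcpy.RasterToNumPyArray(baseAccum)
snoe = arcpy.RasterToNumPyArray(snoeRaster)
rrl = arcpy.RasterToNumPyArray(rrlRaster)
acc += ((snoe == 0) & (rrl >= 1)).astype(acc.dtype)
result = arcpy.NumPyArrayToRaster(acc, arcpy.Point(355355, 3199277), 1, 1)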
Or, if you have Spatial Analyst, you could use the Create Constant Raster tool to create a zero-based raster and then use the Math -> Plus tool (or the + map algebra operator) to add your rasters together.
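Putting that together, here is a rough sketch of how the three numbered gaps might be filled with Spatial Analyst (the cell size and extent passed to CreateConstantRaster are placeholders, and the Con expression goes inside the day loop where snoeRaster and rrlRaster are defined):
from arcpy.sa import CreateConstantRaster, Con

arcpy.CheckOutExtension("Spatial")

# 1: zero raster with the desired cell size and extent (placeholder values)
baseAccum = CreateConstantRaster(0, "INTEGER", 1000, arcpy.Extent(0, 0, 50000, 50000))

# 2: start each month from the zero raster; map algebra returns a new Raster object
monthAccum = baseAccum + 0

# 3: add 1 where there is no snow and at least 1 mm of rain
monthAccum = monthAccum + Con(snoeRaster == 0, Con(rrlRaster >= 1, 1, 0), 0)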