#ifdef KERNEL_MAP_CHANNEL_HEADS
__kernel void map_channel_heads(
        __global const float2 *seed_point_array,
        __global const bool   *mask_array,
        __global const float2 *uv_array,
        __global       uint   *mapping_array
    )
{
    const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
    __private uint idx, prev_idx, n_steps = 0u;
    __private float dl = 0.0f, dt = DT_MAX;
    __private float2 uv1_vec, uv2_vec, dxy1_vec, dxy2_vec,
                     vec = seed_point_array[global_id], next_vec;
    ...
    // integrate while the streamline remains on its starting pixel,
    // off the mask, and within the step budget
    while (prev_idx==idx && !mask_array[idx] && n_steps<(MAX_N_STEPS-1)) {
        compute_step_vec(dt, uv_array, &dxy1_vec, &dxy2_vec, &uv1_vec, &uv2_vec,
                         vec, &next_vec, &idx);
        channelheads_runge_kutta_step(&dt, &dl, &dxy1_vec, &dxy2_vec,
                                      &vec, &next_vec, &n_steps, &idx);
    }
    ...
    compute_step_vec(dt, uv_array, &dxy1_vec, &dxy2_vec, &uv1_vec, &uv2_vec,
                     vec, &next_vec, &idx);
    channelheads_runge_kutta_step(&dt, &dl, &dxy1_vec, &dxy2_vec,
                                  &vec, &next_vec, &n_steps, &idx);
    ...
    if (!mask_array[idx]) {
        ...
        if ((~mapping_array[idx]) & IS_THINCHANNEL) {
            ...
        }
        if (mapping_array[prev_idx] & IS_THINCHANNEL) {
            ...
        }
    }
}
#endif
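The (~mapping_array[idx]) & IS_THINCHANNEL test above is the usual "flag bit not set" idiom. Where the elided lines record a confirmed channel head in mapping_array, an atomic bit-set of the kind sketched below is the standard pattern; the helper name and the IS_CHANNELHEAD flag value are illustrative assumptions, not taken from this excerpt.

// Hedged sketch only: setting a mapping flag safely from many work-items.
// IS_CHANNELHEAD is an assumed flag name for illustration.
static void set_mapping_flag_sketch(__global uint *mapping_array,
                                    const uint idx, const uint flag)
{
    // atomic_or sets the bit(s) without racing against other work-items
    atomic_or(&mapping_array[idx], flag);
}
// usage inside a kernel: set_mapping_flag_sketch(mapping_array, idx, IS_CHANNELHEAD);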
#ifdef KERNEL_PRUNE_CHANNEL_HEADS
#define CHECK_IS_THINCHANNEL(idx) ((mapping_array[idx] & IS_THINCHANNEL)>0)
#define CHECK_IS_MASKED(idx) (mask_array[idx])
#define CHECK_THINCHANNEL(nbr_vec) { \
    idx = get_array_idx(nbr_vec); \
    flag += (CHECK_IS_THINCHANNEL(idx) | CHECK_IS_MASKED(idx)*16); \
}
#define CHECK_E(vec)  CHECK_THINCHANNEL((float2)( vec[0]+1.0f, vec[1]      ))
#define CHECK_NE(vec) CHECK_THINCHANNEL((float2)( vec[0]+1.0f, vec[1]+1.0f ))
#define CHECK_N(vec)  CHECK_THINCHANNEL((float2)( vec[0]     , vec[1]+1.0f ))
#define CHECK_NW(vec) CHECK_THINCHANNEL((float2)( vec[0]-1.0f, vec[1]+1.0f ))
#define CHECK_W(vec)  CHECK_THINCHANNEL((float2)( vec[0]-1.0f, vec[1]      ))
#define CHECK_SW(vec) CHECK_THINCHANNEL((float2)( vec[0]-1.0f, vec[1]-1.0f ))
#define CHECK_S(vec)  CHECK_THINCHANNEL((float2)( vec[0]     , vec[1]-1.0f ))
#define CHECK_SE(vec) CHECK_THINCHANNEL((float2)( vec[0]+1.0f, vec[1]-1.0f ))

__kernel void prune_channel_heads(
        __global const float2 *seed_point_array,
        __global const bool   *mask_array,
        __global const float2 *uv_array,
        __global       uint   *mapping_array
    )
{
    const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
    __private uint idx, flag = 0;
    __private float2 vec = seed_point_array[global_id];
    ...
    // no thin-channel neighbor (flag==0) or a masked neighbor (flag>=16):
    // prune by clearing every mapping flag here except IS_THINCHANNEL
    if (flag==0 || flag>=16) {
        atomic_and(&mapping_array[idx], IS_THINCHANNEL);
    }
    ...
}
#endif
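The CHECK_* macros pack two tallies into flag: each thin-channel neighbor contributes 1 and each masked neighbor contributes 16, and since a pixel has only eight neighbors the two counts cannot collide (flag%16 counts thin-channel neighbors, flag/16 counts masked ones). The elided kernel body presumably invokes all eight macros around the seed pixel, roughly as in this hedged fragment, which must sit where idx, flag, mapping_array, and mask_array are in scope:

    // Hedged sketch of the elided neighborhood scan: tally the eight
    // neighbors of the seed pixel into 'flag'
    CHECK_E(vec);  CHECK_NE(vec); CHECK_N(vec); CHECK_NW(vec);
    CHECK_W(vec);  CHECK_SW(vec); CHECK_S(vec); CHECK_SE(vec);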
__kernel void prune_channel_heads(__global const float2 *seed_point_array, __global const bool *mask_array, __global const float2 *uv_array, __global uint *mapping_array)
Keep only those provisional channel heads that lie on the 'thin channel' skeletonized network and have at least one thin-channel neighbor, with no neighbors in the masked region.
__kernel void map_channel_heads(__global const float2 *seed_point_array, __global const bool *mask_array, __global const float2 *uv_array, __global uint *mapping_array)
Map provisional channel heads, even including those not on an IS_THINCHANNEL pixel and thus extraneous.
static void compute_step_vec(const float dt, const __global float2 *uv_array, float2 *dxy1_vec, float2 *dxy2_vec, float2 *uv1_vec, float2 *uv2_vec, const float2 vec, float2 *next_vec, uint *idx)
Compute a 2nd-order Runge-Kutta integration step along a streamline.
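For readers unfamiliar with the scheme, a minimal self-contained sketch of a 2nd-order Runge-Kutta (midpoint) step-vector computation follows. The nearest-pixel lookup uv_at_sketch and the NX_PADDED_SKETCH width are assumptions for illustration; the real compute_step_vec works through uv_array and get_array_idx, and also returns uv1_vec/uv2_vec, omitted here.

#define NX_PADDED_SKETCH 256u  // assumed padded-grid width, illustration only

// hypothetical field lookup standing in for the real uv_array indexing
static float2 uv_at_sketch(const __global float2 *uv_array, const float2 vec)
{
    const uint i = (uint)(vec.x + 0.5f), j = (uint)(vec.y + 0.5f);
    return uv_array[i + j*NX_PADDED_SKETCH];
}

static void compute_step_vec_sketch(const float dt,
                                    const __global float2 *uv_array,
                                    float2 *dxy1_vec, float2 *dxy2_vec,
                                    const float2 vec, float2 *next_vec)
{
    *dxy1_vec = dt*uv_at_sketch(uv_array, vec);                    // Euler increment
    *dxy2_vec = dt*uv_at_sketch(uv_array, vec + 0.5f*(*dxy1_vec)); // midpoint increment
    *next_vec = vec + *dxy2_vec;                                   // provisional update
}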
static uint get_array_idx(float2 vec)
Compute the array index of the padded grid pixel pointed to by a float2 grid position vector (choice ...
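A plausible shape for such an index computation, assuming row-major storage and reusing the assumed NX_PADDED_SKETCH width from the sketch above; this is a hedged reconstruction consistent with the description, not the source implementation:

static uint get_array_idx_sketch(const float2 vec)
{
    const uint i = (uint)(vec.x + 0.5f);  // column, rounded to nearest pixel
    const uint j = (uint)(vec.y + 0.5f);  // row, rounded to nearest pixel
    return i + j*NX_PADDED_SKETCH;        // flatten row-major
}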
static void channelheads_runge_kutta_step(float *dt, float *dl, float2 *dxy1_vec, float2 *dxy2_vec, float2 *vec, float2 *next_vec, uint *n_steps, uint *idx)
Compute a single step of 2nd-order Runge-Kutta numerical integration of a streamline given precomputed step vectors dxy1_vec and dxy2_vec.
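Given the two precomputed increments, the per-step update reduces to advancing the position, accumulating arc length, and bumping the step counter. Below is a hedged sketch using a Heun-style average of the two increments; whether the source averages them or simply applies the midpoint increment is not recoverable from this excerpt, and the dt/idx updates of the real function are omitted.

static void runge_kutta_step_sketch(float *dl, float2 *vec, uint *n_steps,
                                    const float2 dxy1_vec, const float2 dxy2_vec)
{
    const float2 dxy_vec = 0.5f*(dxy1_vec + dxy2_vec); // Heun average (assumed)
    *vec     += dxy_vec;         // advance along the streamline
    *dl      += length(dxy_vec); // accumulate traversed arc length
    *n_steps += 1u;              // count this integration step
}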