// NOTE(review): this span is a garbled extraction of the OpenCL kernel
// count_downchannels — original source line numbers (10, 35, 45, ...) are
// fused into the code text and interior lines are missing. It will not
// compile as-is; recover the intact kernel from the original .cl file
// before making code changes.
10 #ifdef KERNEL_COUNT_DOWNCHANNELS 35 __global
// Kernel arguments: seed points, boundary mask, uv field (presumably the
// flow (u,v) vector field — TODO confirm), and mapping/count/link grids.
const float2 *seed_point_array,
36 __global
const bool *mask_array,
37 __global
const float2 *uv_array,
38 __global uint *mapping_array,
39 __global uint *count_array,
40 __global uint *link_array
// Flatten the 2-D NDRange into a single work-item index.
45 const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
46 __private uint idx, prev_idx, n_steps=0u, counter=1u;
47 __private
float dl=0.0f, dt=
DT_MAX;
48 __private float2 uv1_vec, uv2_vec, dxy1_vec, dxy2_vec,
49 vec = seed_point_array[global_id], next_vec;
// Atomically store the initial counter at the seed pixel.
55 atomic_xchg(&count_array[idx],counter++);
// Integrate downstream until a masked pixel is reached or the step
// budget 100*(MAX_N_STEPS-1) is exhausted.
59 while (!mask_array[idx] && n_steps<100*(
MAX_N_STEPS-1)) {
61 vec, &next_vec, &idx);
63 &vec, &next_vec, &idx, mapping_array)) {
// Record the downstream link from the previous pixel to this one.
73 atomic_xchg(&link_array[prev_idx],idx);
// Presumably continues only while this streamline's counter is below the
// count already recorded at the pixel — branch body missing from extract.
77 if (counter++<count_array[idx]) {
// Final link write, guarded against masked previous pixels.
83 if (!mask_array[prev_idx]) atomic_xchg(&link_array[prev_idx],idx);
// NOTE(review): garbled extraction of the OpenCL kernel flag_downchannels —
// line numbers are fused into the text and interior lines are missing.
// Recover the intact kernel before making code changes.
88 #ifdef KERNEL_FLAG_DOWNCHANNELS 89 __kernel
void flag_downchannels(
106 __global
const float2 *seed_point_array,
107 __global
const bool *mask_array,
108 __global
const float2 *uv_array,
109 __global uint *mapping_array,
110 __global uint *count_array,
111 __global uint *link_array
// Flatten the 2-D NDRange into a single work-item index.
116 const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
117 __private uint idx, prev_idx, counter=1u;
118 __private float2 vec = seed_point_array[global_id];
// Walk the precomputed link chain until a masked pixel or a self-link
// (prev_idx==idx) terminates it.
127 while (!mask_array[idx] && prev_idx!=idx) {
129 idx = link_array[idx];
131 if (!mask_array[idx]) {
// atomic_max: when several chains visit the same pixel, the largest
// running counter wins.
133 atomic_max(&count_array[idx],counter++);
// Flag the last unmasked pixel of the chain as a channel tail.
140 if (!mask_array[prev_idx]) atomic_or(&mapping_array[prev_idx],
IS_CHANNELTAIL);
// NOTE(review): garbled extraction of the OpenCL kernel link_hillslopes —
// line numbers are fused into the text and interior lines are missing.
// Recover the intact kernel before making code changes.
145 #ifdef KERNEL_LINK_HILLSLOPES 167 __global
const float2 *seed_point_array,
168 __global
const bool *mask_array,
169 __global
const float2 *uv_array,
170 __global uint *mapping_array,
171 __global uint *count_array,
172 __global uint *link_array
// Flatten the 2-D NDRange into a single work-item index.
177 const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
178 __private uint idx, prev_idx, n_steps=0u;
179 __private
float dl=0.0f, dt=
DT_MAX;
180 __private float2 uv1_vec, uv2_vec, dxy1_vec, dxy2_vec,
181 vec = seed_point_array[global_id], next_vec;
// Step the streamline only while it remains inside its starting pixel
// (prev_idx==idx), is unmasked, and is within the step budget.
188 while (prev_idx==idx && !mask_array[idx] && n_steps<100*(
MAX_N_STEPS-1)) {
191 vec, &next_vec, &idx);
193 &vec, &next_vec, &idx, mapping_array)) {
// Once the streamline enters a different, unmasked pixel, record that
// pixel as the downstream link of the seed pixel.
200 if (prev_idx!=idx && !mask_array[idx]) {
201 atomic_xchg(&link_array[prev_idx],idx);
__kernel void count_downchannels(__global const float2 *seed_point_array, __global const bool *mask_array, __global const float2 *uv_array, __global uint *mapping_array, __global uint *count_array, __global uint *link_array)
Integrate downstream from all channel heads until either a masked boundary pixel is reached or the maximum step count is exceeded, atomically recording the running visit count and the downstream pixel link at each channel pixel along the way.
static void compute_step_vec(const float dt, const __global float2 *uv_array, float2 *dxy1_vec, float2 *dxy2_vec, float2 *uv1_vec, float2 *uv2_vec, const float2 vec, float2 *next_vec, uint *idx)
Compute a 2nd-order Runge-Kutta integration step along a streamline.
static uint get_array_idx(float2 vec)
Compute the array index of the padded grid pixel pointed to by a float2 grid position vector (choice ...
__kernel void link_hillslopes(__global const float2 *seed_point_array, __global const bool *mask_array, __global const float2 *uv_array, __global uint *mapping_array, __global uint *count_array, __global uint *link_array)
Trace a streamline from each hillslope seed pixel until it steps out of its starting pixel (or hits a masked pixel or the step limit), then atomically record the unmasked pixel it enters as that hillslope pixel's downstream link in link_array.
static bool countlink_runge_kutta_step(float *dt, float *dl, float2 *dxy1_vec, float2 *dxy2_vec, float2 *vec, float2 *next_vec, uint *idx, __global uint *mapping_array)
Compute a single step of 2nd-order Runge-Kutta numerical integration of a streamline given precomputed step vectors.