slm: OpenCL code base  0.1
segment.cl

#ifdef KERNEL_SEGMENT_DOWNCHANNELS
__kernel void segment_downchannels(
        __global const float2 *seed_point_array,
        __global const bool   *mask_array,
        __global const float2 *uv_array,
        __global const uint   *mapping_array,
        __global const uint   *count_array,
        __global const uint   *link_array,
        __global       uint   *label_array
    )
{
    // For every channel head pixel...

    const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
    __private uint idx, prev_idx, segment_label=0u,
                   segmentation_counter=SEGMENTATION_THRESHOLD;
    __private float2 vec = seed_point_array[global_id];

    // Remember the channel head pixel we start from
    prev_idx = get_array_idx(vec);
    // Label this stream segment, starting with the head pixel
    segment_label = prev_idx;
    atomic_xchg(&label_array[prev_idx],segment_label);
    // Step downstream
    idx = link_array[prev_idx];
    // Continue stepping downstream until a dominant confluence
    // or a masked pixel is reached
    while (!mask_array[idx] && prev_idx!=idx) {
        if ((mapping_array[idx]) & IS_MAJORCONFLUENCE) {
            if ((mapping_array[prev_idx]) & IS_MAJORINFLOW) {
                if (count_array[idx]>=segmentation_counter) {
                    // Start a new segment, labeled by this confluence pixel
                    segment_label = idx;
                    segmentation_counter += SEGMENTATION_THRESHOLD;
                }
            } else {
                break;
            }
        }
        // Label here with the current segment's label
        atomic_xchg(&label_array[idx],segment_label);
        // Continue downstream
        prev_idx = idx;
        idx = link_array[idx];
    }
    return;
}
#endif
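
To make the relabeling rule above concrete, here is a minimal host-side C sketch (not part of the code base) of the same walk: each segment is labeled with the index of the pixel at which it starts, and a new segment begins at a major confluence fed by a major inflow once the upstream pixel count crosses the next multiple of the threshold. The toy 10-pixel chain, the _DEMO flag values, and the threshold are all invented for illustration.

#include <stdio.h>
#include <stdbool.h>

#define IS_MAJORCONFLUENCE_DEMO (1u<<0)   /* hypothetical flag values */
#define IS_MAJORINFLOW_DEMO     (1u<<1)
#define THRESHOLD_DEMO          4u        /* hypothetical threshold */

int main(void) {
    /* Toy straight channel: pixel i links to pixel i+1; the tail links to itself */
    unsigned link[10]    = {1,2,3,4,5,6,7,8,9,9};
    unsigned count[10]   = {1,2,3,4,5,6,7,8,9,10};   /* upstream pixel counts */
    unsigned mapping[10] = {0};
    bool     mask[10]    = {false};
    unsigned label[10]   = {0};
    /* Pretend pixel 5 is a major confluence fed by a major inflow at pixel 4 */
    mapping[5] = IS_MAJORCONFLUENCE_DEMO;
    mapping[4] = IS_MAJORINFLOW_DEMO;

    unsigned prev = 0, seg = prev, counter = THRESHOLD_DEMO;
    label[prev] = seg;                     /* label the head pixel with its own index */
    unsigned idx = link[prev];
    while (!mask[idx] && prev != idx) {
        if (mapping[idx] & IS_MAJORCONFLUENCE_DEMO) {
            if (mapping[prev] & IS_MAJORINFLOW_DEMO) {
                if (count[idx] >= counter) {   /* start a new segment here */
                    seg = idx;
                    counter += THRESHOLD_DEMO;
                }
            } else break;
        }
        label[idx] = seg;
        prev = idx;
        idx = link[idx];
    }
    /* Prints segment 0 for pixels 0-4 and segment 5 for pixels 5-9 */
    for (int i = 0; i < 10; i++) printf("pixel %d -> segment %u\n", i, label[i]);
    return 0;
}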

#ifdef KERNEL_SEGMENT_HILLSLOPES
__kernel void segment_hillslopes(
        __global const float2 *seed_point_array,
        __global const bool   *mask_array,
        __global const float2 *uv_array,
        __global const uint   *mapping_array,
        __global const uint   *count_array,
        __global const uint   *link_array,
        __global       uint   *label_array
    )
{
    // For every non-thin-channel pixel...

    const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
    __private uint idx, hillslope_idx, n_steps=0u;
    __private float dl=0.0f, dt=DT_MAX;
    __private float2 uv1_vec, uv2_vec, dxy1_vec, dxy2_vec,
                     vec = seed_point_array[global_id], next_vec;

    // Remember the hillslope pixel we start from
    idx = get_array_idx(vec);
    hillslope_idx = idx;
    // Integrate downstream until a channel pixel (or masked pixel) is reached
    while (!mask_array[idx] && ((~mapping_array[idx])&IS_THINCHANNEL)
           && n_steps<(MAX_N_STEPS-1)) {
        compute_step_vec(dt, uv_array, &dxy1_vec, &dxy2_vec, &uv1_vec, &uv2_vec,
                         vec, &next_vec, &idx);
        if (segment_runge_kutta_step(&dt, &dl, &dxy1_vec, &dxy2_vec,
                                     &vec, &next_vec, &n_steps, &idx))
            break;
    }
    if (mapping_array[idx]&IS_THINCHANNEL) {
        // We've reached a (thin) channel, so grab its label and apply it to
        // the source hillslope pixel
        atomic_xchg(&label_array[hillslope_idx],label_array[idx]);
    }
    return;
}
#endif
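
For readers unfamiliar with the integration step, here is a generic 2nd-order Runge-Kutta (midpoint variant) streamline step in plain C, illustrating the kind of step that compute_step_vec() and segment_runge_kutta_step() are documented to perform together. The rotational velocity field and fixed step size are invented stand-ins; the real helpers also interpolate uv_array at sub-pixel positions and adapt dt, which this sketch omits.

#include <stdio.h>

typedef struct { float x, y; } vec2;

/* Hypothetical smooth velocity field standing in for uv_array lookups:
 * a rigid rotation, so exact streamlines are circles */
static vec2 uv(vec2 p) { vec2 v = { -p.y, p.x }; return v; }

static vec2 rk2_step(vec2 p, float dt) {
    vec2 k1   = uv(p);                                      /* slope at the start point */
    vec2 mid  = { p.x + 0.5f*dt*k1.x, p.y + 0.5f*dt*k1.y };
    vec2 k2   = uv(mid);                                    /* slope at the midpoint */
    vec2 next = { p.x + dt*k2.x, p.y + dt*k2.y };           /* full step with midpoint slope */
    return next;
}

int main(void) {
    vec2 p = { 1.0f, 0.0f };
    for (int n = 0; n < 8; n++) {
        p = rk2_step(p, 0.1f);
        printf("step %d: (%.4f, %.4f)\n", n, p.x, p.y);   /* traces an approximate circle */
    }
    return 0;
}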

#ifdef KERNEL_SUBSEGMENT_CHANNEL_EDGES
__kernel void subsegment_channel_edges(
        __global const float2 *seed_point_array,
        __global const bool   *mask_array,
        __global const float2 *uv_array,
        __global       uint   *mapping_array,
        __global const uint   *channel_label_array,
        __global const uint   *link_array,
        __global       uint   *label_array
    )
{
    // For every channel head and major confluence pixel...

    const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
    __private uint idx, prev_idx, left_idx, segment_label=0u, prev_x,prev_y, x,y, n_turns;
    __private char dx,dy, rotated_dx,rotated_dy;
    __private float2 vec = seed_point_array[global_id];

    // Remember the channel head / major confluence pixel we start from
    prev_idx = get_array_idx(vec);
    segment_label = channel_label_array[prev_idx];
    // Step downstream off the channel head / major confluence pixel
    idx = link_array[prev_idx];
    // Even if this pixel is masked, we still need to try to subsegment
    while (prev_idx!=idx) {

#ifdef F_ORDER
        prev_x = prev_idx%NX_PADDED;
        prev_y = prev_idx/NX_PADDED;
        x = idx%NX_PADDED;
        y = idx/NX_PADDED;
#else
        prev_x = prev_idx/NY_PADDED;
        prev_y = prev_idx%NY_PADDED;
        x = idx/NY_PADDED;
        y = idx%NY_PADDED;
#endif
        dx = (char)(x-prev_x);
        dy = (char)(y-prev_y);

        // Rotate 45deg anticlockwise repeatedly until a non-fillable pixel is reached.
        // Initialize left_idx so it cannot spuriously equal prev_idx on the first
        // test (it was previously read uninitialized here)
        left_idx = ~prev_idx;
        n_turns = 0;
        while (left_idx!=prev_idx && ++n_turns<=4) {
            rotated_dx = dx-dy;
            rotated_dy = dx+dy;
            // Renormalize each component back to {-1,0,+1}
            rotated_dx = rotated_dx/clamp((char)abs(rotated_dx),(char)1,(char)2);
            rotated_dy = rotated_dy/clamp((char)abs(rotated_dy),(char)1,(char)2);
            dx = rotated_dx;
            dy = rotated_dy;
#ifdef F_ORDER
            left_idx = (prev_x+rotated_dx) + (prev_y+rotated_dy)*NX_PADDED;
#else
            left_idx = (prev_x+rotated_dx)*NY_PADDED + (prev_y+rotated_dy);
#endif
            if (!mask_array[left_idx] && label_array[left_idx]==segment_label) {
                if ((mapping_array[left_idx]) & IS_THINCHANNEL) {
                    if (n_turns>1) {
                        break;
                    }
                } else {
                    atomic_or(&mapping_array[left_idx],IS_LEFTFLANK);
                    atomic_or(&label_array[left_idx],LEFT_FLANK_ADDITION);
                }
            }
        }

        // Step further downstream if necessary
        prev_idx = idx;
        idx = link_array[idx];
        // Stop once we've stepped past a major confluence or an inflow pixel
        // (provided the next pixel is not masked)
        if (!mask_array[idx] && ( ((mapping_array[prev_idx]) & IS_MAJORCONFLUENCE)
                                  || ((mapping_array[prev_idx]) & IS_MAJORINFLOW)
                                  || ((mapping_array[prev_idx]) & IS_MINORINFLOW)
                                  ) ) {
            break;
        }
    }
    return;
}
#endif
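
The rotation at the heart of the loop above can be checked in isolation: (dx,dy) -> (dx-dy, dx+dy) rotates a step vector by 45 degrees but scales diagonal steps by 2, and dividing each component by clamp(|c|,1,2) renormalizes it to {-1,0,+1}. This standalone C demo (not part of the code base) walks one full turn around the 8-neighborhood starting from an eastward step.

#include <stdio.h>

static signed char renorm(signed char c) {
    signed char a = (signed char)(c < 0 ? -c : c);   /* |c| */
    if (a < 1) a = 1;                                /* clamp(|c|, 1, 2) */
    if (a > 2) a = 2;
    return (signed char)(c / a);
}

int main(void) {
    signed char dx = 1, dy = 0;                      /* step from prev pixel: east */
    for (int turn = 1; turn <= 8; turn++) {
        signed char rotated_dx = (signed char)(dx - dy);
        signed char rotated_dy = (signed char)(dx + dy);
        dx = renorm(rotated_dx);
        dy = renorm(rotated_dy);
        /* Prints (1,1), (0,1), (-1,1), (-1,0), (-1,-1), (0,-1), (1,-1), (1,0):
         * all 8 neighbors, returning to the starting direction */
        printf("turn %d: (%+d, %+d)\n", turn, dx, dy);
    }
    return 0;
}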

#ifdef KERNEL_SUBSEGMENT_FLANKS
__kernel void subsegment_flanks(
        __global const float2 *seed_point_array,
        __global const bool   *mask_array,
        __global const float2 *uv_array,
        __global const uint   *mapping_array,
        __global const uint   *count_array,
        __global const uint   *link_array,
        __global       uint   *label_array
    )
{
    // For every non-left-flank hillslope pixel...

    const uint global_id = get_global_id(0u)+get_global_id(1u)*get_global_size(0u);
    __private uint idx, hillslope_idx, n_steps=0u;
    __private float dl=0.0f, dt=DT_MAX;
    __private float2 uv1_vec, uv2_vec, dxy1_vec, dxy2_vec,
                     vec = seed_point_array[global_id], next_vec;

    // Remember the hillslope pixel we start from
    idx = get_array_idx(vec);
    hillslope_idx = idx;
    // Integrate downstream until a thin-channel or left-flank pixel is reached
    while (!mask_array[idx] && ((~mapping_array[idx])&IS_LEFTFLANK)
           && ((~mapping_array[idx])&IS_THINCHANNEL) && n_steps<(MAX_N_STEPS-1)) {
        compute_step_vec(dt, uv_array, &dxy1_vec, &dxy2_vec, &uv1_vec, &uv2_vec,
                         vec, &next_vec, &idx);
        if (segment_runge_kutta_step(&dt, &dl, &dxy1_vec, &dxy2_vec,
                                     &vec, &next_vec, &n_steps, &idx))
            break;
    }
    if (mapping_array[idx]&IS_LEFTFLANK) {
        // We've reached a left-flank pixel, so flag the source hillslope
        // pixel as left-flank too
        // No need for an atomic here since we're writing to the source pixel
        label_array[hillslope_idx] |= LEFT_FLANK_ADDITION;
//      atomic_or(&label_array[hillslope_idx],LEFT_FLANK_ADDITION);
    }
    return;
}
#endif
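
The LEFT_FLANK_ADDITION trick relies on the flag bit sitting above any pixel index used as a segment label; its real value is set in info.h, so the single high bit in this C sketch is only an assumption for illustration. OR-ing it in marks a label as left-flank without destroying the underlying segment id, which can be recovered by masking:

#include <stdio.h>

#define LEFT_FLANK_ADDITION_DEMO (1u << 31)   /* hypothetical value */

int main(void) {
    unsigned label   = 48321u;                            /* segment id = seed pixel index */
    unsigned flagged = label | LEFT_FLANK_ADDITION_DEMO;  /* mark as left flank */
    printf("flagged label: 0x%08x\n", flagged);
    printf("is left flank: %d\n", (flagged & LEFT_FLANK_ADDITION_DEMO) != 0);
    printf("segment id:    %u\n", flagged & ~LEFT_FLANK_ADDITION_DEMO);
    return 0;
}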

/*
 * Cross-references (condensed from the generated documentation):
 *   info.h defines DT_MAX, MAX_N_STEPS, NX_PADDED, NY_PADDED,
 *     SEGMENTATION_THRESHOLD, LEFT_FLANK_ADDITION and the mapping flags
 *     IS_THINCHANNEL, IS_MAJORCONFLUENCE, IS_MAJORINFLOW, IS_MINORINFLOW,
 *     IS_LEFTFLANK.
 *   get_array_idx() (essentials.cl) computes the array index of the padded
 *     grid pixel pointed to by a float2 grid position vector.
 *   compute_step_vec() (computestep.cl) and segment_runge_kutta_step()
 *     together compute a single 2nd-order Runge-Kutta integration step
 *     along a streamline.
 */