1 #ifndef __KERNEL_CONTRIB__
2 #define __KERNEL_CONTRIB__
// Kernel: builds the list of candidate positions to test around each snake node.
// NOTE(review): behavior inferred from names only — confirm against the .cu implementation.
//   d_snake         : device array of snake (active contour) nodes
//   liste_positions : output, presumably one uint4 per candidate position
//   nb_pix_max      : output, presumably the max pixel count over tested segments
//   pas             : step (pixel spacing) between candidate positions — TODO confirm
//   nb_nodes        : number of nodes in the snake
//   h, l            : presumably image height / width (French hauteur / largeur)
__global__ void liste_positions_a_tester(snake_node_gpu * d_snake, uint4 * liste_positions, uint32 * nb_pix_max,
int pas, int nb_nodes, int h, int l);
// Kernel: computes per-segment contributions along the current snake from the
// cumulated image sums. NOTE(review): inferred from names — verify in the .cu file.
//   d_snake          : device array of snake nodes
//   nb_nodes         : number of nodes in the snake
//   cumul_1/x/x2     : presumably row-cumulated sums of 1, x and x^2 (integral-image style)
//   l                : presumably image width (largeur)
//   liste_pix        : scratch/output list of pixel coordinates — TODO confirm
//   gsombloc         : per-block partial sums, later reduced by somsom_snake — TODO confirm
//   d_table_freeman  : lookup table for Freeman chain codes — TODO confirm
__global__ void calcul_contribs_segments_snake(snake_node_gpu * d_snake, int nb_nodes,
uint64 * cumul_1, uint64 * cumul_x, uint64 * cumul_x2,
int l, uint2 * liste_pix, uint64 * gsombloc, int * d_table_freeman);
// Kernel: final reduction ("somme des sommes") of the per-block partial sums
// produced by calcul_contribs_segments_snake, presumably accumulated back into
// the snake node structures. NOTE(review): inferred from names — TODO confirm.
//   somblocs  : per-block partial sums to reduce
//   nb_nodes  : number of snake nodes
//   nb_bl_seg : number of blocks per segment — TODO confirm
//   d_snake   : device snake, presumably the reduction destination
__global__ void somsom_snake(uint64 * somblocs, int nb_nodes, unsigned int nb_bl_seg, snake_node_gpu * d_snake);
// Kernel: "full" variant of the segment-contribution computation, evaluating all
// candidate points at once (16 candidates per node, judging by the *_x16 names —
// TODO confirm). NOTE(review): all semantics inferred from names.
//   d_snake          : device array of snake nodes
//   nb_nodes         : number of snake nodes
//   liste_points     : candidate points produced by liste_positions_a_tester — TODO confirm
//   npix_max         : max pixels per segment, presumably used to size the work grid
//   cumul_1/x/x2     : presumably cumulated sums of 1, x, x^2 over image rows
//   d_codes_x16      : output segment codes per candidate — TODO confirm
//   l                : presumably image width
//   liste_pix        : scratch/output pixel list
//   gsombloc         : per-block partial sums, reduced later by somsom_full
//   d_table_freeman  : Freeman chain-code lookup table — TODO confirm
//   d_freemans_x16   : output Freeman codes per candidate — TODO confirm
//   pairs            : presumably selects even vs. odd nodes (French "pairs" = even) — TODO confirm
__global__ void calcul_contribs_segments_blocs_full(snake_node_gpu * d_snake, int nb_nodes, uint4 * liste_points, uint32 npix_max,
uint64 * cumul_1, uint64 * cumul_x, uint64 * cumul_x2, int * d_codes_x16,
int l, uint2 * liste_pix, uint64 * gsombloc, int * d_table_freeman,
uint4 * d_freemans_x16, bool pairs);
// Kernel: reduction of per-block partial sums for the "full" path
// (counterpart of somsom_snake for calcul_contribs_segments_blocs_full).
// NOTE(review): inferred from names — TODO confirm against implementation.
//   somblocs  : per-block partial sums to reduce
//   nb_nodes  : number of snake nodes
//   nb_bl_seg : number of blocks per segment — TODO confirm
//   somsom    : output of the reduction
//   pairs     : presumably even/odd node selector, must match the contrib kernel call
__global__ void somsom_full(uint64 * somblocs, int nb_nodes, unsigned int nb_bl_seg, uint64 * somsom, bool pairs);
// Device helper: presumably returns whether arg1 < arg2 ("inf" = inférieur),
// possibly with tolerance handling — TODO confirm in the .cu implementation.
__device__ bool test_inf_gpu(double arg1, double arg2);
// Kernel: computes global statistics of the current snake and, presumably, the
// resulting (log-)likelihood ("vrais" = vraisemblance). NOTE(review): inferred
// from names — TODO confirm.
//   d_snake       : device array of snake nodes
//   nnodes        : number of snake nodes
//   d_stats_snake : output statistics vector
//   vrais_min     : presumably in/out best (minimum) likelihood value so far
//   cumul_1/x/x2  : cumulated image sums
//   TABLE_CODAGE  : segment coding lookup table — TODO confirm
//   l             : presumably image width
__global__ void calcul_stats_snake(snake_node_gpu * d_snake, int nnodes, int64 * d_stats_snake, double * vrais_min,
uint64 * cumul_1, uint64 * cumul_x, uint64 * cumul_x2, int * TABLE_CODAGE, uint32 l);
// Kernel: presumably subtracts ("soustrait") the contribution of the two
// segments adjacent to each node from the reference statistics, preparing for
// per-node move evaluation. NOTE(review): inferred from names — TODO confirm.
//   d_snake       : device array of snake nodes
//   d_stats_snake : current snake statistics
//   d_stats_ref   : output reference statistics with node contributions removed — TODO confirm
//   cumul_1/x/x2  : cumulated image sums
//   TABLE_CODAGE  : segment coding lookup table
//   l             : presumably image width
__global__ void soustrait_aux_stats_2N_segments_noeud(snake_node_gpu * d_snake, int64 * d_stats_snake, int64 * d_stats_ref,
uint64 * cumul_1, uint64 * cumul_x, uint64 * cumul_x2,
int * TABLE_CODAGE, uint32 l);
// Kernel: evaluates the full set of candidate node moves — presumably combining
// the precomputed contributions with the reference stats, computing a likelihood
// per candidate, and flagging which nodes should move. NOTE(review): all
// semantics inferred from names — TODO confirm against the .cu implementation.
//   d_snake        : device array of snake nodes
//   nnodes         : number of snake nodes
//   pairs          : presumably even/odd node selector, must match earlier kernels
//   d_stats_snake  : current snake statistics
//   d_stats_ref    : reference stats (node contribution removed) — TODO confirm
//   d_stats        : output per-candidate statistics — TODO confirm
//   d_contribs     : per-candidate segment contributions from the contrib kernels
//   d_liste_points : candidate positions
//   code_segment   : segment codes (see d_codes_x16) — TODO confirm
//   d_freemans     : Freeman codes per candidate
//   d_table_codes  : coding lookup table
//   cumul_1/x/x2   : cumulated image sums
//   h, l           : presumably image height / width
//   vrais          : output per-candidate likelihoods — TODO confirm
//   vrais_min      : in/out best likelihood
//   move           : output flags, presumably true where a node move improves vrais_min
__global__ void calcul_stats_full(snake_node_gpu * d_snake, int nnodes, bool pairs, int64 * d_stats_snake,
int64 * d_stats_ref, int64 * d_stats, uint64 * d_contribs,
uint4 * d_liste_points, int * code_segment, uint4 * d_freemans,
int * d_table_codes, uint64 * cumul_1, uint64 * cumul_x, uint64 * cumul_x2,
uint32 h, uint32 l, double * vrais, double * vrais_min, bool * move);
33 #endif // __KERNEL_CONTRIB__