// compute y = B*x (B is stored in SCOO format [cols, rows, values, offsets, numPacks, numRows])
// NUM_ROWS_PER_SLICE is computed based on sparsity
// (the kernel name and the cols/values/x/y parameters were dropped from the
//  excerpt; they are reconstructed here from their uses in the body)
#include <cstdint>

template <const uint32_t THREADS_PER_BLOCK, const uint32_t NUM_ROWS_PER_SLICE, const uint32_t LANE_SIZE>
__global__ void sliced_coo_kernel(
        const uint32_t numRows,        // number of rows of B
        const uint32_t numPacks,       // number of slices; one thread block each
        const uint32_t * cols,         // column index of each nonzero
        const uint16_t * rows,         // row index of each nonzero, relative to its slice
        const float    * values,       // value of each nonzero
        const uint32_t * offsets,      // offsets[p]..offsets[p+1]-1 = nonzeros of slice p
        const float    * x,            // input vector
        float          * y)            // output vector
{
    const int thread_lane = threadIdx.x & (LANE_SIZE-1);    // == threadIdx.x % LANE_SIZE
    const int row_lane    = threadIdx.x / LANE_SIZE;

    __shared__ float sdata[NUM_ROWS_PER_SLICE][LANE_SIZE];  // LANE_SIZE partial sums per slice row

    const uint32_t packNo = blockIdx.x;
    // the last slice may contain fewer than NUM_ROWS_PER_SLICE rows
    const uint32_t limit  = ((packNo == numPacks-1) ? ((numRows-1) % NUM_ROWS_PER_SLICE) + 1
                                                    : NUM_ROWS_PER_SLICE);

    const uint32_t begin = offsets[packNo];      // first nonzero of this slice
    const uint32_t end   = offsets[packNo+1];    // one past its last nonzero

    // clear all accumulators (the full compile-time range rather than limit,
    // so the loop trip count is uniform across the block)
    for (uint32_t i = row_lane; i < NUM_ROWS_PER_SLICE; i += THREADS_PER_BLOCK/LANE_SIZE)
        sdata[i][thread_lane] = 0;
    __syncthreads();
    // each thread strides over the slice's nonzeros, THREADS_PER_BLOCK apart
    for (uint32_t index = begin + threadIdx.x; index < end; index += THREADS_PER_BLOCK) {
        const uint32_t col   = cols[index];
        const uint16_t row   = rows[index];
        const float    value = values[index];

        // fetch x[col] from texture cache
        const float input = fetch_x(col, x) * value;
        // threads of the same lane may target the same row concurrently
        atomic_add(&sdata[row][thread_lane], input);
    }
    __syncthreads();
    // reduce the LANE_SIZE partial sums of each row; afterwards the sum of
    // row i is available in column i % LANE_SIZE.  The loop runs over the
    // full compile-time range (not limit) so that every thread reaches each
    // __syncthreads() below the same number of times.
    for (uint32_t i = row_lane; i < NUM_ROWS_PER_SLICE; i += THREADS_PER_BLOCK/LANE_SIZE) {
        float * p = sdata[i];            // this row's LANE_SIZE partial sums
        // rotate the reduction by i so that different rows' sums land in
        // different shared-memory banks, keeping the final gather conflict-free
        const int des = (thread_lane + i) & (LANE_SIZE-1);

        // tree reduction, assuming LANE_SIZE is a power of two <= 32
        if (LANE_SIZE > 16 && thread_lane < 16) p[des] += p[(des+16) & (LANE_SIZE-1)];
        __syncthreads();
        if (LANE_SIZE > 8  && thread_lane < 8)  p[des] += p[(des+8)  & (LANE_SIZE-1)];
        __syncthreads();
        if (LANE_SIZE > 4  && thread_lane < 4)  p[des] += p[(des+4)  & (LANE_SIZE-1)];
        __syncthreads();
        if (LANE_SIZE > 2  && thread_lane < 2)  p[des] += p[(des+2)  & (LANE_SIZE-1)];
        __syncthreads();
        if (LANE_SIZE > 1  && thread_lane < 1)  p[des] += p[(des+1)  & (LANE_SIZE-1)];
        __syncthreads();
    }
    // first global row covered by this slice
    const uint32_t actualRow = packNo * NUM_ROWS_PER_SLICE;

    // gather: the stride THREADS_PER_BLOCK is a multiple of LANE_SIZE, so
    // r stays congruent to threadIdx.x modulo LANE_SIZE and thread_lane ==
    // r % LANE_SIZE is exactly the column holding the sum of slice row r
    for (uint32_t r = threadIdx.x; r < limit; r += THREADS_PER_BLOCK)
        y[actualRow+r] = sdata[r][thread_lane];
}
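The kernel calls two device helpers, fetch_x and atomic_add, that the listing leaves undefined. Below is a minimal sketch of plausible definitions, not the original ones: the excerpt's comment says x[col] is fetched through the texture cache (CUDA's old texture-reference API, removed in CUDA 12), so this sketch substitutes the read-only-cache intrinsic __ldg as the modern equivalent, and maps atomic_add onto the native float atomicAdd (compute capability 2.0 and newer).

// Sketch only: assumed implementations of the two undefined helpers.
__device__ float fetch_x(const uint32_t i, const float * x)
{
    return __ldg(x + i);       // cached read of x[i] (sm_35 and newer)
}

__device__ void atomic_add(float * address, float val)
{
    atomicAdd(address, val);   // native float atomicAdd (sm_20 and newer)
}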
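A hypothetical host-side launch follows, with one thread block per slice (the kernel reads its slice index from blockIdx.x). The template values are illustrative only: the listing's header says NUM_ROWS_PER_SLICE is computed from the sparsity of B, and the sdata declaration bounds it by what fits in shared memory. sliced_coo_kernel is the name assumed in the reconstructed signature above, and the d_-prefixed variables are assumed device pointers holding the SCOO arrays and the two vectors.

// Sketch only: one block per pack, THREADS threads each.
const uint32_t THREADS    = 256;   // THREADS_PER_BLOCK, a multiple of LANE
const uint32_t SLICE_ROWS = 64;    // NUM_ROWS_PER_SLICE: 64*32*4 B = 8 KB of shared memory
const uint32_t LANE       = 32;    // LANE_SIZE, a power of two <= 32

sliced_coo_kernel<THREADS, SLICE_ROWS, LANE><<<numPacks, THREADS>>>(
        numRows, numPacks, d_cols, d_rows, d_values, d_offsets, d_x, d_y);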