#include <stdio.h>
#include <stdlib.h> // for exit() in gpuAssert
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code); // honor the abort flag, which was declared but never used
    }
}
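// gpuErrchk wraps API calls that return cudaError_t. Kernel launches return
// nothing, so they are checked with the usual two-step pattern (applied in
// main() below):
//   kernel<<<grid, block>>>(...);
//   gpuErrchk(cudaPeekAtLastError());   // catches launch-configuration errors
//   gpuErrchk(cudaDeviceSynchronize()); // catches errors raised during execution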
#define MAX_CLASSES 2
#define max_threads 64
//#define MAX_FEATURES 65
__device__ __constant__ int d_MAX_SIZE;
__device__ __constant__ int d_N_TESTS; // total number of tests: the stride between per-class result blocks
__device__ __constant__ int offset;    // block-index offset for the current launch
// Tree reduction of the per-thread partial sums held in shared memory.
// Each class occupies a contiguous blockDim.x-wide segment, so the shared
// arrays assume blockDim.x <= max_threads and a power-of-two block size.
__device__ void rules_points_reduction(float points[max_threads * MAX_CLASSES], int nrules[max_threads * MAX_CLASSES]){
    float psum[MAX_CLASSES];
    int nsum[MAX_CLASSES];
    // Cache this thread's partial sums in registers.
    for (int i = 0; i < MAX_CLASSES; i++){
        psum[i] = points[threadIdx.x + i * blockDim.x];
        nsum[i] = nrules[threadIdx.x + i * blockDim.x];
    }
    __syncthreads();
    // Block-level halving steps, down to 64 active threads.
    for (int s = blockDim.x / 2; s > 32; s >>= 1){
        if (threadIdx.x < s){
            for (int i = 0; i < MAX_CLASSES; i++){
                points[threadIdx.x + i * blockDim.x] = psum[i] = psum[i] + points[threadIdx.x + s + i * blockDim.x];
                nrules[threadIdx.x + i * blockDim.x] = nsum[i] = nsum[i] + nrules[threadIdx.x + s + i * blockDim.x];
            }
        }
        __syncthreads();
    }
    // Final steps within the first warp. Implicit warp-synchronous execution
    // is not guaranteed on Volta and later, so rather than relying on
    // volatile shared-memory accesses, separate the read and write phases of
    // each step with __syncwarp().
    if (threadIdx.x < 32){
        for (int s = 32; s >= 1; s >>= 1){
            if (blockDim.x >= 2 * s){
                for (int i = 0; i < MAX_CLASSES; i++){
                    psum[i] += points[threadIdx.x + s + i * blockDim.x];
                    nsum[i] += nrules[threadIdx.x + s + i * blockDim.x];
                }
            }
            __syncwarp();
            for (int i = 0; i < MAX_CLASSES; i++){
                points[threadIdx.x + i * blockDim.x] = psum[i];
                nrules[threadIdx.x + i * blockDim.x] = nsum[i];
            }
            __syncwarp();
        }
    }
}
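// For reference: a minimal sketch of a per-warp sum using shuffle intrinsics
// (CUDA 9+), which needs no shared memory or volatile. warp_reduce_sum is an
// illustrative helper, not part of the original code; after the loop, lane 0
// holds the sum over the full warp.
__device__ float warp_reduce_sum(float v){
    for (int s = 16; s > 0; s >>= 1)
        v += __shfl_down_sync(0xffffffffu, v, s);
    return v;
}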
__device__ void d_get_THE_prediction(short k, float* finalpoints, int* gn_rules)
{
    //int max;                     // used only by the commented-out rule induction below
    //short true_label, n_items;
    __shared__ float points[max_threads * MAX_CLASSES];
    __shared__ int nrules[max_threads * MAX_CLASSES];
    //__shared__ short items[MAX_FEATURES], ele[MAX_FEATURES];
    //__shared__ int max2;
    for (int i = 0; i < MAX_CLASSES; i++)
    {
        points[threadIdx.x + i * blockDim.x] = 1;
        nrules[threadIdx.x + i * blockDim.x] = 1;
    }
    if (threadIdx.x == 0) {
        if (k == 1){
            nrules[0] = 1;
            nrules[blockDim.x] = 1;
        }
        //max2 = GetBinCoeff_l_d(n_items, k);
    }
    __syncthreads();
    //max = max2;
    //d_induce_rules(items, ele, n_items, k, max, nrules, points);
    __syncthreads();
    rules_points_reduction(points, nrules);
    if (threadIdx.x == 0){
        // Per-class results are stored with stride d_N_TESTS to match the
        // host allocation of MAX_CLASSES * N_TESTS elements; using
        // blockDim.x as the stride would make class 1 of one test alias
        // class 0 of another test.
        for (int i = 0; i < MAX_CLASSES; i++){
            gn_rules[(blockIdx.x + offset) + i * d_N_TESTS] += nrules[i * blockDim.x];
            finalpoints[(blockIdx.x + offset) + i * d_N_TESTS] += points[i * blockDim.x];
        }
        printf("block %d k%d %f %f %d %d\n", (blockIdx.x + offset), k,
               finalpoints[(blockIdx.x + offset)],
               finalpoints[(blockIdx.x + offset) + d_N_TESTS],
               gn_rules[(blockIdx.x + offset)],
               gn_rules[(blockIdx.x + offset) + d_N_TESTS]);
    }
}
__global__ void lazy_supervised_classification_kernel(int k, float* finalpoints, int* n_rules){
    d_get_THE_prediction(k, finalpoints, n_rules);
}
int main() {
    //freopen("output.txt","w", stdout);
    int N_TESTS = 10000;
    int MAX_SIZE = 3;
    float *finalpoints = (float*)calloc(MAX_CLASSES * N_TESTS, sizeof(float));
    int *nruls = (int*)calloc(MAX_CLASSES * N_TESTS, sizeof(int));
    float *d_finalpoints = 0;
    int *d_nruls = 0;
    gpuErrchk(cudaMalloc(&d_finalpoints, MAX_CLASSES * N_TESTS * sizeof(float)));
    gpuErrchk(cudaMemset(d_finalpoints, 0, MAX_CLASSES * N_TESTS * sizeof(float)));
    gpuErrchk(cudaMalloc(&d_nruls, MAX_CLASSES * N_TESTS * sizeof(int)));
    gpuErrchk(cudaMemset(d_nruls, 0, MAX_CLASSES * N_TESTS * sizeof(int)));
    gpuErrchk(cudaMemcpyToSymbol(d_MAX_SIZE, &MAX_SIZE, sizeof(int), 0, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpyToSymbol(d_N_TESTS, &N_TESTS, sizeof(int), 0, cudaMemcpyHostToDevice));
    int step = max_threads, h_offset = 0; // h_offset mirrors the device constant `offset`
    for (int k = 1; k < MAX_SIZE; k++){
        // Debug-sized sweep: a full run would advance h_offset up to N_TESTS - step.
        for (h_offset = 0; h_offset < max_threads; h_offset += step){
            gpuErrchk(cudaMemcpyToSymbol(offset, &h_offset, sizeof(int), 0, cudaMemcpyHostToDevice));
            lazy_supervised_classification_kernel<<<step, max_threads>>>(k, d_finalpoints, d_nruls);
            gpuErrchk(cudaPeekAtLastError());
            gpuErrchk(cudaDeviceSynchronize());
        }
        // Tail launch covering the remaining tests (debug aid; comment out
        // these lines to run the main sweep only).
        gpuErrchk(cudaMemcpyToSymbol(offset, &h_offset, sizeof(int), 0, cudaMemcpyHostToDevice));
        lazy_supervised_classification_kernel<<<3, max_threads>>>(k, d_finalpoints, d_nruls);
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
    }
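    // Illustrative check (not in the original flow): copy the accumulated
    // results back and print the first few per-class totals, so a run
    // produces host-visible output beyond the in-kernel printf.
    gpuErrchk(cudaMemcpy(finalpoints, d_finalpoints, MAX_CLASSES * N_TESTS * sizeof(float), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(nruls, d_nruls, MAX_CLASSES * N_TESTS * sizeof(int), cudaMemcpyDeviceToHost));
    for (int t = 0; t < 3; t++)
        printf("test %d: points=(%f, %f) rules=(%d, %d)\n", t,
               finalpoints[t], finalpoints[t + N_TESTS],
               nruls[t], nruls[t + N_TESTS]);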
    gpuErrchk(cudaFree(d_finalpoints));
    gpuErrchk(cudaFree(d_nruls));
    free(finalpoints);
    free(nruls);
    gpuErrchk(cudaDeviceReset());
    return 0;
}