Convert the following CUDA code to AMD GPU code: cuda #include <chrono> #include <iostream> //Kernel definition template<typename T> __global__ void copyKernel (T* out, T* in, const unsigned int N) { const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x) { const unsigned el_id = i; ((T*) out)[el_id] = ((T*) in)[el_id]; // ((T*) out)[(1<<29) + 100] = ((T*) in)[0]; } } //template<typename T> //__global__ //void initKernel (T* out) //{ // ((T*) out)[threadIdx.x] = threadIdx.x; //} //Kernel definition template<typename T> __global__ void plusKernel (T* out, T* in, const unsigned int N) { const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x) { const unsigned el_id = i; ((T*) out)[el_id] += ((T*) in)[el_id]; } } int main (int argc, char * argv[]) { std::cout<<"Arguments:"; for (int i = 0; i <argc; i++) { std::cout<<argv[i]; } std::cout<<std::endl; if( argc != 4 ) { std::cout<<"Error: InvalidArguments" << std::endl; } using namespace std::chrono; unsigned int N = 1<<29; //N is the Number of elements in the Array double lastMeasurementTimeSpan = 100.0f;//we are not expecting measurements greater 100 s bool stopMeasurement = false; std::cout << "np.array("; //output the results so that they can be read easily by python bool usePlusKernel = false; std::cout << "("; for (int M = 1; M <= 4; M++) { std::cout << "("; for(int i = 1; i <= 32; i++) { if(!stopMeasurement) { unsigned int m = 32 * i; // int* carray; void* out; void* in; void* deviceArray; void* hostArray; // malloc(carray); cudaError_t err1 = cudaSuccess; cudaError_t err2 = cudaSuccess; //standard allocation if(strcmp(argv[2],"-standard") == 0) { err1 = cudaMallocHost(&hostArray, N*4); err2 = cudaMalloc(&deviceArray, N*4); } //writeCombined if(strcmp(argv[2],"-writecombined") == 0) { err1 = cudaHostAlloc(&hostArray, N*4, cudaHostAllocWriteCombined); err2 = cudaMalloc(&deviceArray, N*4); } //unifiedMemorz if(strcmp(argv[2],"-unified") == 0) { err1 = cudaMallocManaged(&hostArray, N*4); err2 = cudaMallocManaged(&deviceArray, N*4); } if (err1 != cudaSuccess) { std::cout << "Allocation ERROR: " << cudaGetErrorString(err1) << std::endl; } if (err2 != cudaSuccess) { std::cout << "Allocation ERROR2: " << cudaGetErrorString(err2) << std::endl; } if(strcmp(argv[1],"-h2d") == 0) { in = hostArray; out = deviceArray; } else { in = deviceArray; out = hostArray; } if(strcmp(argv[3],"-plus") == 0) { usePlusKernel = true; } if(strcmp(argv[3],"-copy") == 0) { usePlusKernel = false; } //std::cout << "in:" << in << "out:" << out << "hostArray:" << hostArray << "deviceArray:" << deviceArray; //make a warmup // copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N); // cudaDeviceSynchronize(); double currentTimeSum = 0; for(int x = 1; x <= 5; x++)//run 10 times for better measurement accuracy { // if(strcmp(argv[2],"-unified") == 0) // { if(strcmp(argv[1],"-h2d") == 0) { cudaMemset(out, 4, N*4); cudaDeviceSynchronize(); memset(in, 111, N*4); cudaDeviceSynchronize(); } if(strcmp(argv[1],"-d2h") == 0) { cudaMemset(in, 111, N*4); cudaDeviceSynchronize(); memset(out, 4, N*4); cudaDeviceSynchronize(); } // } //Time Measururement Point 1 high_resolution_clock::time_point timeBefore = high_resolution_clock::now(); //run kernel here if (usePlusKernel) { plusKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N); } else { copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N); } 
cudaDeviceSynchronize(); //Time Measurement Point 2 high_resolution_clock::time_point timeAfter = high_resolution_clock::now(); auto lstErr = cudaGetLastError(); if ( cudaSuccess != lstErr ) { std::cout << lstErr << ": " << cudaGetErrorString(lstErr) << std::endl; } //Output Time Measurement Result duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore); currentTimeSum += time_span.count(); cudaDeviceSynchronize(); } if(false) { //perform error checking void* checkAry = out; if(strcmp(argv[1],"-h2d") == 0) { //copy Out to In so we can read it on the host // cudaMemcpy(in, out, N*4, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); checkAry = in;//as we copied to in, we need to check this now } char targetChar = 111; if(strcmp(argv[3],"-plus") == 0) { targetChar = 111 + 4; } for (int pos = 0; pos < N*4; pos++) { if (static_cast<char*> (checkAry)[pos] != targetChar) { std::cout << "Copy Misstake at:" << pos <<"with:"<< static_cast<int>(static_cast<char*> (checkAry)[pos]) << "instead of:" <<static_cast<int>( targetChar) << std::endl; } } } if(strcmp(argv[2],"-unified") == 0) { cudaFree(deviceArray); cudaFree(hostArray); } else { cudaFreeHost(hostArray); cudaFree(deviceArray); } //it seems we cant use automatic measurement stops if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1) { stopMeasurement = true; } else { // lastMeasurementTimeSpan = time_span.count(); std::cout << currentTimeSum; } } else { std::cout << 0.0; } if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop } std::cout << ")"; if( M != 4) {std::cout << ",";} //output a , if we aren't the last element of the for loop } std::cout << ")"; std::cout << ")" << std::endl; return 0; }
#include <hip/hip_runtime.h> #include <chrono> #include <iostream> //Kernel definition template<typename T> __global__ void copyKernel (T* out, T* in, const unsigned int N) { const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x) { const unsigned el_id = i; ((T*) out)[el_id] = ((T*) in)[el_id]; // ((T*) out)[(1<<29) + 100] = ((T*) in)[0]; } } //template<typename T> //__global__ //void initKernel (T* out) //{ // ((T*) out)[threadIdx.x] = threadIdx.x; //} //Kernel definition template<typename T> __global__ void plusKernel (T* out, T* in, const unsigned int N) { const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x; for (unsigned int i= id; i < N; i = i + blockDim.x * gridDim.x) { const unsigned el_id = i; ((T*) out)[el_id] += ((T*) in)[el_id]; } } int main (int argc, char * argv[]) { std::cout<<"Arguments:"; for (int i = 0; i <argc; i++) { std::cout<<argv[i]; } std::cout<<std::endl; if( argc != 4 ) { std::cout<<"Error: InvalidArguments" << std::endl; } using namespace std::chrono; unsigned int N = 1<<29; //N is the Number of elements in the Array double lastMeasurementTimeSpan = 100.0f;//we are not expecting measurements greater 100 s bool stopMeasurement = false; std::cout << "np.array("; //output the results so that they can be read easily by python bool usePlusKernel = false; std::cout << "("; for (int M = 1; M <= 4; M++) { std::cout << "("; for(int i = 1; i <= 32; i++) { if(!stopMeasurement) { unsigned int m = 32 * i; // int* carray; void* out; void* in; void* deviceArray; void* hostArray; // malloc(carray); hipError_t err1 = hipSuccess; hipError_t err2 = hipSuccess; //standard allocation if(strcmp(argv[2],"-standard") == 0) { err1 = hipHostMalloc(&hostArray, N*4, hipHostMallocDefault); err2 = hipMalloc(&deviceArray, N*4); } //writeCombined if(strcmp(argv[2],"-writecombined") == 0) { err1 = hipHostAlloc(&hostArray, N*4, hipHostMallocWriteCombined); err2 = hipMalloc(&deviceArray, N*4); } //unifiedMemorz if(strcmp(argv[2],"-unified") == 0) { err1 = hipMallocManaged(&hostArray, N*4); err2 = hipMallocManaged(&deviceArray, N*4); } if (err1 != hipSuccess) { std::cout << "Allocation ERROR: " << hipGetErrorString(err1) << std::endl; } if (err2 != hipSuccess) { std::cout << "Allocation ERROR2: " << hipGetErrorString(err2) << std::endl; } if(strcmp(argv[1],"-h2d") == 0) { in = hostArray; out = deviceArray; } else { in = deviceArray; out = hostArray; } if(strcmp(argv[3],"-plus") == 0) { usePlusKernel = true; } if(strcmp(argv[3],"-copy") == 0) { usePlusKernel = false; } //std::cout << "in:" << in << "out:" << out << "hostArray:" << hostArray << "deviceArray:" << deviceArray; //make a warmup // copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N); // cudaDeviceSynchronize(); double currentTimeSum = 0; for(int x = 1; x <= 5; x++)//run 10 times for better measurement accuracy { // if(strcmp(argv[2],"-unified") == 0) // { if(strcmp(argv[1],"-h2d") == 0) { hipMemset(out, 4, N*4); hipDeviceSynchronize(); memset(in, 111, N*4); hipDeviceSynchronize(); } if(strcmp(argv[1],"-d2h") == 0) { hipMemset(in, 111, N*4); hipDeviceSynchronize(); memset(out, 4, N*4); hipDeviceSynchronize(); } // } //Time Measururement Point 1 high_resolution_clock::time_point timeBefore = high_resolution_clock::now(); //run kernel here if (usePlusKernel) { plusKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N); } else { copyKernel<<<M, m>>> (static_cast<int*> (out), static_cast<int*> (in), N); } 
hipDeviceSynchronize(); //Time Measurement Point 2 high_resolution_clock::time_point timeAfter = high_resolution_clock::now(); auto lstErr = hipGetLastError(); if ( hipSuccess != lstErr ) { std::cout << lstErr << ": " << hipGetErrorString(lstErr) << std::endl; } //Output Time Measurement Result duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore); currentTimeSum += time_span.count(); hipDeviceSynchronize(); } if(false) { //perform error checking void* checkAry = out; if(strcmp(argv[1],"-h2d") == 0) { //copy Out to In so we can read it on the host // hipMemcpy(in, out, N*4, hipMemcpyDeviceToHost); hipDeviceSynchronize(); checkAry = in;//as we copied to in, we need to check this now } char targetChar = 111; if(strcmp(argv[3],"-plus") == 0) { targetChar = 111 + 4; } for (int pos = 0; pos < N*4; pos++) { if (static_cast<char*> (checkAry)[pos] != targetChar) { std::cout << "Copy Misstake at:" << pos <<"with:"<< static_cast<int>(static_cast<char*> (checkAry)[pos]) << "instead of:" <<static_cast<int>( targetChar) << std::endl; } } } if(strcmp(argv[2],"-unified") == 0) { hipFree(deviceArray); hipFree(hostArray); } else { hipHostFree(hostArray); hipFree(deviceArray); } //it seems we cant use automatic measurement stops if(false)// (lastMeasurementTimeSpan- time_span.count() < 0.01 && i=1) { stopMeasurement = true; } else { // lastMeasurementTimeSpan = time_span.count(); std::cout << currentTimeSum; } } else { std::cout << 0.0; } if( i != 32) {std::cout << ",";} //output a , if we aren't the last element of the for loop } std::cout << ")"; if( M != 4) {std::cout << ",";} //output a , if we aren't the last element of the for loop } std::cout << ")"; std::cout << ")" << std::endl; return 0; }
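A side note on the allocation calls in this answer: hipHostAlloc is a deprecated alias in recent ROCm releases, and hipHostMalloc covers both the default and the write-combined cases. The sketch below shows the current spelling with error checking, assuming a recent ROCm toolchain; HIP_CHECK is a local helper defined here, not a ROCm-provided macro.

```cpp
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstddef>

// Local convenience macro for this sketch; not part of the HIP API.
#define HIP_CHECK(call)                                                  \
    do {                                                                 \
        hipError_t err_ = (call);                                        \
        if (err_ != hipSuccess) {                                        \
            std::fprintf(stderr, "HIP error %s at %s:%d\n",              \
                         hipGetErrorString(err_), __FILE__, __LINE__);   \
        }                                                                \
    } while (0)

int main() {
    const std::size_t bytes = std::size_t(1) << 20;
    void* pinned = nullptr;
    void* wc = nullptr;
    HIP_CHECK(hipHostMalloc(&pinned, bytes, hipHostMallocDefault));    // "-standard" case
    HIP_CHECK(hipHostMalloc(&wc, bytes, hipHostMallocWriteCombined));  // "-writecombined" case
    HIP_CHECK(hipHostFree(pinned));
    HIP_CHECK(hipHostFree(wc));
    return 0;
}
```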
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <time.h> #define N 289 __global__ void MatMul(float d_A[N][N], float d_B[N][N], float d_C[N][N]) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N && j < N) { for (int l = 0; l < N; l++) { //d_C[i][j] = d_C[i][j] + d_A[j][l] * d_B[l][i]; d_C[i][j] = d_C[i][j] + d_A[i][l] * d_B[l][j]; } } } __global__ void setElement(float d_A[N][N], float d_B[N][N], float d_C[N][N]) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N && j < N){ /* d_A[i][j] = i * (float)3.2 + j * (float)2.21; d_B[i][j] = i * (float)1.3 + j * (float)3.1; */ d_A[i][j] = 1.0; d_B[i][j] = 1.0; d_C[i][j] = (float)0; } } int main() { cudaError_t res = cudaSuccess; int m,n,k; m = n = k = N; int i,j; int ARRAY_SIZE = N * N; int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_A[N][N], h_B[N][N], h_C[N][N]; float (*d_A)[N], (*d_B)[N], (*d_C)[N]; res=cudaMalloc((void**) &d_A, ARRAY_BYTES); if(res!=cudaSuccess ){ printf("\nCuda error!"); return -1; } res=cudaMalloc((void**) &d_B, ARRAY_BYTES); if( res!=cudaSuccess ){ printf("\nCuda error!"); return -1; } res=cudaMalloc((void**) &d_C, ARRAY_BYTES); if( res!=cudaSuccess ){ printf("\nCuda error!"); return -1; } // Kernel invocation with CONVENIENT amount of blocks int xThreadsPerBlock=32; int yThreadsPerBlock=32; int xBlocks = (N+(xThreadsPerBlock-1))/xThreadsPerBlock; int yBlocks = (N+(yThreadsPerBlock-1))/yThreadsPerBlock; dim3 threadsPerBlock(xThreadsPerBlock,yThreadsPerBlock); dim3 numBlocks( xBlocks,yBlocks ); setElement<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C); cudaDeviceSynchronize(); res=cudaMemcpy(h_A, d_A, ARRAY_BYTES, cudaMemcpyDeviceToHost); if( res!=cudaSuccess){ printf("\nCuda error!"); return -1; } res=cudaMemcpy(h_B, d_B, ARRAY_BYTES, cudaMemcpyDeviceToHost); if( res!=cudaSuccess){ printf("\nCuda error!"); return -1; } res=cudaMemcpy(h_C, d_C, ARRAY_BYTES, cudaMemcpyDeviceToHost); if( res!=cudaSuccess){ printf("\nCuda error!"); return -1; } fprintf(stdout, "Here is the matrix A:\n\n"); for(i=0;i<m;i++) { for(j=0;j<k;j++) { fprintf(stdout, "%10.2f",h_A[i][j]); } fprintf(stdout, "\n"); } fprintf(stdout, "Here is the matrix B:\n\n"); for(i=0;i<k;i++) { for(j=0;j<n;j++) { fprintf(stdout, "%10.2f",h_B[i][j]); } fprintf(stdout, "\n"); } MatMul<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C); cudaDeviceSynchronize(); res=cudaMemcpy(h_C, d_C, ARRAY_BYTES, cudaMemcpyDeviceToHost); if( res!=cudaSuccess){ printf("\nCuda error!"); return -1; } fprintf(stdout, "Here is the matrix C:\n\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) { fprintf(stdout, "%10.2f",h_C[i][j]); } fprintf(stdout, "\n"); } // Clean up memory cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <time.h> #define N 289 __global__ void MatMul(float d_A[N][N], float d_B[N][N], float d_C[N][N]) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N && j < N) { for (int l = 0; l < N; l++) { //d_C[i][j] = d_C[i][j] + d_A[j][l] * d_B[l][i]; d_C[i][j] = d_C[i][j] + d_A[i][l] * d_B[l][j]; } } } __global__ void setElement(float d_A[N][N], float d_B[N][N], float d_C[N][N]) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < N && j < N){ /* d_A[i][j] = i * (float)3.2 + j * (float)2.21; d_B[i][j] = i * (float)1.3 + j * (float)3.1; */ d_A[i][j] = 1.0; d_B[i][j] = 1.0; d_C[i][j] = (float)0; } } int main() { hipError_t res = hipSuccess; int m,n,k; m = n = k = N; int i,j; int ARRAY_SIZE = N * N; int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); float h_A[N][N], h_B[N][N], h_C[N][N]; float (*d_A)[N], (*d_B)[N], (*d_C)[N]; res=hipMalloc((void**) &d_A, ARRAY_BYTES); if(res!=hipSuccess ){ printf("\nCuda error!"); return -1; } res=hipMalloc((void**) &d_B, ARRAY_BYTES); if( res!=hipSuccess ){ printf("\nCuda error!"); return -1; } res=hipMalloc((void**) &d_C, ARRAY_BYTES); if( res!=hipSuccess ){ printf("\nCuda error!"); return -1; } // Kernel invocation with CONVENIENT amount of blocks int xThreadsPerBlock=32; int yThreadsPerBlock=32; int xBlocks = (N+(xThreadsPerBlock-1))/xThreadsPerBlock; int yBlocks = (N+(yThreadsPerBlock-1))/yThreadsPerBlock; dim3 threadsPerBlock(xThreadsPerBlock,yThreadsPerBlock); dim3 numBlocks( xBlocks,yBlocks ); setElement<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C); hipDeviceSynchronize(); res=hipMemcpy(h_A, d_A, ARRAY_BYTES, hipMemcpyDeviceToHost); if( res!=hipSuccess){ printf("\nCuda error!"); return -1; } res=hipMemcpy(h_B, d_B, ARRAY_BYTES, hipMemcpyDeviceToHost); if( res!=hipSuccess){ printf("\nCuda error!"); return -1; } res=hipMemcpy(h_C, d_C, ARRAY_BYTES, hipMemcpyDeviceToHost); if( res!=hipSuccess){ printf("\nCuda error!"); return -1; } fprintf(stdout, "Here is the matrix A:\n\n"); for(i=0;i<m;i++) { for(j=0;j<k;j++) { fprintf(stdout, "%10.2f",h_A[i][j]); } fprintf(stdout, "\n"); } fprintf(stdout, "Here is the matrix B:\n\n"); for(i=0;i<k;i++) { for(j=0;j<n;j++) { fprintf(stdout, "%10.2f",h_B[i][j]); } fprintf(stdout, "\n"); } MatMul<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C); hipDeviceSynchronize(); res=hipMemcpy(h_C, d_C, ARRAY_BYTES, hipMemcpyDeviceToHost); if( res!=hipSuccess){ printf("\nCuda error!"); return -1; } fprintf(stdout, "Here is the matrix C:\n\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) { fprintf(stdout, "%10.2f",h_C[i][j]); } fprintf(stdout, "\n"); } // Clean up memory hipHostFree(h_A); hipHostFree(h_B); hipHostFree(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); }
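Note that the hipHostFree calls at the end of this answer are carried over from the original's cudaFreeHost calls on h_A/h_B/h_C, which are stack arrays that neither runtime allocated, so freeing them is undefined. A minimal sketch of a consistent pairing, assuming pinned host memory is actually wanted:

```cpp
#include <hip/hip_runtime.h>
#define N 289

int main() {
    const size_t ARRAY_BYTES = N * N * sizeof(float);
    // Allocate the host matrix with hipHostMalloc so that the matching
    // hipHostFree below is valid; a plain stack array must not be freed.
    float (*h_A)[N] = nullptr;
    if (hipHostMalloc((void**)&h_A, ARRAY_BYTES) != hipSuccess) return -1;
    h_A[0][0] = 1.0f;   // use the pinned buffer as usual
    hipHostFree(h_A);   // matching free
    return 0;
}
```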
Convert the following CUDA code to AMD GPU code: cuda #include <cuda_runtime_api.h> __global__ void scaleKernel(float *src, float *dst, float scale) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; dst[idx] = src[idx] * scale; } int main() { float *a_dev; float *b_dev; float *a = new float[128*100]; float *b = new float[128*100]; cudaSetDevice(0); cudaMalloc(&a_dev, sizeof(float)*128*100); cudaMalloc(&b_dev, sizeof(float)*128*100); cudaMemcpy (a_dev, a, sizeof(float)*1000, cudaMemcpyHostToDevice); scaleKernel<<<100, 128>>>(a_dev, b_dev, 4.0f); scaleKernel<<<50, 128>>>(a_dev, b_dev, 4.0f); scaleKernel<<<50, 128>>>(a_dev+50, b_dev+50, 4.0f); scaleKernel<<<100, 128>>>(a_dev, b_dev, 4.0f); cudaDeviceSynchronize(); cudaMemcpy (b, b_dev, sizeof(int)*1000, cudaMemcpyDeviceToHost); }
#include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> __global__ void scaleKernel(float *src, float *dst, float scale) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; dst[idx] = src[idx] * scale; } int main() { float *a_dev; float *b_dev; float *a = new float[128*100]; float *b = new float[128*100]; hipSetDevice(0); hipMalloc(&a_dev, sizeof(float)*128*100); hipMalloc(&b_dev, sizeof(float)*128*100); hipMemcpy (a_dev, a, sizeof(float)*1000, hipMemcpyHostToDevice); scaleKernel<<<100, 128>>>(a_dev, b_dev, 4.0f); scaleKernel<<<50, 128>>>(a_dev, b_dev, 4.0f); scaleKernel<<<50, 128>>>(a_dev+50, b_dev+50, 4.0f); scaleKernel<<<100, 128>>>(a_dev, b_dev, 4.0f); hipDeviceSynchronize(); hipMemcpy (b, b_dev, sizeof(int)*1000, hipMemcpyDeviceToHost); }
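hipcc accepts the triple-chevron launch syntax kept in this answer; the explicit hipLaunchKernelGGL macro is an equivalent alternative. A minimal sketch of the same first launch in that form:

```cpp
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* src, float* dst, float scale) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    dst[idx] = src[idx] * scale;
}

int main() {
    float *a_dev = nullptr, *b_dev = nullptr;
    hipMalloc(&a_dev, sizeof(float) * 128 * 100);
    hipMalloc(&b_dev, sizeof(float) * 128 * 100);
    // Same launch as scaleKernel<<<100, 128>>>(a_dev, b_dev, 4.0f):
    // grid, block, dynamic shared memory bytes, stream, then kernel args.
    hipLaunchKernelGGL(scaleKernel, dim3(100), dim3(128), 0, 0,
                       a_dev, b_dev, 4.0f);
    hipDeviceSynchronize();
    hipFree(a_dev);
    hipFree(b_dev);
    return 0;
}
```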
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> int static_cxx11_func(int); void test_functions() { auto x = static_cxx11_func(int(42)); std::cout << x << std::endl; } int main(int argc, char** argv) { test_functions(); std::cout << "this executable doesn't use cuda code, just call methods defined" << std::endl; std::cout << "in libraries that have cuda code" << std::endl; return 0; }
#include <hip/hip_runtime.h> #include <iostream> int static_cxx11_func(int); void test_functions() { auto x = static_cxx11_func(int(42)); std::cout << x << std::endl; } int main(int argc, char** argv) { test_functions(); std::cout << "this executable doesn't use cuda code, just call methods defined" << std::endl; std::cout << "in libraries that have cuda code" << std::endl; return 0; }
Convert the following CUDA code to AMD GPU code: cuda /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float* var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) { if (comp <= (var_2 * sinhf(+1.9369E-37f / (-1.2784E-36f + coshf(-1.5536E-42f + +0.0f + (var_3 * +1.1114E-35f)))))) { for (int i=0; i < var_1; ++i) { comp += var_6 + (-1.7184E34f + -1.3381E10f); var_4[i] = +1.5118E35f; comp += var_4[i] / (var_7 - (var_8 / (var_9 * -1.9421E-37f / atanf(-1.0252E-36f - +1.2662E-19f / var_10 - (+1.6486E-36f * +0.0f))))); comp += +1.7407E36f / (-1.1066E-30f + +1.1145E-35f); for (int i=0; i < var_5; ++i) { comp = var_11 * (-1.4938E-13f + var_12); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float* tmp_5 = initPointer( atof(argv[5]) ); int tmp_6 = atoi(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13); cudaDeviceSynchronize(); return 0; }
/* This is a automatically generated test. Do not modify */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float* var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) { if (comp <= (var_2 * sinhf(+1.9369E-37f / (-1.2784E-36f + coshf(-1.5536E-42f + +0.0f + (var_3 * +1.1114E-35f)))))) { for (int i=0; i < var_1; ++i) { comp += var_6 + (-1.7184E34f + -1.3381E10f); var_4[i] = +1.5118E35f; comp += var_4[i] / (var_7 - (var_8 / (var_9 * -1.9421E-37f / atanf(-1.0252E-36f - +1.2662E-19f / var_10 - (+1.6486E-36f * +0.0f))))); comp += +1.7407E36f / (-1.1066E-30f + +1.1145E-35f); for (int i=0; i < var_5; ++i) { comp = var_11 * (-1.4938E-13f + var_12); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float* tmp_5 = initPointer( atof(argv[5]) ); int tmp_6 = atoi(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13); hipDeviceSynchronize(); return 0; }
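One portability caveat in this generated test, shared by both versions: initPointer returns a plain malloc pointer that the kernel then writes through (var_4[i] = ...), which faults on a discrete GPU. A hedged sketch of a managed-memory variant; initPointerManaged is a hypothetical name, not from the source:

```cpp
#include <hip/hip_runtime.h>

// Hypothetical replacement for initPointer: managed memory is visible to
// both host and device, so the kernel's writes through var_4 are valid.
float* initPointerManaged(float v) {
    float* ret = nullptr;
    if (hipMallocManaged((void**)&ret, sizeof(float) * 10) != hipSuccess)
        return nullptr;
    for (int i = 0; i < 10; ++i) ret[i] = v;
    return ret;
}
```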
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <sys/time.h> #define NUM_BLOCKS 1000 #define NUM_THREADS 1000 #define ALL_THREADS NUM_BLOCKS * NUM_THREADS __device__ void is_prime(int number, int *output) { for (int i = 2; i*i < number; i++) { if (number%i == 0) { *output = 0; return; } } *output = 1; return; } __global__ void primes_kernel(int from, int to, int *range_ends, int ranges_count, int *output) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= ranges_count) { return; } int thread_from = from + (index == 0 ? 0 : range_ends[index-1]); int thread_to = from + range_ends[index]; for (int i = thread_from; i < thread_to; i++) { is_prime(i, &output[i-from]); } } int main(void) { struct timeval t1, t2; gettimeofday(&t1, 0); int from = 10, to = 20000; int interval = to - from; size_t size = sizeof(int) * interval; int *output = (int*) malloc(size); int ranges_count = ALL_THREADS > interval ? interval : ALL_THREADS; int small_interval = interval / ranges_count; int remainder = interval % ranges_count; int offset = 0; int *range_ends = (int*) malloc(sizeof(int) * ranges_count); for (int i = 0; i < ranges_count; i++) { offset += small_interval + (remainder > i ? 1 : 0); range_ends[i] = offset; } int *d_output, *d_range_ends; cudaMalloc((void**) &d_output, size); cudaMalloc((void**) &d_range_ends, sizeof(int)*ranges_count); cudaMemcpy(d_range_ends, range_ends, sizeof(int)*ranges_count, cudaMemcpyHostToDevice); primes_kernel<<<NUM_BLOCKS, NUM_THREADS>>>(from, to, d_range_ends, ranges_count, d_output); cudaMemcpy(output, d_output, size, cudaMemcpyDeviceToHost); cudaFree(d_output); cudaFree(d_range_ends); gettimeofday(&t2, 0); for (int i = 0; i < to-from; i ++) { if (output[i]) { printf("%d\n", from + i); } } double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; printf("Elapsed time: %3.1f ms \n", time); free(output); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> #define NUM_BLOCKS 1000 #define NUM_THREADS 1000 #define ALL_THREADS NUM_BLOCKS * NUM_THREADS __device__ void is_prime(int number, int *output) { for (int i = 2; i*i < number; i++) { if (number%i == 0) { *output = 0; return; } } *output = 1; return; } __global__ void primes_kernel(int from, int to, int *range_ends, int ranges_count, int *output) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index >= ranges_count) { return; } int thread_from = from + (index == 0 ? 0 : range_ends[index-1]); int thread_to = from + range_ends[index]; for (int i = thread_from; i < thread_to; i++) { is_prime(i, &output[i-from]); } } int main(void) { struct timeval t1, t2; gettimeofday(&t1, 0); int from = 10, to = 20000; int interval = to - from; size_t size = sizeof(int) * interval; int *output = (int*) malloc(size); int ranges_count = ALL_THREADS > interval ? interval : ALL_THREADS; int small_interval = interval / ranges_count; int remainder = interval % ranges_count; int offset = 0; int *range_ends = (int*) malloc(sizeof(int) * ranges_count); for (int i = 0; i < ranges_count; i++) { offset += small_interval + (remainder > i ? 1 : 0); range_ends[i] = offset; } int *d_output, *d_range_ends; hipMalloc((void**) &d_output, size); hipMalloc((void**) &d_range_ends, sizeof(int)*ranges_count); hipMemcpy(d_range_ends, range_ends, sizeof(int)*ranges_count, hipMemcpyHostToDevice); primes_kernel<<<NUM_BLOCKS, NUM_THREADS>>>(from, to, d_range_ends, ranges_count, d_output); hipMemcpy(output, d_output, size, hipMemcpyDeviceToHost); hipFree(d_output); hipFree(d_range_ends); gettimeofday(&t2, 0); for (int i = 0; i < to-from; i ++) { if (output[i]) { printf("%d\n", from + i); } } double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; printf("Elapsed time: %3.1f ms \n", time); free(output); return 0; }
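The trial-division loop in is_prime stops at i*i < number, so perfect squares such as 25 or 49 are reported as prime in both the CUDA and HIP versions. A corrected sketch (is_prime_fixed is an illustrative name):

```cpp
#include <hip/hip_runtime.h>

__device__ void is_prime_fixed(int number, int* output) {
    for (int i = 2; i * i <= number; i++) {   // <= catches perfect squares
        if (number % i == 0) { *output = 0; return; }
    }
    *output = (number > 1) ? 1 : 0;           // 0 and 1 are not prime
}
```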
Convert the following CUDA code to AMD GPU code: cuda __global__ void wave1Drusanov1(double * f_nm, double * f_in, double nu,int N){ int tid=threadIdx.x+blockIdx.x*blockDim.x; if(tid<N){ int x_p = tid+1; if(x_p==N) x_p=0; double fp = f_in[x_p]; double f = f_in[tid]; f_nm[tid]=0.5*(fp+f)-(nu/3.)*(fp-f); } }
#include <hip/hip_runtime.h> __global__ void wave1Drusanov1(double * f_nm, double * f_in, double nu,int N){ int tid=threadIdx.x+blockIdx.x*blockDim.x; if(tid<N){ int x_p = tid+1; if(x_p==N) x_p=0; double fp = f_in[x_p]; double f = f_in[tid]; f_nm[tid]=0.5*(fp+f)-(nu/3.)*(fp-f); } }
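Since this answer is kernel-only, a minimal hypothetical driver may help; N_PTS, blockSize, and the nu value below are illustrative choices, not from the source:

```cpp
#include <hip/hip_runtime.h>
#include <vector>

int main() {
    const int N_PTS = 1 << 20;
    std::vector<double> h_in(N_PTS, 1.0), h_out(N_PTS);
    double *d_in = nullptr, *d_out = nullptr;
    hipMalloc(&d_in, N_PTS * sizeof(double));
    hipMalloc(&d_out, N_PTS * sizeof(double));
    hipMemcpy(d_in, h_in.data(), N_PTS * sizeof(double), hipMemcpyHostToDevice);
    const int blockSize = 256;
    const int gridSize = (N_PTS + blockSize - 1) / blockSize;  // ceiling divide
    wave1Drusanov1<<<gridSize, blockSize>>>(d_out, d_in, 0.5, N_PTS);
    hipMemcpy(h_out.data(), d_out, N_PTS * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}
```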
Convert the following CUDA code to AMD GPU code: cuda /* $ nvcc -o device_prop device_prop.cu */ #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> void getCudaDeviceInfo() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Compute capability: %d.%d\n", prop.major, prop.minor); printf(" Multi-processor count: %d\n", prop.multiProcessorCount); printf(" Maximum size of each dimension of a grid: %d\n", *(prop.maxGridSize)); printf(" Maximum size of each dimension of a block: %d\n", *(prop.maxThreadsDim)); printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock); //printf("Maximum number of resident blocks per multiprocessor: %d\n", prop.maxBlocksPerMultiProcessor); printf(" Maximum resident threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor); } } int main(void) { // GPU info getCudaDeviceInfo(); }
/* $ hipcc -o device_prop device_prop.cpp */ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> void getCudaDeviceInfo() { int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Compute capability: %d.%d\n", prop.major, prop.minor); printf(" Multi-processor count: %d\n", prop.multiProcessorCount); printf(" Maximum size of each dimension of a grid: %d\n", *(prop.maxGridSize)); printf(" Maximum size of each dimension of a block: %d\n", *(prop.maxThreadsDim)); printf(" Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock); //printf("Maximum number of resident blocks per multiprocessor: %d\n", prop.maxBlocksPerMultiProcessor); printf(" Maximum resident threads per multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor); } } int main(void) { // GPU info getCudaDeviceInfo(); }
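On AMD hardware the printed "compute capability" pair is less descriptive than the architecture string that hipDeviceProp_t also carries in recent ROCm releases. A short sketch, assuming the gcnArchName field is available in the toolchain in use:

```cpp
#include <hip/hip_runtime.h>
#include <stdio.h>

int main(void) {
    int nDevices = 0;
    hipGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        hipDeviceProp_t prop;
        hipGetDeviceProperties(&prop, i);
        // gcnArchName identifies the GPU ISA, e.g. a gfx* string.
        printf("Device %d: %s (arch: %s)\n", i, prop.name, prop.gcnArchName);
    }
    return 0;
}
```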
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> __global__ void task_allocate(){ int i = threadIdx.x; if(i==0){ // compute 1+2+3+...+100 int n = 100; int sum = 0; while(n>0){ sum+=n; n--; } printf("1+2+...+100 = %d\n",sum); }else if(i==1){ // compute 10 factorial int n = 10; int factor = 1; while(n>0){ factor*=n; n--; } printf("10! = %d\n",factor); } } int main(){ task_allocate<<<1,2>>>(); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void task_allocate(){ int i = threadIdx.x; if(i==0){ // compute 1+2+3+...+100 int n = 100; int sum = 0; while(n>0){ sum+=n; n--; } printf("1+2+...+100 = %d\n",sum); }else if(i==1){ // compute 10 factorial int n = 10; int factor = 1; while(n>0){ factor*=n; n--; } printf("10! = %d\n",factor); } } int main(){ task_allocate<<<1,2>>>(); return 0; }
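Both versions return from main right away, so the process can exit before the kernel's printf output is flushed. A sketch of the usual pattern, as a drop-in replacement for the main above:

```cpp
int main() {
    task_allocate<<<1, 2>>>();
    hipDeviceSynchronize();  // wait for the kernel so its printf output appears
    return 0;
}
```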
Convert the following CUDA code to AMD GPU code: cuda #include <cstdio> #include <cstdlib> #include <iostream> using std::cout; using std::endl; #define BLOCKSIZE 50 #define RADIUS 10 #define maxn 2000 #define size maxn*sizeof(int) __global__ void add(const int *a, const int *b, int *c) { c[threadIdx.x + blockIdx.x * blockDim.x] = a[threadIdx.x + blockIdx.x * blockDim.x] + b[threadIdx.x + blockIdx.x * blockDim.x]; } __global__ void stential(const int *in, int *out) { __shared__ int tmp[BLOCKSIZE + RADIUS * 2]; auto gindex = threadIdx.x + blockIdx.x * blockDim.x; auto lindex = threadIdx.x + RADIUS; tmp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { tmp[lindex - RADIUS] = (gindex < RADIUS ) ? 0 : in[gindex - RADIUS]; tmp[lindex + BLOCKSIZE] = (gindex + BLOCKSIZE > maxn) ? 0 : in[gindex + BLOCKSIZE]; } __syncthreads(); int result = 0; for (int i = -RADIUS; i < RADIUS; i++) { result += tmp[i + lindex]; } out[gindex] = result; } int main() { int *in, *out; int *inc, *outc; in = (int *) malloc(size); out = (int *) malloc(size); for (int i = 0; i < maxn; i++) { in[i] = i; out[i] = 233; } cudaMalloc((void **) &inc, size); cudaMalloc((void **) &outc, size); cudaMemcpy(inc, in, size, cudaMemcpyHostToDevice); stential<<<(maxn+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>(inc, outc); cudaError error; cout<<size<<" "<<sizeof(out)<<" "<<sizeof(outc)<<endl; error = cudaMemcpy(out, outc, size, cudaMemcpyDeviceToHost);//why i can't copy the memory? std::cout << "error: " << cudaGetErrorString(error) << std::endl; for (int i = 0; i < maxn; i++) { std::cout << in[i] << " " << out[i] << std::endl; } cudaFree(inc); cudaFree(outc); return 0; }
#include <hip/hip_runtime.h> #include <cstdio> #include <cstdlib> #include <iostream> using std::cout; using std::endl; #define BLOCKSIZE 50 #define RADIUS 10 #define maxn 2000 #define size maxn*sizeof(int) __global__ void add(const int *a, const int *b, int *c) { c[threadIdx.x + blockIdx.x * blockDim.x] = a[threadIdx.x + blockIdx.x * blockDim.x] + b[threadIdx.x + blockIdx.x * blockDim.x]; } __global__ void stential(const int *in, int *out) { __shared__ int tmp[BLOCKSIZE + RADIUS * 2]; auto gindex = threadIdx.x + blockIdx.x * blockDim.x; auto lindex = threadIdx.x + RADIUS; tmp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { tmp[lindex - RADIUS] = (gindex < RADIUS ) ? 0 : in[gindex - RADIUS]; tmp[lindex + BLOCKSIZE] = (gindex + BLOCKSIZE > maxn) ? 0 : in[gindex + BLOCKSIZE]; } __syncthreads(); int result = 0; for (int i = -RADIUS; i < RADIUS; i++) { result += tmp[i + lindex]; } out[gindex] = result; } int main() { int *in, *out; int *inc, *outc; in = (int *) malloc(size); out = (int *) malloc(size); for (int i = 0; i < maxn; i++) { in[i] = i; out[i] = 233; } hipMalloc((void **) &inc, size); hipMalloc((void **) &outc, size); hipMemcpy(inc, in, size, hipMemcpyHostToDevice); stential<<<(maxn+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>(inc, outc); hipError_t error; cout<<size<<" "<<sizeof(out)<<" "<<sizeof(outc)<<endl; error = hipMemcpy(out, outc, size, hipMemcpyDeviceToHost);//why i can't copy the memory? std::cout << "error: " << hipGetErrorString(error) << std::endl; for (int i = 0; i < maxn; i++) { std::cout << in[i] << " " << out[i] << std::endl; } hipFree(inc); hipFree(outc); return 0; }
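Two off-by-one issues are carried over from the CUDA original here: the right-halo test allows gindex + BLOCKSIZE == maxn to read one element past the array, and the accumulation loop stops before +RADIUS, making the stencil window asymmetric. A corrected sketch of the kernel body, using the same macros defined above:

```cpp
__global__ void stential_fixed(const int* in, int* out) {
    __shared__ int tmp[BLOCKSIZE + RADIUS * 2];
    auto gindex = threadIdx.x + blockIdx.x * blockDim.x;
    auto lindex = threadIdx.x + RADIUS;
    tmp[lindex] = in[gindex];
    if (threadIdx.x < RADIUS) {
        tmp[lindex - RADIUS] = (gindex < RADIUS) ? 0 : in[gindex - RADIUS];
        tmp[lindex + BLOCKSIZE] =
            (gindex + BLOCKSIZE >= maxn) ? 0 : in[gindex + BLOCKSIZE];  // >= avoids reading in[maxn]
    }
    __syncthreads();
    int result = 0;
    for (int i = -RADIUS; i <= RADIUS; i++) {  // include +RADIUS for a symmetric window
        result += tmp[i + lindex];
    }
    out[gindex] = result;
}
```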
Convert the following CUDA code to AMD GPU code: cuda #include <thrust/device_vector.h> #include <thrust/tabulate.h> #include <stdio.h> #include <iostream> #include <time.h> #include <chrono> struct generator{ __host__ __device__ int operator() (const int& i) const{ return i % 2 == 0 ? 1 : -1; } }; int main(int argc, char** argv){ //If there are no arguments given run a performace test of thrust if (atoi(argv[0]) == 0) { for(int size = 100000000; size < 2100000000; size += 100000000) { double time = 0.0; for(int sample = 0; sample < 5; sample++){ thrust::device_vector<int> test(size); thrust::tabulate(test.begin(), test.end(), generator()); auto started = std::chrono::high_resolution_clock::now(); thrust::inclusive_scan(test.begin(), test.end(), test.begin(), thrust::plus<int>()); int last = test[test.size() - 1]; int min = thrust::reduce(test.begin(), test.end(), 0, thrust::minimum<int>()); bool matched = last == 0 && min >= 0; auto end = std::chrono::high_resolution_clock::now(); time += (std::chrono::duration_cast<std::chrono::milliseconds>(end - started).count()) / 1000.0; } printf("%d,%.6f\n", size, time / 5.0); } } //If there is a size given then run parenmatch on that size if (atoi(argv[0]) > 0) { int size = atoi(argv[1]); thrust::device_vector<int> test(size); thrust::tabulate(test.begin(), test.end(), generator()); auto started = std::chrono::high_resolution_clock::now(); thrust::inclusive_scan(test.begin(), test.end(), test.begin(), thrust::plus<int>()); int last = test[test.size() - 1]; int min = thrust::reduce(test.begin(), test.end(), 0, thrust::minimum<int>()); bool matched = last == 0 && min >= 0; auto end = std::chrono::high_resolution_clock::now(); printf("Thrust time %.4f\n", (std::chrono::duration_cast<std::chrono::milliseconds>(end - started).count()) / 1000.0); } return 0; }
#include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/tabulate.h> #include <stdio.h> #include <iostream> #include <time.h> #include <chrono> struct generator{ __host__ __device__ int operator() (const int& i) const{ return i % 2 == 0 ? 1 : -1; } }; int main(int argc, char** argv){ //If there are no arguments given run a performace test of thrust if (atoi(argv[0]) == 0) { for(int size = 100000000; size < 2100000000; size += 100000000) { double time = 0.0; for(int sample = 0; sample < 5; sample++){ thrust::device_vector<int> test(size); thrust::tabulate(test.begin(), test.end(), generator()); auto started = std::chrono::high_resolution_clock::now(); thrust::inclusive_scan(test.begin(), test.end(), test.begin(), thrust::plus<int>()); int last = test[test.size() - 1]; int min = thrust::reduce(test.begin(), test.end(), 0, thrust::minimum<int>()); bool matched = last == 0 && min >= 0; auto end = std::chrono::high_resolution_clock::now(); time += (std::chrono::duration_cast<std::chrono::milliseconds>(end - started).count()) / 1000.0; } printf("%d,%.6f\n", size, time / 5.0); } } //If there is a size given then run parenmatch on that size if (atoi(argv[0]) > 0) { int size = atoi(argv[1]); thrust::device_vector<int> test(size); thrust::tabulate(test.begin(), test.end(), generator()); auto started = std::chrono::high_resolution_clock::now(); thrust::inclusive_scan(test.begin(), test.end(), test.begin(), thrust::plus<int>()); int last = test[test.size() - 1]; int min = thrust::reduce(test.begin(), test.end(), 0, thrust::minimum<int>()); bool matched = last == 0 && min >= 0; auto end = std::chrono::high_resolution_clock::now(); printf("Thrust time %.4f\n", (std::chrono::duration_cast<std::chrono::milliseconds>(end - started).count()) / 1000.0); } return 0; }
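This pair compiles unchanged on ROCm because rocThrust provides the thrust:: API. The one functional oddity, present in both versions, is that atoi(argv[0]) parses the executable name and is therefore effectively always 0, so the single-size branch is unreachable. A sketch of the apparent intent; parse_size is an illustrative helper, not from the source:

```cpp
#include <cstdlib>

// Hypothetical helper: no argument selects the sweep, one argument selects
// a single size, matching what the two branches above seem to intend.
int parse_size(int argc, char** argv) {
    return (argc > 1) ? std::atoi(argv[1]) : 0;  // 0 means "run the sweep"
}
```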
Convert the following CUDA code to AMD GPU code: cuda #define MODE_MANDEL 1 #define MODE_MANDEL_DISTANCE 2 #define MODE_JULIA 3 #define WIDTH gridDim.x*blockDim.x #define HEIGHT gridDim.y*blockDim.y #define X ((blockIdx.x * blockDim.x) + threadIdx.x) #define Y ((blockIdx.y * blockDim.y) + threadIdx.y) __device__ inline float2 mul(const float2 pFF1, const float2 pFF2) { const float hi = pFF1.x; const float lo = pFF1.y; const float yhi = pFF2.x; const float ylo = pFF2.y; float t, tau, u, v, w; t = hi * yhi; /* Highest order float term. */ if (t == 0) { return make_float2(0,0); } tau = fma(hi, yhi, -t); v = hi * ylo; w = lo * yhi; tau += v + w; /* Add in other second-order terms. */ u = t + tau; return make_float2(u, (t - u) + tau); } __device__ inline float2 mulFloat(const float2 pFF1, const float pDouble) { const float hi = pFF1.x; const float lo = pFF1.y; const float yhi = pDouble; float t, tau, u, w; t = hi * yhi; /* Highest order float term. */ if (t == 0) { return make_float2(0,0); } tau = fma(hi, yhi, -t); w = lo * yhi; tau += w; /* Add in other second-order terms. */ u = t + tau; return make_float2(u, (t - u) + tau); } __device__ inline float2 add(const float2 pFF1, const float2 pFF2) { const float hi = pFF1.x; const float lo = pFF1.y; const float yhi = pFF2.x; const float ylo = pFF2.y; float z, q, zz, xh; z = hi + yhi; q = hi - z; zz = q + yhi + (hi - (q + z)) + lo + ylo; /* Keep -0 result. */ if (zz == 0.0) { return make_float2(z,0); } xh = z + zz; return make_float2(xh,z - xh + zz); } __device__ inline float2 addFloat(const float2 pFF1, const float y) { float hi = pFF1.x; float lo = pFF1.y; float z, q, zz, xh; z = hi + y; q = hi - z; zz = q + y + (hi - (q + z)) + lo; /* Keep -0 result. */ if (zz == 0.0) { return make_float2(z,0); } xh = z + zz; return make_float2(xh,z - xh + zz); } __device__ inline float2 sub(const float2 pFF1, const float2 pFF2) { return add(pFF1, make_float2(-pFF2.x, -pFF2.y)); } extern "C" __global__ void compute( int *iters, double *lastValuesR, double *lastValuesI, double *distancesR, double *distancesI, const int mode, const int4 tile, const float2 xStart, const float2 yStart, const float2 juliaCr, const float2 juliaCi, const float2 xInc, const float2 yInc, const int maxIterations, const double sqrEscapeRadius ) { if (X >= tile.z || Y >= tile.w) { // tile.z is width of tile, tile.w is height of tile return; } const float2 x = add(make_float2(xStart.x, xStart.y), mulFloat(make_float2(xInc.x, xInc.y), X)); const float2 y = add(make_float2(yStart.x, yStart.y), mulFloat(make_float2(yInc.x, yInc.y), Y)); const float2 cr = mode == MODE_JULIA ? juliaCr : x; const float2 ci = mode == MODE_JULIA ? 
juliaCi : y; const float escape = (float) sqrEscapeRadius; float2 zr = x; float2 zi = y; float2 tmp; // distance float2 dr = make_float2(1, 0); float2 di = make_float2(0, 0); float2 new_dr; int count = 0; for (; count < maxIterations; count++) { const float2 zrsqr = mul(zr, zr); const float2 zisqr = mul(zi, zi); if (add(zrsqr, zisqr).x >= escape) { break; } if (mode == MODE_MANDEL_DISTANCE) { // new_dr = 2.0f * (zr * dr - zi * di) + 1.0f; new_dr = addFloat(mulFloat(sub(mul(zr, dr), mul(zi, di)), 2.0f), 1.0f); // di = 2.0f * (zr * di + zi * dr); di = mulFloat(add(mul(zr, di), mul(zi, dr)), 2.0f); dr = new_dr; } tmp = add(sub(zrsqr, zisqr), cr); zi = add(mulFloat(mul(zr, zi), 2.0f), ci); zr = tmp; } const int tIndex = X + Y * tile.z; // tile.z is width of tile iters[tIndex] = count; lastValuesR[tIndex] = (double) zr.x + (double) zr.y; lastValuesI[tIndex] = (double) zi.x + (double) zi.y; if (mode == MODE_MANDEL_DISTANCE) { distancesR[tIndex] = (double) dr.x + (double) dr.y; distancesI[tIndex] = (double) di.x + (double) di.y; } }
#include <hip/hip_runtime.h> #define MODE_MANDEL 1 #define MODE_MANDEL_DISTANCE 2 #define MODE_JULIA 3 #define WIDTH gridDim.x*blockDim.x #define HEIGHT gridDim.y*blockDim.y #define X ((blockIdx.x * blockDim.x) + threadIdx.x) #define Y ((blockIdx.y * blockDim.y) + threadIdx.y) __device__ inline float2 mul(const float2 pFF1, const float2 pFF2) { const float hi = pFF1.x; const float lo = pFF1.y; const float yhi = pFF2.x; const float ylo = pFF2.y; float t, tau, u, v, w; t = hi * yhi; /* Highest order float term. */ if (t == 0) { return make_float2(0,0); } tau = fma(hi, yhi, -t); v = hi * ylo; w = lo * yhi; tau += v + w; /* Add in other second-order terms. */ u = t + tau; return make_float2(u, (t - u) + tau); } __device__ inline float2 mulFloat(const float2 pFF1, const float pDouble) { const float hi = pFF1.x; const float lo = pFF1.y; const float yhi = pDouble; float t, tau, u, w; t = hi * yhi; /* Highest order float term. */ if (t == 0) { return make_float2(0,0); } tau = fma(hi, yhi, -t); w = lo * yhi; tau += w; /* Add in other second-order terms. */ u = t + tau; return make_float2(u, (t - u) + tau); } __device__ inline float2 add(const float2 pFF1, const float2 pFF2) { const float hi = pFF1.x; const float lo = pFF1.y; const float yhi = pFF2.x; const float ylo = pFF2.y; float z, q, zz, xh; z = hi + yhi; q = hi - z; zz = q + yhi + (hi - (q + z)) + lo + ylo; /* Keep -0 result. */ if (zz == 0.0) { return make_float2(z,0); } xh = z + zz; return make_float2(xh,z - xh + zz); } __device__ inline float2 addFloat(const float2 pFF1, const float y) { float hi = pFF1.x; float lo = pFF1.y; float z, q, zz, xh; z = hi + y; q = hi - z; zz = q + y + (hi - (q + z)) + lo; /* Keep -0 result. */ if (zz == 0.0) { return make_float2(z,0); } xh = z + zz; return make_float2(xh,z - xh + zz); } __device__ inline float2 sub(const float2 pFF1, const float2 pFF2) { return add(pFF1, make_float2(-pFF2.x, -pFF2.y)); } extern "C" __global__ void compute( int *iters, double *lastValuesR, double *lastValuesI, double *distancesR, double *distancesI, const int mode, const int4 tile, const float2 xStart, const float2 yStart, const float2 juliaCr, const float2 juliaCi, const float2 xInc, const float2 yInc, const int maxIterations, const double sqrEscapeRadius ) { if (X >= tile.z || Y >= tile.w) { // tile.z is width of tile, tile.w is height of tile return; } const float2 x = add(make_float2(xStart.x, xStart.y), mulFloat(make_float2(xInc.x, xInc.y), X)); const float2 y = add(make_float2(yStart.x, yStart.y), mulFloat(make_float2(yInc.x, yInc.y), Y)); const float2 cr = mode == MODE_JULIA ? juliaCr : x; const float2 ci = mode == MODE_JULIA ? 
juliaCi : y; const float escape = (float) sqrEscapeRadius; float2 zr = x; float2 zi = y; float2 tmp; // distance float2 dr = make_float2(1, 0); float2 di = make_float2(0, 0); float2 new_dr; int count = 0; for (; count < maxIterations; count++) { const float2 zrsqr = mul(zr, zr); const float2 zisqr = mul(zi, zi); if (add(zrsqr, zisqr).x >= escape) { break; } if (mode == MODE_MANDEL_DISTANCE) { // new_dr = 2.0f * (zr * dr - zi * di) + 1.0f; new_dr = addFloat(mulFloat(sub(mul(zr, dr), mul(zi, di)), 2.0f), 1.0f); // di = 2.0f * (zr * di + zi * dr); di = mulFloat(add(mul(zr, di), mul(zi, dr)), 2.0f); dr = new_dr; } tmp = add(sub(zrsqr, zisqr), cr); zi = add(mulFloat(mul(zr, zi), 2.0f), ci); zr = tmp; } const int tIndex = X + Y * tile.z; // tile.z is width of tile iters[tIndex] = count; lastValuesR[tIndex] = (double) zr.x + (double) zr.y; lastValuesI[tIndex] = (double) zi.x + (double) zi.y; if (mode == MODE_MANDEL_DISTANCE) { distancesR[tIndex] = (double) dr.x + (double) dr.y; distancesI[tIndex] = (double) di.x + (double) di.y; } }
Convert the following CUDA code to AMD GPU code: cuda #include<stdio.h> __global__ void hello_from_gpu(){ int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int gdx = gridDim.x; int gdy = gridDim.y; int gdz = gridDim.z; int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bdx = blockDim.x; int bdy = blockDim.y; int bdz = blockDim.z; printf("Hello World from block-[%d/%d, %d/%d, %d/%d] and thread-[%d/%d, %d/%d, %d/%d]!\n", bx, gdx, by, gdy, bz, gdz, tx, bdx, ty, bdy, tz, bdz); } int main(void){ dim3 block_size(2, 3, 4); dim3 grid_size(2, 1, 1); hello_from_gpu<<<grid_size, block_size>>>(); cudaDeviceSynchronize(); printf("\nblock_size.x:%d, block_size.y:%d, block_size.z:%d", block_size.x, block_size.y, block_size.z); cudaDeviceReset(); return 0; }
#include <hip/hip_runtime.h> #include<stdio.h> __global__ void hello_from_gpu(){ int bx = blockIdx.x; int by = blockIdx.y; int bz = blockIdx.z; int gdx = gridDim.x; int gdy = gridDim.y; int gdz = gridDim.z; int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bdx = blockDim.x; int bdy = blockDim.y; int bdz = blockDim.z; printf("Hello World from block-[%d/%d, %d/%d, %d/%d] and thread-[%d/%d, %d/%d, %d/%d]!\n", bx, gdx, by, gdy, bz, gdz, tx, bdx, ty, bdy, tz, bdz); } int main(void){ dim3 block_size(2, 3, 4); dim3 grid_size(2, 1, 1); hello_from_gpu<<<grid_size, block_size>>>(); hipDeviceSynchronize(); printf("\nblock_size.x:%d, block_size.y:%d, block_size.z:%d", block_size.x, block_size.y, block_size.z); hipDeviceReset(); return 0; }
Convert the following CUDA code to AMD GPU code: cuda // allocate pitch memory and cudaArray #include <stdio.h> #include <memory.h> #include <cuda.h> #include <cuda_runtime.h> #define NX 1003 #define NY 1024 __global__ void PLShift(float* odata, int pitch, int width, int height, int shiftx, int shifty, cudaTextureObject_t texRefPL ){ int global_x_ = threadIdx.x + blockIdx.x*blockDim.x; int global_y_ = threadIdx.y + blockIdx.y*blockDim.y; /* if(threadIdx.x == 0 && blockIdx.x == 0){ for(int i=0; i<NX; i++){ for(int j=0;j<NY;j++){ printf("%d ", tex2D<float>(texRefPL, (i) / (float) width, (j) / (float) height)) } printf("\n"); } } */ //odata[global_y_*pitch + global_x_] = tex2D<float>(texRefPL, // (global_x_) / (float) width, // (global_y_) / (float) height); //because using normalizedCoords and cudaAddressModeWrap(automaticaly loop), Line 78/80/81 odata[global_y_*pitch + global_x_] = tex2D<float>(texRefPL, (global_x_ + shiftx)%width, (global_y_ + shifty)%height); } int main(){ size_t sizeByte = NX*NY*sizeof(float); //host data declaration and initialization float* hdata = (float* )malloc(sizeByte); for(int i=0;i<NX*NY; i++){ hdata[i] = i; } float* hdata_res = (float* )malloc(sizeByte); memset(hdata_res, 0, sizeByte); //gloden result float *gold = (float*) malloc(sizeByte); for (int j = 0; j < NY; ++j) { int jshift = (j + 2) % NY; for (int i = 0; i < NX; ++i) { int ishift = (i + 1) % NX; gold[j * NX + i] = hdata[jshift * NX + ishift]; } } //cudaChannelDesc, similar to data type cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); //using pitch linear memory float *ddata_pl; float *ddata_pl_res; size_t sizePL; cudaMallocPitch((void**)&ddata_pl, &sizePL, NX*sizeof(float), NY); cudaMallocPitch((void**)&ddata_pl_res, &sizePL, NX*sizeof(float), NY); printf("Pitch of ddata_pl is %d , while NX =%d\n", sizePL, NX); //memory copy: host -> device cudaMemcpy2D(ddata_pl, sizePL, hdata, NX*sizeof(float),NX*sizeof(float), NY, cudaMemcpyHostToDevice); //no padding in host memory, so spitch = width //allocate texture memory //cudaResourceDesc --> cudaTextureDesc --> cudaTextureObject_t //cudaResourceDesc cudaResourceDesc texRes; memset(&texRes,0,sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypePitch2D; texRes.res.pitch2D.devPtr = ddata_pl; texRes.res.pitch2D.desc = channelDesc; texRes.res.pitch2D.width = NX; texRes.res.pitch2D.height = NY; texRes.res.pitch2D.pitchInBytes = sizePL; //why not sizePL //cudaTextureDesc cudaTextureDesc texDescr; memset(&texDescr,0,sizeof(cudaTextureDesc)); texDescr.normalizedCoords = false; texDescr.filterMode = cudaFilterModePoint; texDescr.addressMode[0] = cudaAddressModeWrap; texDescr.addressMode[1] = cudaAddressModeWrap; texDescr.readMode = cudaReadModeElementType; //create cudaTextureObject cudaTextureObject_t texRefPL; cudaCreateTextureObject(&texRefPL, &texRes, &texDescr, NULL); //start running kernel cudaMemset2D(ddata_pl_res, sizePL, 0, NX*sizeof(float), NY); dim3 mygrid(32, 32, 1); dim3 myblock(32, 32, 1); PLShift<<<mygrid, myblock>>>(ddata_pl_res, sizePL/sizeof(float), NX, NY, 1, 2, texRefPL); cudaMemcpy2D(hdata_res, NX*sizeof(float), ddata_pl_res, sizePL, NX*sizeof(float), NY, cudaMemcpyDeviceToHost); for(int i=0;i<NX*NY;i++){ if(hdata_res[i] != gold[i]){ printf("Error at %d, GPU(%lf) != CPU(%lf)\n",i, hdata_res[i], gold[i]); //return 0; } } }
// allocate pitch memory and cudaArray #include <stdio.h> #include <memory.h> #include <hip/hip_runtime.h> #define NX 1003 #define NY 1024 __global__ void PLShift(float* odata, int pitch, int width, int height, int shiftx, int shifty, hipTextureObject_t texRefPL ){ int global_x_ = threadIdx.x + blockIdx.x*blockDim.x; int global_y_ = threadIdx.y + blockIdx.y*blockDim.y; /* if(threadIdx.x == 0 && blockIdx.x == 0){ for(int i=0; i<NX; i++){ for(int j=0;j<NY;j++){ printf("%d ", tex2D<float>(texRefPL, (i) / (float) width, (j) / (float) height)) } printf("\n"); } } */ //odata[global_y_*pitch + global_x_] = tex2D<float>(texRefPL, // (global_x_) / (float) width, // (global_y_) / (float) height); //because using normalizedCoords and cudaAddressModeWrap(automaticaly loop), Line 78/80/81 odata[global_y_*pitch + global_x_] = tex2D<float>(texRefPL, (global_x_ + shiftx)%width, (global_y_ + shifty)%height); } int main(){ size_t sizeByte = NX*NY*sizeof(float); //host data declaration and initialization float* hdata = (float* )malloc(sizeByte); for(int i=0;i<NX*NY; i++){ hdata[i] = i; } float* hdata_res = (float* )malloc(sizeByte); memset(hdata_res, 0, sizeByte); //gloden result float *gold = (float*) malloc(sizeByte); for (int j = 0; j < NY; ++j) { int jshift = (j + 2) % NY; for (int i = 0; i < NX; ++i) { int ishift = (i + 1) % NX; gold[j * NX + i] = hdata[jshift * NX + ishift]; } } //cudaChannelDesc, similar to data type hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); //using pitch linear memory float *ddata_pl; float *ddata_pl_res; size_t sizePL; hipMallocPitch((void**)&ddata_pl, &sizePL, NX*sizeof(float), NY); hipMallocPitch((void**)&ddata_pl_res, &sizePL, NX*sizeof(float), NY); printf("Pitch of ddata_pl is %d , while NX =%d\n", sizePL, NX); //memory copy: host -> device hipMemcpy2D(ddata_pl, sizePL, hdata, NX*sizeof(float),NX*sizeof(float), NY, hipMemcpyHostToDevice); //no padding in host memory, so spitch = width //allocate texture memory //cudaResourceDesc --> cudaTextureDesc --> cudaTextureObject_t //cudaResourceDesc hipResourceDesc texRes; memset(&texRes,0,sizeof(hipResourceDesc)); texRes.resType = hipResourceTypePitch2D; texRes.res.pitch2D.devPtr = ddata_pl; texRes.res.pitch2D.desc = channelDesc; texRes.res.pitch2D.width = NX; texRes.res.pitch2D.height = NY; texRes.res.pitch2D.pitchInBytes = sizePL; //why not sizePL //cudaTextureDesc hipTextureDesc texDescr; memset(&texDescr,0,sizeof(hipTextureDesc)); texDescr.normalizedCoords = false; texDescr.filterMode = hipFilterModePoint; texDescr.addressMode[0] = hipAddressModeWrap; texDescr.addressMode[1] = hipAddressModeWrap; texDescr.readMode = hipReadModeElementType; //create cudaTextureObject hipTextureObject_t texRefPL; hipCreateTextureObject(&texRefPL, &texRes, &texDescr, NULL); //start running kernel hipMemset2D(ddata_pl_res, sizePL, 0, NX*sizeof(float), NY); dim3 mygrid(32, 32, 1); dim3 myblock(32, 32, 1); PLShift<<<mygrid, myblock>>>(ddata_pl_res, sizePL/sizeof(float), NX, NY, 1, 2, texRefPL); hipMemcpy2D(hdata_res, NX*sizeof(float), ddata_pl_res, sizePL, NX*sizeof(float), NY, hipMemcpyDeviceToHost); for(int i=0;i<NX*NY;i++){ if(hdata_res[i] != gold[i]){ printf("Error at %d, GPU(%lf) != CPU(%lf)\n",i, hdata_res[i], gold[i]); //return 0; } } }
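A small portability nit retained from the original: sizePL is a size_t, so the %d in the pitch printf is a format mismatch on LP64 systems. A sketch of the portable spelling; print_pitch is an illustrative helper:

```cpp
#include <cstdio>
#include <cstddef>

// Illustrative helper: %zu is the correct conversion for size_t.
void print_pitch(std::size_t sizePL, int nx) {
    std::printf("Pitch of ddata_pl is %zu , while NX =%d\n", sizePL, nx);
}
```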
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" // includes, project #define PI 3.1415926536f int MaxThreadsPerBlock; int MaxThreadsX; int MaxThreadsY; // Converts a real vector to a complex vector // Converts a complex vector to a real vector // Pointwise multiplication of a complex vector by a real vector // Applies y = at*x + bt to each point of a real vector // Fills the linearmem (pixel array) bound to the texture with the array of reals // Alpha is not modified // Fills the linearmem (pixel array) bound to the texture with the array of bytes // Alpha is not modified // Fills the linearmem (pixel array) bound to the texture with the array of reals // Alpha enables display above a certain threshold // Auto-regressive process X2 = a*X1 + b*X0 + N0; // Expansion // Bi-linear interpolation is applied to the source // Cartesian-to-polar transformation // Bi-linear interpolation is applied to the source __global__ void AutoRegK(double* X0, double* X1, double* X2, double* N0, int numElements, double a, double b) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { X2[i] = a*X1[i] + b*X0[i] + N0[i]; } }
#include <hip/hip_runtime.h> #include "includes.h" // includes, project #define PI 3.1415926536f int MaxThreadsPerBlock; int MaxThreadsX; int MaxThreadsY; // Converts a real vector to a complex vector // Converts a complex vector to a real vector // Pointwise multiplication of a complex vector by a real vector // Applies y = at*x + bt to each point of a real vector // Fills the linearmem (pixel array) bound to the texture with the array of reals // Alpha is not modified // Fills the linearmem (pixel array) bound to the texture with the array of bytes // Alpha is not modified // Fills the linearmem (pixel array) bound to the texture with the array of reals // Alpha enables display above a certain threshold // Auto-regressive process X2 = a*X1 + b*X0 + N0; // Expansion // Bi-linear interpolation is applied to the source // Cartesian-to-polar transformation // Bi-linear interpolation is applied to the source __global__ void AutoRegK(double* X0, double* X1, double* X2, double* N0, int numElements, double a, double b) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { X2[i] = a*X1[i] + b*X0[i] + N0[i]; } }
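The MaxThreadsPerBlock/MaxThreadsX/MaxThreadsY globals in this fragment are never filled here; on HIP they would typically come from the device properties. A sketch, assuming device 0; initLimits is an illustrative name:

```cpp
#include <hip/hip_runtime.h>

void initLimits(int& maxThreadsPerBlock, int& maxThreadsX, int& maxThreadsY) {
    hipDeviceProp_t prop;
    hipGetDeviceProperties(&prop, 0);           // query device 0
    maxThreadsPerBlock = prop.maxThreadsPerBlock;
    maxThreadsX = prop.maxThreadsDim[0];        // per-block limits in x and y
    maxThreadsY = prop.maxThreadsDim[1];
}
```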
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include <iostream> template<typename T> struct ShouldSwap { __host__ __device__ virtual bool operator() (const T left, const T right) const; }; template <typename T> __host__ __device__ __inline__ void swap(T* a, T* r); template<typename T> __global__ void bubbleSort(T* v, const unsigned int n, ShouldSwap<T> shouldSwap); int main(int argc, char** argv) { // vector size const unsigned int size = 10; // host vector int h_v[size] = { 3, 7, 1, 10, 6, 9, 5, 2, 8, 4 }; // device vector int *d_v = 0; cudaMalloc((void**)&d_v, size * sizeof(int)); cudaMemcpy(d_v, h_v, size * sizeof(int), cudaMemcpyHostToDevice); dim3 grdDim(1, 1, 1); dim3 blkDim(size / 2, 1, 1); ShouldSwap<int> shouldSwap; bubbleSort<int> <<<grdDim, blkDim >>>(d_v, size, shouldSwap); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { printf("CUDA error: %s\n", cudaGetErrorString(error)); // system("pause"); // when using VisStudio exit(-1); } cudaMemcpy(h_v, d_v, size * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(d_v); for (int i = 0; i < size; i++) { std::cout << (i == 0 ? "{" : "") << h_v[i] << (i < size - 1 ? " ," : "}"); } std::cout << std::endl; // system("pause"); // when using VisStudio return 0; } template<typename T> __host__ __device__ bool ShouldSwap<T>::operator() (const T left, const T right) const { return left > right; } template<typename T> __host__ __device__ __inline__ void swap (T* a, T* b) { T tmp = *a; *a = *b; *b = tmp; } template<typename T> __global__ void bubbleSort(T* v, const unsigned int n, ShouldSwap<T> shouldSwap) { const unsigned int tIdx = threadIdx.x; for (unsigned int i = 0; i < n; i++) { unsigned int offset = i % 2; unsigned int indiceGauche = 2 * tIdx + offset; unsigned int indiceDroite = indiceGauche + 1; if (indiceDroite < n) { if (shouldSwap(v[indiceGauche], v[indiceDroite])) { swap<T>(&v[indiceGauche], &v[indiceDroite]); } } __syncthreads(); } }
#include "hip/hip_runtime.h" #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include <iostream> template<typename T> struct ShouldSwap { __host__ __device__ virtual bool operator() (const T left, const T right) const; }; template <typename T> __host__ __device__ __inline__ void swap(T* a, T* r); template<typename T> __global__ void bubbleSort(T* v, const unsigned int n, ShouldSwap<T> shouldSwap); int main(int argc, char** argv) { // vector size const unsigned int size = 10; // host vector int h_v[size] = { 3, 7, 1, 10, 6, 9, 5, 2, 8, 4 }; // device vector int *d_v = 0; hipMalloc((void**)&d_v, size * sizeof(int)); hipMemcpy(d_v, h_v, size * sizeof(int), hipMemcpyHostToDevice); dim3 grdDim(1, 1, 1); dim3 blkDim(size / 2, 1, 1); ShouldSwap<int> shouldSwap; bubbleSort<int> <<<grdDim, blkDim >>>(d_v, size, shouldSwap); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if (error != hipSuccess) { printf("CUDA error: %s\n", hipGetErrorString(error)); // system("pause"); // when using VisStudio exit(-1); } hipMemcpy(h_v, d_v, size * sizeof(int), hipMemcpyDeviceToHost); hipFree(d_v); for (int i = 0; i < size; i++) { std::cout << (i == 0 ? "{" : "") << h_v[i] << (i < size - 1 ? " ," : "}"); } std::cout << std::endl; // system("pause"); // when using VisStudio return 0; } template<typename T> __host__ __device__ bool ShouldSwap<T>::operator() (const T left, const T right) const { return left > right; } template<typename T> __host__ __device__ __inline__ void swap (T* a, T* b) { T tmp = *a; *a = *b; *b = tmp; } template<typename T> __global__ void bubbleSort(T* v, const unsigned int n, ShouldSwap<T> shouldSwap) { const unsigned int tIdx = threadIdx.x; for (unsigned int i = 0; i < n; i++) { unsigned int offset = i % 2; unsigned int indiceGauche = 2 * tIdx + offset; unsigned int indiceDroite = indiceGauche + 1; if (indiceDroite < n) { if (shouldSwap(v[indiceGauche], v[indiceDroite])) { swap<T>(&v[indiceGauche], &v[indiceDroite]); } } __syncthreads(); } }
Convert the following CUDA code to AMD GPU code: cuda #include <cuda.h> #include <cufft.h> #include <cuda_profiler_api.h> #include <stdio.h> template<typename T> __device__ __forceinline__ T ldg(const T* ptr) { #if __CUDA_ARCH__ >= 350 return __ldg(ptr); #else return *ptr; #endif } extern "C" __global__ void SemblanceDiv( int nx , int ny , int nz , float * numerator // num (in) , float * denominator // den (in) , float * output // (out) ) { int kx = blockIdx.x*blockDim.x + threadIdx.x; int ky = blockIdx.y*blockDim.y + threadIdx.y; if (kx < nx && ky < ny) { int k_1 = nx*ky + kx; for (int i = 0; i < nz; i++, k_1 += nx*ny) { output[k_1] = ldg(&numerator[k_1]) / (ldg(&denominator[k_1]) + 0.001f); output[k_1] *= output[k_1]; output[k_1] *= output[k_1]; output[k_1] *= output[k_1]; output[k_1] = 1.0f - output[k_1]; } } } extern "C" __global__ void SemblanceMax( int nx , int ny , int nz , float theta , float * data // (in) , float * max_data // (out) , float * max_theta // (out) ) { int kx = blockIdx.x*blockDim.x + threadIdx.x; int ky = blockIdx.y*blockDim.y + threadIdx.y; if (kx < nx && ky < ny) { int k_1 = nx*ky + kx; for (int i = 0; i < nz; i++, k_1 += nx*ny) { if (ldg(&data[k_1]) > max_data[k_1]) { max_data[k_1] = ldg(&data[k_1]); max_theta[k_1] = theta; } } } } extern "C" __global__ void Semblance( int win , int nz , int ny , int nx , float * data // data (in) , float * num // num (out) , float * den // den (out) ) { int kz = blockIdx.x*blockDim.x + threadIdx.x; int ky = blockIdx.y*blockDim.y + threadIdx.y; if (kz < nz && ky+win < ny && ky >= win) { int factor = 2 * win + 1; factor *= factor; float val; int ind; for (int kx = win; kx+win < nx; kx++) { ind = (kz*ny + ky)*nx + kx; for (int dy = -win; dy <= win; dy++) { for (int dx = -win; dx <= win; dx++) { val = ldg(&data[(kz*ny + ky+dy)*nx + kx+dx]); num[ind] += val; den[ind] += val*val; } } num[ind] *= num[ind]; den[ind] *= factor; } } }
#include <hip/hip_runtime.h>
#include <hipfft/hipfft.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>

// __CUDA_ARCH__ is never defined when compiling for AMD GPUs, so guarding only on
// it would always fall back to a plain load there. __HIP_DEVICE_COMPILE__ covers
// the HIP device pass, where HIP also provides __ldg.
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if defined(__HIP_DEVICE_COMPILE__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350)
    return __ldg(ptr);
#else
    return *ptr;
#endif
}

extern "C" __global__ void SemblanceDiv(
      int nx
    , int ny
    , int nz
    , float * numerator   // num (in)
    , float * denominator // den (in)
    , float * output      // (out)
) {
    int kx = blockIdx.x*blockDim.x + threadIdx.x;
    int ky = blockIdx.y*blockDim.y + threadIdx.y;
    if (kx < nx && ky < ny) {
        int k_1 = nx*ky + kx;
        for (int i = 0; i < nz; i++, k_1 += nx*ny) {
            output[k_1] = ldg(&numerator[k_1]) / (ldg(&denominator[k_1]) + 0.001f);
            output[k_1] *= output[k_1];
            output[k_1] *= output[k_1];
            output[k_1] *= output[k_1];
            output[k_1] = 1.0f - output[k_1];
        }
    }
}

extern "C" __global__ void SemblanceMax(
      int nx
    , int ny
    , int nz
    , float theta
    , float * data      // (in)
    , float * max_data  // (out)
    , float * max_theta // (out)
) {
    int kx = blockIdx.x*blockDim.x + threadIdx.x;
    int ky = blockIdx.y*blockDim.y + threadIdx.y;
    if (kx < nx && ky < ny) {
        int k_1 = nx*ky + kx;
        for (int i = 0; i < nz; i++, k_1 += nx*ny) {
            if (ldg(&data[k_1]) > max_data[k_1]) {
                max_data[k_1] = ldg(&data[k_1]);
                max_theta[k_1] = theta;
            }
        }
    }
}

extern "C" __global__ void Semblance(
      int win
    , int nz
    , int ny
    , int nx
    , float * data // data (in)
    , float * num  // num (out)
    , float * den  // den (out)
) {
    int kz = blockIdx.x*blockDim.x + threadIdx.x;
    int ky = blockIdx.y*blockDim.y + threadIdx.y;
    if (kz < nz && ky+win < ny && ky >= win) {
        int factor = 2 * win + 1;
        factor *= factor;
        float val;
        int ind;
        for (int kx = win; kx+win < nx; kx++) {
            ind = (kz*ny + ky)*nx + kx;
            for (int dy = -win; dy <= win; dy++) {
                for (int dx = -win; dx <= win; dx++) {
                    val = ldg(&data[(kz*ny + ky+dy)*nx + kx+dx]);
                    num[ind] += val;
                    den[ind] += val*val;
                }
            }
            num[ind] *= num[ind];
            den[ind] *= factor;
        }
    }
}
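The kernels above are launch-ready but the source provides no host code, and the hipfft include path varies by ROCm version (some installs expose it as <hipfft.h>). A minimal host-side driver sketch, not part of the original source: it shows one way to launch SemblanceDiv under HIP; the volume dimensions and block shape are illustrative assumptions, and it links against the kernel definitions above.

    #include <hip/hip_runtime.h>
    #include <cstdio>

    extern "C" __global__ void SemblanceDiv(int nx, int ny, int nz,
                                            float* numerator, float* denominator, float* output);

    int main() {
        const int nx = 128, ny = 128, nz = 64;               // assumed sizes
        const size_t bytes = (size_t)nx * ny * nz * sizeof(float);
        float *num, *den, *out;
        hipMalloc(&num, bytes);
        hipMalloc(&den, bytes);
        hipMalloc(&out, bytes);
        hipMemset(num, 0, bytes);                            // deterministic demo input
        hipMemset(den, 0, bytes);
        dim3 block(16, 16);
        dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
        SemblanceDiv<<<grid, block>>>(nx, ny, nz, num, den, out);
        hipDeviceSynchronize();
        printf("SemblanceDiv: %s\n", hipGetErrorString(hipGetLastError()));
        hipFree(num); hipFree(den); hipFree(out);
        return 0;
    }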
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __device__ __forceinline__ int mirror(int index, int len){ int s2 = 2 * len - 2; if(index < 0){ index = s2 * (-index / s2) + index; return index <= 1 - len ? index + s2 : -index; } if(index >= len){ index -= s2 * (index / s2); if(index >= len) index = s2 - index; return index; } if(index < 0 || index >= len) index = mirror(index, len); return index; } __global__ void gussain_filter_x(float* random, float* kernel, int lw, size_t dim_z, size_t dim_y, size_t dim_x, int mode, float cval){ size_t index = blockIdx.x * blockDim.x + threadIdx.x; size_t total = dim_x * dim_y * dim_z; size_t total_xy = dim_x * dim_y; size_t id_x = index % dim_x; size_t id_y = (index / dim_x) % dim_y; size_t id_z = (index / total_xy) % dim_z; size_t id_block = index / total; int id; float new_pixel = 0; int dim = 2; if(dim_z > 1){ dim = 3; } if(index < total * dim){ if(mode == 0){ for(int i = -lw; i < lw + 1; i++){ id = id_x + i; if(id < 0 || id > dim_x - 1) new_pixel += cval * kernel[i+lw]; else new_pixel += kernel[i+lw] * random[id_block * total + id_z * total_xy + id_y * dim_x + id]; } __syncthreads(); random[index] = new_pixel; __syncthreads(); } else{ for(int i = -lw; i < lw + 1; i++){ id = id_x + i; id = mirror(id, dim_x); new_pixel += kernel[i+lw] * random[id_block * total + id_z * total_xy + id_y * dim_x + id]; } __syncthreads(); random[index] = new_pixel; __syncthreads(); } } }
#include <hip/hip_runtime.h>
#include "includes.h"

__device__ __forceinline__ int mirror(int index, int len){
    int s2 = 2 * len - 2;
    if(index < 0){
        index = s2 * (-index / s2) + index;
        return index <= 1 - len ? index + s2 : -index;
    }
    if(index >= len){
        index -= s2 * (index / s2);
        if(index >= len)
            index = s2 - index;
        return index;
    }
    if(index < 0 || index >= len)
        index = mirror(index, len);
    return index;
}

__global__ void gussain_filter_x(float* random, float* kernel, int lw, size_t dim_z, size_t dim_y, size_t dim_x, int mode, float cval){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t total = dim_x * dim_y * dim_z;
    size_t total_xy = dim_x * dim_y;
    size_t id_x = index % dim_x;
    size_t id_y = (index / dim_x) % dim_y;
    size_t id_z = (index / total_xy) % dim_z;
    size_t id_block = index / total;
    int id;
    float new_pixel = 0;
    int dim = 2;
    if(dim_z > 1){
        dim = 3;
    }
    if(index < total * dim){
        if(mode == 0){
            for(int i = -lw; i < lw + 1; i++){
                id = id_x + i;
                if(id < 0 || id > dim_x - 1)
                    new_pixel += cval * kernel[i+lw];
                else
                    new_pixel += kernel[i+lw] * random[id_block * total + id_z * total_xy + id_y * dim_x + id];
            }
            __syncthreads();
            random[index] = new_pixel;
            __syncthreads();
        }
        else{
            for(int i = -lw; i < lw + 1; i++){
                id = id_x + i;
                id = mirror(id, dim_x);
                new_pixel += kernel[i+lw] * random[id_block * total + id_z * total_xy + id_y * dim_x + id];
            }
            __syncthreads();
            random[index] = new_pixel;
            __syncthreads();
        }
    }
}
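One caveat that carries over unchanged from CUDA: __syncthreads() in HIP is also block-local, so the in-place update random[index] = new_pixel above only synchronizes threads within one block. When the grid has more than one block, a thread may read a neighbour element that another block has already overwritten. A minimal sketch of a race-free variant, assuming a separate output buffer (the out parameter and the single-pass layout are introduced here for illustration, constant-padding mode only):

    #include <hip/hip_runtime.h>

    // Out-of-place 1-D Gaussian pass along x: reads `in`, writes `out`,
    // so no barrier is needed at all.
    __global__ void gaussian_filter_x_oop(const float* in, float* out, const float* kernel,
                                          int lw, size_t dim_x, size_t n_total, float cval){
        size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
        if(index >= n_total) return;
        size_t id_x = index % dim_x;   // position within the current row
        size_t row0 = index - id_x;    // flat offset of the row start
        float acc = 0.0f;
        for(int i = -lw; i <= lw; i++){
            long long id = (long long)id_x + i;
            if(id < 0 || id >= (long long)dim_x)
                acc += cval * kernel[i + lw];        // pad with cval outside the row
            else
                acc += kernel[i + lw] * in[row0 + id];
        }
        out[index] = acc;              // distinct buffer: no cross-block race
    }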
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> //printf #include <stdlib.h>//srand, rand #include <time.h> //time #include <fstream> #include <string> #include <iostream> #include <ctime> #include <ratio> #include <chrono> //globalne ustawienia const unsigned BLOCKS_PER_GRID = 16; const unsigned THREADS_PER_BLOCK = 256; class Matrix{ unsigned M,N; //rozmiary macierzy double** tab; //wskaznik na dane macierzy Matrix minor(unsigned m,unsigned n){ Matrix result(M-1,N-1); for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<N;j++){ if(i<m && j<n) result.set(i,j,this->get(i,j)); else if(i<m && j>n) result.set(i,j-1,this->get(i,j)); else if(i>m && j<n) result.set(i-1,j,this->get(i,j)); else if(i>m && j>n) result.set(i-1,j-1,this->get(i,j)); } } return result; } public: Matrix(unsigned M, unsigned N){ this->M = M; this->N = N; tab = new double*[M]; for(unsigned i=0;i<M;i++){ tab[i] = new double[N]; for(unsigned j=0;j<N;j++){ tab[i][j] = (i==j); //domyslnie macierz jednostkowa } } } ~Matrix(){ for(unsigned i=0;i<M;i++){ delete[] tab[i]; } delete[] tab; } unsigned getRows() const{ return M;} unsigned getColumns() const{ return N;} double get(unsigned m, unsigned n) const{ if(m<M && n<N) return tab[m][n]; throw "Nie znaleziono elementu pod podana para indeksow."; } void set(unsigned m, unsigned n, double num){ if(m<M && n<N) tab[m][n] = num; else throw "Nie mozna ustawic wartosci elementu pod podana para indeksow."; } Matrix operator+(const Matrix& mat) const{ if(this->M != mat.getRows() || this->N != mat.getColumns()) throw "Nie mozna dodawac macierzy o roznych wymiarach!"; Matrix result = Matrix(M,N); for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<N;j++){ result.set(i,j,this->get(i,j)+mat.get(i,j)); } } return result; } Matrix operator*(const Matrix& mat) const{ if(this->N != mat.getRows()) throw "Pierwsza macierz nie ma tylu kolumn, co druga wierszy - odmowa wykonania mnozenia!"; Matrix result = Matrix(M, mat.getColumns()); for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<mat.getColumns();j++){ //result.set(i,j,this->get(i,j)+mat.get(i,j)); result.set(i,j,0.0); for(unsigned k=0;k<this->N;k++){ result.set(i,j,result.get(i,j)+this->get(i,k)*mat.get(k,j)); } } } return result; } double det(){ if(M != N) throw "Wyznacznik mozna obliczyc tylko dla macierzy kwadratowej!"; if(M == 1) return get(0,0); else{ double result = 0.0; for(unsigned i=0;i<M;i++){ //result += ((i%2==0)?(1.0):(-1.0))*get(i,0)*minor(i,0).det(); Matrix minr = minor(i,0); //printf("%.1f \n", get(i,0)); result += ((i%2==0)?(1.0):(-1.0))*get(i,0)*minr.det(); } return result; } } Matrix inverse(){ double mainDet = det(); if(mainDet == 0.0 || mainDet == -0.0) throw "Wyznacznik macierzy rowny zero!"; Matrix result = Matrix(M,N); if(M==1) result.set(0,0,1.0/mainDet); else{ for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<N;j++){ double detElement = (((i%2)+(j%2))%2==0?1.0:-1.0)*minor(i,j).det(); result.set(j,i,detElement/mainDet); } } } return result; } void setSize(unsigned M, unsigned N) { for(int i =0 ;i<this->M;i++){ delete[] tab[i]; } delete[] tab; this->M = M; this->N = N; this->tab = new double* [this->M]; for (unsigned i = 0; i < this->M; i++) { tab[i] = new double[this->N]; for(unsigned j=0;j< this->N; j++){ tab[i][j] = (i==j); //domyslnie macierz jednostkowa } } } }; void printMatrixCPU(const char* format, const Matrix& matrix){ for(unsigned i=0; i<matrix.getRows(); i++){ for(unsigned j=0; j<matrix.getColumns(); j++){ printf(format, matrix.get(i,j)); } printf("\n"); } } void printMatrixGPU(const char* format, void* 
matrix){ //debug //printf("%d\n",*(unsigned*)matrix); //printf("%d\n",*((unsigned*)matrix+1)); for(unsigned i=0; i<*(unsigned*)matrix; i++){ //wartosc *(unsigned*)matrix przechowuje liczbe wierszy for(unsigned j=0; j<*((unsigned*)matrix+1); j++){ //wartosc *(unsigned*)matrix+1 przechowuje liczbe kolumn //duzo castowania printf(format, *((double*)((unsigned*)matrix+2)+i*(*((unsigned*)matrix+1))+j)); } printf("\n"); } } void copyMatrixToGPU( void* ptr, const Matrix& matrix){ //Kopia macierzy w pamięci GPU *((unsigned*)ptr) = matrix.getRows(); *((unsigned*)ptr+1) = matrix.getColumns(); for(unsigned i=0;i<matrix.getRows();i++){ for(unsigned j=0;j<matrix.getColumns();j++){ //*((double*)((unsigned*)ptr+2)+(i*matrix.getRows()+j)) = matrix.get(i,j); *((double*)((unsigned*)ptr+2)+(i*matrix.getColumns()+j)) = matrix.get(i,j); } } } __global__ void add(void* matrix1, void* matrix2, void* result){//PRZY ZALOZENIU, ZE PAMIEC jest juz zaalokowana, w a matrix1 i matrix 2 są juz gotowe wartosci, zakladamy tez, ze zgadzaja sie wymiary macierzy i ewentualne inne warunki, NOTE: wymiary macierzy trzeba wpisac poza kernelem unsigned idx = threadIdx.x + blockDim.x * blockIdx.x; unsigned stride = blockDim.x * gridDim.x; for(unsigned i = idx; i<(*(unsigned*)matrix1)*(*((unsigned*)matrix1+1)); i+=stride){ *((double*)((unsigned*)result+2)+i) = *((double*)((unsigned*)matrix1+2)+i) + *((double*)((unsigned*)matrix2+2)+i); } } __global__ void mul(void* matrix1, void* matrix2, void* result){ unsigned idx = threadIdx.x + blockDim.x * blockIdx.x; unsigned stride = blockDim.x * gridDim.x; for(unsigned i = idx; i<(*(unsigned*)matrix1)*(*((unsigned*)matrix2+1)); i+=stride){ //przechodzimy po elementach macierzy wynikowej double product = 0.0; for(unsigned j=0; j<*(unsigned*)matrix2; j++){ product += (*((double*)((unsigned*)matrix1+2)+(i/(*((unsigned*)matrix2+1)))*(*((unsigned*)matrix1+1))+j)) * (*((double*)((unsigned*)matrix2+2)+j*(*((unsigned*)matrix2+1))+i%(*((unsigned*)matrix2+1)))); } *((double*)((unsigned*)result+2)+i) = product; } } __global__ void detHelper(void* matrix, double* tab_helper/*, unsigned N*/){ unsigned idx = threadIdx.x + blockDim.x * blockIdx.x; unsigned stride = blockDim.x * gridDim.x; unsigned N = *(unsigned*)matrix; //zapisujemy idx w silniowym systemie liczbowym: int* idxTab = new int[N]; for(int i=1;i<=N;i++){ idxTab[N-i] = idx%i; idx/=i; } //upewniamy sie, ze udzial w obliczeniach biora tylko te watki, dla ktorych oryginalny index nalezal do przedzialu <0, N!), czyli ze mamy co najwyzej N! watkow wykonujacych jakas prace if(idx==0){ //zapisujemy stride (skok dla pojedynczego watku) w silniowym systemie liczbowym int* strideTab = new int[N]; for(int i=1;i<=N;i++){ strideTab[N-i] = stride%i; stride/=i; } //przywracamy oryginalne wartosci idx = threadIdx.x + blockDim.x * blockIdx.x; stride = blockDim.x * gridDim.x; //warunek konca pracy watku (wyjscie poza zakres na skutek stalego dodawania strideTab do idxTab) while(idxTab[0]<N){ //wskaznik parzystosci permutacji, 0 - parzysta, 1 - nieparzysta int parz = 0; for(int i=0;i<N;i++) parz = (parz + idxTab[i])%2; //konwersja na interesujaca permutacje for(int i=0;i<N;i++){ for(int j=1;j<=N;j++){ bool niepojawilo = true; for(int k=0;k<i;k++){ if(idxTab[k] == j){niepojawilo = false; break;} } if(niepojawilo){ if(idxTab[i]==0){ idxTab[i]=j;break; } else idxTab[i]--; } } } //iloczyn czastkowy (jeden z N! z wzoru na wyznacznik), z odpowiednim znakiem double product = ((parz%2==0) ? 
1.0 : (-1.0)); for(int i=0;i<N;i++) product*= *(((double*)((unsigned*)matrix+2))+i*N+(idxTab[i]-1));//element z i-tego wiersza i (idxTab[i]-1) kolumny tab_helper[idx] += product; //konwersja odwrotna (przywrocenie numeru indexu) for(int i=0;i<N;i++){ int ile = 0; for(int j=i+1;j<N;j++){ if(idxTab[j]<idxTab[i]) ile++; } idxTab[i] = ile; } //dodanie idxTab+=strideTab, (patrz: warunek konca petli) int ak=0; for(int i=1;i<=N;i++){ idxTab[N-i]=idxTab[N-i]+strideTab[N-i]+ak; ak=idxTab[N-i]/i; if(i!=N) idxTab[N-i]%=i; } idxTab[0]+=ak; } delete[] strideTab; } delete[] idxTab; } void loadMatrixFromFile(Matrix & matrix) { char filename[50]; printf("Podaj nazwe pliku: "); scanf("%s", filename); std::ifstream myfile; myfile.open(filename); if (!myfile.good()){ printf("Nie udalo sie otworzyc pliku o podanej nazwie!\n"); } if (myfile.is_open()) { std::string line; getline(myfile, line); unsigned M = stoul(line); getline(myfile, line); unsigned N = stoul(line); matrix.setSize(M, N); for (unsigned i = 0; i < M; i++) { for (unsigned j = 0; j < N; j++) { getline(myfile, line); matrix.set(i, j, stod(line)); } } myfile.close(); } } void saveMatrixToFile(Matrix& matrixInt) { char filename[50]; printf("Podaj nazwe pliku: "); scanf("%s", filename); std::ofstream myfile; myfile.open(filename); myfile << matrixInt.getRows()<<"\n"; myfile << matrixInt.getColumns()<<"\n"; for (int i = 0; i < matrixInt.getRows(); i++) { for (int j = 0; j < matrixInt.getColumns(); j++) { myfile << matrixInt.get(i, j)<< "\n"; } } myfile.close(); } using namespace std::chrono; int main(){ const unsigned M = 3; //test values const unsigned N = 3; Matrix matrix1(M,N); Matrix matrix2(M,N); for(int i = 0 ;i<M;i++){ for(int j = 0 ; j<N ; j++){ matrix1.set(i,j,i); } } matrix1.set(0,0,1.0); matrix1.set(0,1,-1.0); matrix1.set(0,2,2.0); matrix1.set(1,0,3.0); matrix1.set(1,1,0.0); matrix1.set(1,2,-4.0); matrix1.set(2,0,2.0); matrix1.set(2,1,3.0); matrix1.set(2,2,5.0); for(int i = 0 ;i<M;i++){ for(int j = 0 ; j<N ; j++){ matrix2.set(i,j,j); } } int x = 11; while (x != 0) { printf("Macierz 1:\n"); printMatrixCPU("%.2f ", matrix1); printf("\n"); printf("Macierz 2:\n"); printMatrixCPU("%.2f ", matrix2); printf("\n"); printf( "*******************MENU********************\n"); printf("1.Wczytaj macierz z pliku\n"); printf("2.Zapisz macierz do pliku\n"); printf("3.Wyznacznik ( det(m1) )\n"); printf("4.Dodaj macierze (m1=m1+m2)\n"); printf("5.Przemnóż macierze (m1=m1*m2)\n"); printf("6.Macierz odwrotna ( m1=m1^(-1) )\n"); printf("0.Wyjdź\n"); printf("Wybieram : \n"); scanf("%d", &x); system("clear"); int subselect= 0; switch (x){ case 1: //ladowanie printf("1 - Macierz 1\n2 - Macierz 2\n"); scanf("%d", &subselect); subselect==1?loadMatrixFromFile(matrix1):loadMatrixFromFile(matrix2); break; case 2: //zapisywanie printf("1 - Macierz 1\n2 - Macierz 2\n"); scanf("%d", &subselect); subselect==1?saveMatrixToFile(matrix1):saveMatrixToFile(matrix2); break; case 3: //wyznacznik if(matrix1.getRows() != matrix1.getColumns()){ printf("Wymiary macierzy musza byc rowne!!!"); } else{ //deklarcja wskaznikow na pamiec urzadzenia void* d_1 = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci cudaMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); //kopia macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); //dynamiczne przystosowanie liczby watkow (ominiecie bledu BLOCKS_PER_GRID * THREADS_PER_BLOCK >= N!) 
unsigned tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; unsigned tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; switch(matrix1.getRows()){ case 1: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 2: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 3: tempBLOCKS_PER_GRID=2; tempTHREADS_PER_BLOCK=2; break; case 4: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=4; break; case 5: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=16; break; case 6: tempBLOCKS_PER_GRID=8; tempTHREADS_PER_BLOCK=64; break; default: tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; } //tablica i wskaznik pomocnicze double* tab = NULL;//new double[BLOCKS_PER_GRID * THREADS_PER_BLOCK]; cudaMallocManaged(&tab, sizeof(double) * tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK); for(unsigned i=0;i<tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK; i++) tab[i] = 0.0; double w = 0.0; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tbk = high_resolution_clock::now(); //jezeli rozmiar macierzy wiekszy od jeden wywolaj kernel, oblicz wyznacznik if(matrix1.getRows()>1){ detHelper<<<tempBLOCKS_PER_GRID, tempTHREADS_PER_BLOCK>>>(d_1, tab); cudaDeviceSynchronize(); for(unsigned i=0;i<tempBLOCKS_PER_GRID*tempTHREADS_PER_BLOCK; i++){ w += tab[i]; } } //w przeciwnym wypadku wyznacznikiem jest jedyny element macierzy else w = *((double*)((unsigned*)d_1+2)); //koniec mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tk = high_resolution_clock::now(); duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("(Device) Wyznacznik obliczony przy uzyciu kernela:\n"); printf("%f\n\n", w); printf("(Device) wykonano w ciagu: %fs\n", time_nalloc.count()); printf("(Device) ... 
%fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); cudaFree(d_1); cudaFree(tab); //delete[] tab; //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); double d = matrix1.det(); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printf("%f\n\n", d); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); } break; case 4: //dodawanie if(matrix1.getRows() != matrix2.getRows() || matrix1.getColumns()!=matrix2.getColumns()){ printf("Macierze różnych rozmiarów!\n"); } else{ //realizacja na GPU: //deklaracja wskaznikow na pamiec urzadzenia void* d_1 = NULL; void* d_2 = NULL; void* d_r = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci cudaMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); cudaMallocManaged(&d_2, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); cudaMallocManaged(&d_r, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); //kopie macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); copyMatrixToGPU(d_2, matrix2); //poczatek mierzenia czasu, jedynie obliczenia high_resolution_clock::time_point tbk = high_resolution_clock::now(); //ustawienie rozmiaru macierzy wynikowej *(unsigned*)d_r = matrix1.getRows(); *((unsigned*)d_r+1) = matrix1.getColumns(); add<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(d_1, d_2, d_r); cudaDeviceSynchronize(); //koniec mierzenia czasu high_resolution_clock::time_point tk = high_resolution_clock::now(); //obliczenie delt czasowych: duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("Wynik obliczen na GPU:\n"); printMatrixGPU("%.2f ", d_r); printf("\n"); printf("(Device) wykonano w ciagu: %fs\n", time_nalloc.count()); printf("(Device) ... 
%fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); cudaFree(d_1); cudaFree(d_2); cudaFree(d_r); //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); Matrix result = matrix1 + matrix2;//(matrix1.getRows(),matrix.getColumns()); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printMatrixCPU("%.2f ", result); printf("\n"); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); for(unsigned i=0;i<matrix1.getRows();i++){ for(unsigned j=0;j<matrix1.getColumns();j++){ matrix1.set(i,j,result.get(i,j)); } } } //else executeAdding(matrix1, matrix2); break; case 5: //mnozenie if(matrix1.getColumns() != matrix2.getRows()){ printf("Nie zgadzaja sie rozmiary macierzy!!!\n"); } else{ //realizacja na GPU: //deklaracja wskaznikow na pamiec urzadzenia void* d_1 = NULL; void* d_2 = NULL; void* d_r = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci cudaMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); cudaMallocManaged(&d_2, 2*sizeof(unsigned)+matrix2.getRows()*matrix2.getColumns()*sizeof(double)); cudaMallocManaged(&d_r, 2*sizeof(unsigned)+matrix1.getRows()*matrix2.getColumns()*sizeof(double)); //kopie macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); copyMatrixToGPU(d_2, matrix2); //poczatek mierzenia czasu, jedynie obliczenia high_resolution_clock::time_point tbk = high_resolution_clock::now(); //ustawienie rozmiaru macierzy wynikowej *(unsigned*)d_r = matrix1.getRows(); *((unsigned*)d_r+1) = matrix2.getColumns(); mul<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(d_1, d_2, d_r); cudaDeviceSynchronize(); //koniec mierzenia czasu high_resolution_clock::time_point tk = high_resolution_clock::now(); //obliczenie delt czasowych: duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("Wynik obliczen na GPU:\n"); printMatrixGPU("%.2f ", d_r); printf("\n"); printf("(Device) wykonano w ciagu: %fs\n", time_nalloc.count()); printf("(Device) ... 
%fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); cudaFree(d_1); cudaFree(d_2); cudaFree(d_r); //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); Matrix result = matrix1 * matrix2;//(matrix1.getRows(),matrix.getColumns()); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printMatrixCPU("%.2f ", result); printf("\n"); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); matrix1.setSize(result.getRows(),result.getColumns()); for(unsigned i=0;i<matrix1.getRows();i++){ for(unsigned j=0;j<matrix1.getColumns();j++){ matrix1.set(i,j,result.get(i,j)); } } } break; case 6: //macierz odwrotna if(matrix1.getColumns() != matrix1.getRows()){ printf("Nie zgadzaja sie rozmiary macierzy!!!\n"); } else{ //deklarcja wskaznikow na pamiec urzadzenia void* d_1 = NULL; void* d_r = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci cudaMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); cudaMallocManaged(&d_r, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); //kopia macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); //dynamiczne przystosowanie liczby watkow (ominiecie bledu BLOCKS_PER_GRID * THREADS_PER_BLOCK >= N!) unsigned tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; unsigned tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; switch(matrix1.getRows()){ case 1: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 2: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 3: tempBLOCKS_PER_GRID=2; tempTHREADS_PER_BLOCK=2; break; case 4: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=4; break; case 5: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=16; break; case 6: tempBLOCKS_PER_GRID=8; tempTHREADS_PER_BLOCK=64; break; default: tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; } //tablica i wskaznik pomocnicze double* tab = NULL;//new double[BLOCKS_PER_GRID * THREADS_PER_BLOCK]; cudaMallocManaged(&tab, sizeof(double) * tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK); for(unsigned i=0;i<tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK; i++) tab[i] = 0.0; double w = 0.0; //poczatek mierzenia czasu wliczajac alokacja pamieci //high_resolution_clock::time_point tbk = high_resolution_clock::now(); if(matrix1.getColumns()>1){ detHelper<<<tempBLOCKS_PER_GRID, tempTHREADS_PER_BLOCK>>>(d_1, tab); cudaDeviceSynchronize(); for(unsigned i=0;i<tempBLOCKS_PER_GRID*tempTHREADS_PER_BLOCK; i++){ w += tab[i]; } } //w przeciwnym wypadku wyznacznikiem jest jedyny element macierzy else w = *((double*)((unsigned*)d_1+2)); double mainDet = w; //debug printf("Wyznacznik glownej macierzy: %f\n", mainDet); if(mainDet == 0.0 || mainDet == -0.0){ printf("Wyznacznik macierzy rowny 0.0 - macierz odwrotna nie istnieje!"); } else{ *(unsigned*)d_r = matrix1.getRows(); *((unsigned*)d_r+1) = matrix1.getColumns(); if(matrix1.getRows()==1){ *((double*)((unsigned*)d_r+2)) = 1.0/mainDet; } else if(matrix1.getRows()>1){ //Tworzymy bufor na minora: void* minor = NULL; cudaMallocManaged(&minor, 2*sizeof(unsigned)+(matrix1.getRows()-1)*(matrix1.getColumns()-1)*sizeof(double)); //przekopiowanie *(unsigned*)minor = matrix1.getRows()-1; *((unsigned*)minor+1) = matrix1.getColumns()-1; for(unsigned i=0; 
i<matrix1.getRows(); i++){ for(unsigned j=0; j<matrix1.getColumns(); j++){ //zerujemy tab i w for(unsigned q=0;q<tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK; q++) tab[q] = 0.0; w = 0.0; for(unsigned q=0;q<matrix1.getRows(); q++){ for(unsigned k=0;k<matrix1.getRows(); k++){ if(q<i && k<j) *((double*)((unsigned*)minor+2)+q*(matrix1.getColumns()-1)+k) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); if(q<i && k>j) *((double*)((unsigned*)minor+2)+q*(matrix1.getColumns()-1)+k-1) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); if(q>i && k<j) *((double*)((unsigned*)minor+2)+(q-1)*(matrix1.getColumns()-1)+k) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); if(q>i && k>j) *((double*)((unsigned*)minor+2)+(q-1)*(matrix1.getColumns()-1)+k-1) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); } } unsigned temp2BLOCKS_PER_GRID = BLOCKS_PER_GRID; unsigned temp2THREADS_PER_BLOCK = THREADS_PER_BLOCK; switch(matrix1.getRows()-1){ case 1: temp2BLOCKS_PER_GRID=1; temp2THREADS_PER_BLOCK=1; break; case 2: temp2BLOCKS_PER_GRID=1; temp2THREADS_PER_BLOCK=1; break; case 3: temp2BLOCKS_PER_GRID=2; temp2THREADS_PER_BLOCK=2; break; case 4: temp2BLOCKS_PER_GRID=4; temp2THREADS_PER_BLOCK=4; break; case 5: temp2BLOCKS_PER_GRID=4; temp2THREADS_PER_BLOCK=16; break; case 6: temp2BLOCKS_PER_GRID=8; temp2THREADS_PER_BLOCK=64; break; default: temp2BLOCKS_PER_GRID = BLOCKS_PER_GRID; temp2THREADS_PER_BLOCK = THREADS_PER_BLOCK; } if(matrix1.getColumns()-1>1){ //kernel detHelper<<<temp2BLOCKS_PER_GRID, temp2THREADS_PER_BLOCK>>>(minor, tab); cudaDeviceSynchronize(); for(unsigned q=0;q<temp2BLOCKS_PER_GRID*temp2THREADS_PER_BLOCK; q++){ w += tab[q]; } } else w = *((double*)((unsigned*)minor+2)); *((double*)((unsigned*)d_r+2)+j*matrix1.getColumns()+i) = (((i%2)+(j%2))%2==0?1.0:-1.0) * w / mainDet; } } cudaFree(minor); } //koniec mierzenia czasu high_resolution_clock::time_point tk = high_resolution_clock::now(); //obliczenie delt czasowych: duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); //duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("Wynik obliczen na GPU:\n"); printMatrixGPU("%.2f ", d_r); printf("\n"); printf("(Device) wykonano w ciagu: %fs\n", time_alloc.count()); //printf("(Device) ... %fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); } cudaFree(tab); cudaFree(d_r); cudaFree(d_1); if(mainDet != 0.0 && mainDet != -0.0){ try{ //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); Matrix result = matrix1.inverse(); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printMatrixCPU("%.2f ", result); printf("\n"); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); for(unsigned i=0;i<matrix1.getRows();i++){ for(unsigned j=0;j<matrix1.getColumns();j++){ matrix1.set(i,j,result.get(i,j)); } } } catch(...){} } } break; case 0: //wyjscie exit(0); default: printf("Nie rozumiem - sprobuj ponownie!"); break; } } return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> //printf #include <stdlib.h>//srand, rand #include <time.h> //time #include <fstream> #include <string> #include <iostream> #include <ctime> #include <ratio> #include <chrono> //globalne ustawienia const unsigned BLOCKS_PER_GRID = 16; const unsigned THREADS_PER_BLOCK = 256; class Matrix{ unsigned M,N; //rozmiary macierzy double** tab; //wskaznik na dane macierzy Matrix minor(unsigned m,unsigned n){ Matrix result(M-1,N-1); for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<N;j++){ if(i<m && j<n) result.set(i,j,this->get(i,j)); else if(i<m && j>n) result.set(i,j-1,this->get(i,j)); else if(i>m && j<n) result.set(i-1,j,this->get(i,j)); else if(i>m && j>n) result.set(i-1,j-1,this->get(i,j)); } } return result; } public: Matrix(unsigned M, unsigned N){ this->M = M; this->N = N; tab = new double*[M]; for(unsigned i=0;i<M;i++){ tab[i] = new double[N]; for(unsigned j=0;j<N;j++){ tab[i][j] = (i==j); //domyslnie macierz jednostkowa } } } ~Matrix(){ for(unsigned i=0;i<M;i++){ delete[] tab[i]; } delete[] tab; } unsigned getRows() const{ return M;} unsigned getColumns() const{ return N;} double get(unsigned m, unsigned n) const{ if(m<M && n<N) return tab[m][n]; throw "Nie znaleziono elementu pod podana para indeksow."; } void set(unsigned m, unsigned n, double num){ if(m<M && n<N) tab[m][n] = num; else throw "Nie mozna ustawic wartosci elementu pod podana para indeksow."; } Matrix operator+(const Matrix& mat) const{ if(this->M != mat.getRows() || this->N != mat.getColumns()) throw "Nie mozna dodawac macierzy o roznych wymiarach!"; Matrix result = Matrix(M,N); for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<N;j++){ result.set(i,j,this->get(i,j)+mat.get(i,j)); } } return result; } Matrix operator*(const Matrix& mat) const{ if(this->N != mat.getRows()) throw "Pierwsza macierz nie ma tylu kolumn, co druga wierszy - odmowa wykonania mnozenia!"; Matrix result = Matrix(M, mat.getColumns()); for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<mat.getColumns();j++){ //result.set(i,j,this->get(i,j)+mat.get(i,j)); result.set(i,j,0.0); for(unsigned k=0;k<this->N;k++){ result.set(i,j,result.get(i,j)+this->get(i,k)*mat.get(k,j)); } } } return result; } double det(){ if(M != N) throw "Wyznacznik mozna obliczyc tylko dla macierzy kwadratowej!"; if(M == 1) return get(0,0); else{ double result = 0.0; for(unsigned i=0;i<M;i++){ //result += ((i%2==0)?(1.0):(-1.0))*get(i,0)*minor(i,0).det(); Matrix minr = minor(i,0); //printf("%.1f \n", get(i,0)); result += ((i%2==0)?(1.0):(-1.0))*get(i,0)*minr.det(); } return result; } } Matrix inverse(){ double mainDet = det(); if(mainDet == 0.0 || mainDet == -0.0) throw "Wyznacznik macierzy rowny zero!"; Matrix result = Matrix(M,N); if(M==1) result.set(0,0,1.0/mainDet); else{ for(unsigned i=0;i<M;i++){ for(unsigned j=0;j<N;j++){ double detElement = (((i%2)+(j%2))%2==0?1.0:-1.0)*minor(i,j).det(); result.set(j,i,detElement/mainDet); } } } return result; } void setSize(unsigned M, unsigned N) { for(int i =0 ;i<this->M;i++){ delete[] tab[i]; } delete[] tab; this->M = M; this->N = N; this->tab = new double* [this->M]; for (unsigned i = 0; i < this->M; i++) { tab[i] = new double[this->N]; for(unsigned j=0;j< this->N; j++){ tab[i][j] = (i==j); //domyslnie macierz jednostkowa } } } }; void printMatrixCPU(const char* format, const Matrix& matrix){ for(unsigned i=0; i<matrix.getRows(); i++){ for(unsigned j=0; j<matrix.getColumns(); j++){ printf(format, matrix.get(i,j)); } printf("\n"); } } void printMatrixGPU(const char* format, void* matrix){ //debug 
//printf("%d\n",*(unsigned*)matrix); //printf("%d\n",*((unsigned*)matrix+1)); for(unsigned i=0; i<*(unsigned*)matrix; i++){ //wartosc *(unsigned*)matrix przechowuje liczbe wierszy for(unsigned j=0; j<*((unsigned*)matrix+1); j++){ //wartosc *(unsigned*)matrix+1 przechowuje liczbe kolumn //duzo castowania printf(format, *((double*)((unsigned*)matrix+2)+i*(*((unsigned*)matrix+1))+j)); } printf("\n"); } } void copyMatrixToGPU( void* ptr, const Matrix& matrix){ //Kopia macierzy w pamięci GPU *((unsigned*)ptr) = matrix.getRows(); *((unsigned*)ptr+1) = matrix.getColumns(); for(unsigned i=0;i<matrix.getRows();i++){ for(unsigned j=0;j<matrix.getColumns();j++){ //*((double*)((unsigned*)ptr+2)+(i*matrix.getRows()+j)) = matrix.get(i,j); *((double*)((unsigned*)ptr+2)+(i*matrix.getColumns()+j)) = matrix.get(i,j); } } } __global__ void add(void* matrix1, void* matrix2, void* result){//PRZY ZALOZENIU, ZE PAMIEC jest juz zaalokowana, w a matrix1 i matrix 2 są juz gotowe wartosci, zakladamy tez, ze zgadzaja sie wymiary macierzy i ewentualne inne warunki, NOTE: wymiary macierzy trzeba wpisac poza kernelem unsigned idx = threadIdx.x + blockDim.x * blockIdx.x; unsigned stride = blockDim.x * gridDim.x; for(unsigned i = idx; i<(*(unsigned*)matrix1)*(*((unsigned*)matrix1+1)); i+=stride){ *((double*)((unsigned*)result+2)+i) = *((double*)((unsigned*)matrix1+2)+i) + *((double*)((unsigned*)matrix2+2)+i); } } __global__ void mul(void* matrix1, void* matrix2, void* result){ unsigned idx = threadIdx.x + blockDim.x * blockIdx.x; unsigned stride = blockDim.x * gridDim.x; for(unsigned i = idx; i<(*(unsigned*)matrix1)*(*((unsigned*)matrix2+1)); i+=stride){ //przechodzimy po elementach macierzy wynikowej double product = 0.0; for(unsigned j=0; j<*(unsigned*)matrix2; j++){ product += (*((double*)((unsigned*)matrix1+2)+(i/(*((unsigned*)matrix2+1)))*(*((unsigned*)matrix1+1))+j)) * (*((double*)((unsigned*)matrix2+2)+j*(*((unsigned*)matrix2+1))+i%(*((unsigned*)matrix2+1)))); } *((double*)((unsigned*)result+2)+i) = product; } } __global__ void detHelper(void* matrix, double* tab_helper/*, unsigned N*/){ unsigned idx = threadIdx.x + blockDim.x * blockIdx.x; unsigned stride = blockDim.x * gridDim.x; unsigned N = *(unsigned*)matrix; //zapisujemy idx w silniowym systemie liczbowym: int* idxTab = new int[N]; for(int i=1;i<=N;i++){ idxTab[N-i] = idx%i; idx/=i; } //upewniamy sie, ze udzial w obliczeniach biora tylko te watki, dla ktorych oryginalny index nalezal do przedzialu <0, N!), czyli ze mamy co najwyzej N! watkow wykonujacych jakas prace if(idx==0){ //zapisujemy stride (skok dla pojedynczego watku) w silniowym systemie liczbowym int* strideTab = new int[N]; for(int i=1;i<=N;i++){ strideTab[N-i] = stride%i; stride/=i; } //przywracamy oryginalne wartosci idx = threadIdx.x + blockDim.x * blockIdx.x; stride = blockDim.x * gridDim.x; //warunek konca pracy watku (wyjscie poza zakres na skutek stalego dodawania strideTab do idxTab) while(idxTab[0]<N){ //wskaznik parzystosci permutacji, 0 - parzysta, 1 - nieparzysta int parz = 0; for(int i=0;i<N;i++) parz = (parz + idxTab[i])%2; //konwersja na interesujaca permutacje for(int i=0;i<N;i++){ for(int j=1;j<=N;j++){ bool niepojawilo = true; for(int k=0;k<i;k++){ if(idxTab[k] == j){niepojawilo = false; break;} } if(niepojawilo){ if(idxTab[i]==0){ idxTab[i]=j;break; } else idxTab[i]--; } } } //iloczyn czastkowy (jeden z N! z wzoru na wyznacznik), z odpowiednim znakiem double product = ((parz%2==0) ? 
1.0 : (-1.0)); for(int i=0;i<N;i++) product*= *(((double*)((unsigned*)matrix+2))+i*N+(idxTab[i]-1));//element z i-tego wiersza i (idxTab[i]-1) kolumny tab_helper[idx] += product; //konwersja odwrotna (przywrocenie numeru indexu) for(int i=0;i<N;i++){ int ile = 0; for(int j=i+1;j<N;j++){ if(idxTab[j]<idxTab[i]) ile++; } idxTab[i] = ile; } //dodanie idxTab+=strideTab, (patrz: warunek konca petli) int ak=0; for(int i=1;i<=N;i++){ idxTab[N-i]=idxTab[N-i]+strideTab[N-i]+ak; ak=idxTab[N-i]/i; if(i!=N) idxTab[N-i]%=i; } idxTab[0]+=ak; } delete[] strideTab; } delete[] idxTab; } void loadMatrixFromFile(Matrix & matrix) { char filename[50]; printf("Podaj nazwe pliku: "); scanf("%s", filename); std::ifstream myfile; myfile.open(filename); if (!myfile.good()){ printf("Nie udalo sie otworzyc pliku o podanej nazwie!\n"); } if (myfile.is_open()) { std::string line; getline(myfile, line); unsigned M = stoul(line); getline(myfile, line); unsigned N = stoul(line); matrix.setSize(M, N); for (unsigned i = 0; i < M; i++) { for (unsigned j = 0; j < N; j++) { getline(myfile, line); matrix.set(i, j, stod(line)); } } myfile.close(); } } void saveMatrixToFile(Matrix& matrixInt) { char filename[50]; printf("Podaj nazwe pliku: "); scanf("%s", filename); std::ofstream myfile; myfile.open(filename); myfile << matrixInt.getRows()<<"\n"; myfile << matrixInt.getColumns()<<"\n"; for (int i = 0; i < matrixInt.getRows(); i++) { for (int j = 0; j < matrixInt.getColumns(); j++) { myfile << matrixInt.get(i, j)<< "\n"; } } myfile.close(); } using namespace std::chrono; int main(){ const unsigned M = 3; //test values const unsigned N = 3; Matrix matrix1(M,N); Matrix matrix2(M,N); for(int i = 0 ;i<M;i++){ for(int j = 0 ; j<N ; j++){ matrix1.set(i,j,i); } } matrix1.set(0,0,1.0); matrix1.set(0,1,-1.0); matrix1.set(0,2,2.0); matrix1.set(1,0,3.0); matrix1.set(1,1,0.0); matrix1.set(1,2,-4.0); matrix1.set(2,0,2.0); matrix1.set(2,1,3.0); matrix1.set(2,2,5.0); for(int i = 0 ;i<M;i++){ for(int j = 0 ; j<N ; j++){ matrix2.set(i,j,j); } } int x = 11; while (x != 0) { printf("Macierz 1:\n"); printMatrixCPU("%.2f ", matrix1); printf("\n"); printf("Macierz 2:\n"); printMatrixCPU("%.2f ", matrix2); printf("\n"); printf( "*******************MENU********************\n"); printf("1.Wczytaj macierz z pliku\n"); printf("2.Zapisz macierz do pliku\n"); printf("3.Wyznacznik ( det(m1) )\n"); printf("4.Dodaj macierze (m1=m1+m2)\n"); printf("5.Przemnóż macierze (m1=m1*m2)\n"); printf("6.Macierz odwrotna ( m1=m1^(-1) )\n"); printf("0.Wyjdź\n"); printf("Wybieram : \n"); scanf("%d", &x); system("clear"); int subselect= 0; switch (x){ case 1: //ladowanie printf("1 - Macierz 1\n2 - Macierz 2\n"); scanf("%d", &subselect); subselect==1?loadMatrixFromFile(matrix1):loadMatrixFromFile(matrix2); break; case 2: //zapisywanie printf("1 - Macierz 1\n2 - Macierz 2\n"); scanf("%d", &subselect); subselect==1?saveMatrixToFile(matrix1):saveMatrixToFile(matrix2); break; case 3: //wyznacznik if(matrix1.getRows() != matrix1.getColumns()){ printf("Wymiary macierzy musza byc rowne!!!"); } else{ //deklarcja wskaznikow na pamiec urzadzenia void* d_1 = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci hipMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); //kopia macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); //dynamiczne przystosowanie liczby watkow (ominiecie bledu BLOCKS_PER_GRID * THREADS_PER_BLOCK >= N!) 
unsigned tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; unsigned tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; switch(matrix1.getRows()){ case 1: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 2: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 3: tempBLOCKS_PER_GRID=2; tempTHREADS_PER_BLOCK=2; break; case 4: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=4; break; case 5: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=16; break; case 6: tempBLOCKS_PER_GRID=8; tempTHREADS_PER_BLOCK=64; break; default: tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; } //tablica i wskaznik pomocnicze double* tab = NULL;//new double[BLOCKS_PER_GRID * THREADS_PER_BLOCK]; hipMallocManaged(&tab, sizeof(double) * tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK); for(unsigned i=0;i<tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK; i++) tab[i] = 0.0; double w = 0.0; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tbk = high_resolution_clock::now(); //jezeli rozmiar macierzy wiekszy od jeden wywolaj kernel, oblicz wyznacznik if(matrix1.getRows()>1){ detHelper<<<tempBLOCKS_PER_GRID, tempTHREADS_PER_BLOCK>>>(d_1, tab); hipDeviceSynchronize(); for(unsigned i=0;i<tempBLOCKS_PER_GRID*tempTHREADS_PER_BLOCK; i++){ w += tab[i]; } } //w przeciwnym wypadku wyznacznikiem jest jedyny element macierzy else w = *((double*)((unsigned*)d_1+2)); //koniec mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tk = high_resolution_clock::now(); duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("(Device) Wyznacznik obliczony przy uzyciu kernela:\n"); printf("%f\n\n", w); printf("(Device) wykonano w ciagu: %fs\n", time_nalloc.count()); printf("(Device) ... 
%fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); hipFree(d_1); hipFree(tab); //delete[] tab; //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); double d = matrix1.det(); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printf("%f\n\n", d); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); } break; case 4: //dodawanie if(matrix1.getRows() != matrix2.getRows() || matrix1.getColumns()!=matrix2.getColumns()){ printf("Macierze różnych rozmiarów!\n"); } else{ //realizacja na GPU: //deklaracja wskaznikow na pamiec urzadzenia void* d_1 = NULL; void* d_2 = NULL; void* d_r = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci hipMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); hipMallocManaged(&d_2, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); hipMallocManaged(&d_r, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); //kopie macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); copyMatrixToGPU(d_2, matrix2); //poczatek mierzenia czasu, jedynie obliczenia high_resolution_clock::time_point tbk = high_resolution_clock::now(); //ustawienie rozmiaru macierzy wynikowej *(unsigned*)d_r = matrix1.getRows(); *((unsigned*)d_r+1) = matrix1.getColumns(); add<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(d_1, d_2, d_r); hipDeviceSynchronize(); //koniec mierzenia czasu high_resolution_clock::time_point tk = high_resolution_clock::now(); //obliczenie delt czasowych: duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("Wynik obliczen na GPU:\n"); printMatrixGPU("%.2f ", d_r); printf("\n"); printf("(Device) wykonano w ciagu: %fs\n", time_nalloc.count()); printf("(Device) ... 
%fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); hipFree(d_1); hipFree(d_2); hipFree(d_r); //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); Matrix result = matrix1 + matrix2;//(matrix1.getRows(),matrix.getColumns()); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printMatrixCPU("%.2f ", result); printf("\n"); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); for(unsigned i=0;i<matrix1.getRows();i++){ for(unsigned j=0;j<matrix1.getColumns();j++){ matrix1.set(i,j,result.get(i,j)); } } } //else executeAdding(matrix1, matrix2); break; case 5: //mnozenie if(matrix1.getColumns() != matrix2.getRows()){ printf("Nie zgadzaja sie rozmiary macierzy!!!\n"); } else{ //realizacja na GPU: //deklaracja wskaznikow na pamiec urzadzenia void* d_1 = NULL; void* d_2 = NULL; void* d_r = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci hipMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); hipMallocManaged(&d_2, 2*sizeof(unsigned)+matrix2.getRows()*matrix2.getColumns()*sizeof(double)); hipMallocManaged(&d_r, 2*sizeof(unsigned)+matrix1.getRows()*matrix2.getColumns()*sizeof(double)); //kopie macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); copyMatrixToGPU(d_2, matrix2); //poczatek mierzenia czasu, jedynie obliczenia high_resolution_clock::time_point tbk = high_resolution_clock::now(); //ustawienie rozmiaru macierzy wynikowej *(unsigned*)d_r = matrix1.getRows(); *((unsigned*)d_r+1) = matrix2.getColumns(); mul<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(d_1, d_2, d_r); hipDeviceSynchronize(); //koniec mierzenia czasu high_resolution_clock::time_point tk = high_resolution_clock::now(); //obliczenie delt czasowych: duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("Wynik obliczen na GPU:\n"); printMatrixGPU("%.2f ", d_r); printf("\n"); printf("(Device) wykonano w ciagu: %fs\n", time_nalloc.count()); printf("(Device) ... 
%fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); hipFree(d_1); hipFree(d_2); hipFree(d_r); //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); Matrix result = matrix1 * matrix2;//(matrix1.getRows(),matrix.getColumns()); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printMatrixCPU("%.2f ", result); printf("\n"); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); matrix1.setSize(result.getRows(),result.getColumns()); for(unsigned i=0;i<matrix1.getRows();i++){ for(unsigned j=0;j<matrix1.getColumns();j++){ matrix1.set(i,j,result.get(i,j)); } } } break; case 6: //macierz odwrotna if(matrix1.getColumns() != matrix1.getRows()){ printf("Nie zgadzaja sie rozmiary macierzy!!!\n"); } else{ //deklarcja wskaznikow na pamiec urzadzenia void* d_1 = NULL; void* d_r = NULL; //poczatek mierzenia czasu wliczajac alokacja pamieci high_resolution_clock::time_point tzk = high_resolution_clock::now(); //alokacja pamieci hipMallocManaged(&d_1, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); hipMallocManaged(&d_r, 2*sizeof(unsigned)+matrix1.getRows()*matrix1.getColumns()*sizeof(double)); //kopia macierzy w pamieci GPU: copyMatrixToGPU(d_1, matrix1); //dynamiczne przystosowanie liczby watkow (ominiecie bledu BLOCKS_PER_GRID * THREADS_PER_BLOCK >= N!) unsigned tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; unsigned tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; switch(matrix1.getRows()){ case 1: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 2: tempBLOCKS_PER_GRID=1; tempTHREADS_PER_BLOCK=1; break; case 3: tempBLOCKS_PER_GRID=2; tempTHREADS_PER_BLOCK=2; break; case 4: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=4; break; case 5: tempBLOCKS_PER_GRID=4; tempTHREADS_PER_BLOCK=16; break; case 6: tempBLOCKS_PER_GRID=8; tempTHREADS_PER_BLOCK=64; break; default: tempBLOCKS_PER_GRID = BLOCKS_PER_GRID; tempTHREADS_PER_BLOCK = THREADS_PER_BLOCK; } //tablica i wskaznik pomocnicze double* tab = NULL;//new double[BLOCKS_PER_GRID * THREADS_PER_BLOCK]; hipMallocManaged(&tab, sizeof(double) * tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK); for(unsigned i=0;i<tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK; i++) tab[i] = 0.0; double w = 0.0; //poczatek mierzenia czasu wliczajac alokacja pamieci //high_resolution_clock::time_point tbk = high_resolution_clock::now(); if(matrix1.getColumns()>1){ detHelper<<<tempBLOCKS_PER_GRID, tempTHREADS_PER_BLOCK>>>(d_1, tab); hipDeviceSynchronize(); for(unsigned i=0;i<tempBLOCKS_PER_GRID*tempTHREADS_PER_BLOCK; i++){ w += tab[i]; } } //w przeciwnym wypadku wyznacznikiem jest jedyny element macierzy else w = *((double*)((unsigned*)d_1+2)); double mainDet = w; //debug printf("Wyznacznik glownej macierzy: %f\n", mainDet); if(mainDet == 0.0 || mainDet == -0.0){ printf("Wyznacznik macierzy rowny 0.0 - macierz odwrotna nie istnieje!"); } else{ *(unsigned*)d_r = matrix1.getRows(); *((unsigned*)d_r+1) = matrix1.getColumns(); if(matrix1.getRows()==1){ *((double*)((unsigned*)d_r+2)) = 1.0/mainDet; } else if(matrix1.getRows()>1){ //Tworzymy bufor na minora: void* minor = NULL; hipMallocManaged(&minor, 2*sizeof(unsigned)+(matrix1.getRows()-1)*(matrix1.getColumns()-1)*sizeof(double)); //przekopiowanie *(unsigned*)minor = matrix1.getRows()-1; *((unsigned*)minor+1) = matrix1.getColumns()-1; for(unsigned i=0; i<matrix1.getRows(); 
i++){ for(unsigned j=0; j<matrix1.getColumns(); j++){ //zerujemy tab i w for(unsigned q=0;q<tempBLOCKS_PER_GRID * tempTHREADS_PER_BLOCK; q++) tab[q] = 0.0; w = 0.0; for(unsigned q=0;q<matrix1.getRows(); q++){ for(unsigned k=0;k<matrix1.getRows(); k++){ if(q<i && k<j) *((double*)((unsigned*)minor+2)+q*(matrix1.getColumns()-1)+k) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); if(q<i && k>j) *((double*)((unsigned*)minor+2)+q*(matrix1.getColumns()-1)+k-1) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); if(q>i && k<j) *((double*)((unsigned*)minor+2)+(q-1)*(matrix1.getColumns()-1)+k) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); if(q>i && k>j) *((double*)((unsigned*)minor+2)+(q-1)*(matrix1.getColumns()-1)+k-1) = *((double*)((unsigned*)d_1+2) + q*matrix1.getColumns() + k); } } unsigned temp2BLOCKS_PER_GRID = BLOCKS_PER_GRID; unsigned temp2THREADS_PER_BLOCK = THREADS_PER_BLOCK; switch(matrix1.getRows()-1){ case 1: temp2BLOCKS_PER_GRID=1; temp2THREADS_PER_BLOCK=1; break; case 2: temp2BLOCKS_PER_GRID=1; temp2THREADS_PER_BLOCK=1; break; case 3: temp2BLOCKS_PER_GRID=2; temp2THREADS_PER_BLOCK=2; break; case 4: temp2BLOCKS_PER_GRID=4; temp2THREADS_PER_BLOCK=4; break; case 5: temp2BLOCKS_PER_GRID=4; temp2THREADS_PER_BLOCK=16; break; case 6: temp2BLOCKS_PER_GRID=8; temp2THREADS_PER_BLOCK=64; break; default: temp2BLOCKS_PER_GRID = BLOCKS_PER_GRID; temp2THREADS_PER_BLOCK = THREADS_PER_BLOCK; } if(matrix1.getColumns()-1>1){ //kernel detHelper<<<temp2BLOCKS_PER_GRID, temp2THREADS_PER_BLOCK>>>(minor, tab); hipDeviceSynchronize(); for(unsigned q=0;q<temp2BLOCKS_PER_GRID*temp2THREADS_PER_BLOCK; q++){ w += tab[q]; } } else w = *((double*)((unsigned*)minor+2)); *((double*)((unsigned*)d_r+2)+j*matrix1.getColumns()+i) = (((i%2)+(j%2))%2==0?1.0:-1.0) * w / mainDet; } } hipFree(minor); } //koniec mierzenia czasu high_resolution_clock::time_point tk = high_resolution_clock::now(); //obliczenie delt czasowych: duration<double> time_alloc = duration_cast<duration<double>>(tk - tzk); //duration<double> time_nalloc = duration_cast<duration<double>>(tk - tbk); printf("Wynik obliczen na GPU:\n"); printMatrixGPU("%.2f ", d_r); printf("\n"); printf("(Device) wykonano w ciagu: %fs\n", time_alloc.count()); //printf("(Device) ... %fs, jesli uzwglednic zarzadzanie pamiecia.\n", time_alloc.count()); printf("\n"); } hipFree(tab); hipFree(d_r); hipFree(d_1); if(mainDet != 0.0 && mainDet != -0.0){ try{ //poczatek pomiaru czasu na CPU high_resolution_clock::time_point tpc = high_resolution_clock::now(); Matrix result = matrix1.inverse(); //koniec pomiaru czasu na CPU high_resolution_clock::time_point tpk = high_resolution_clock::now(); duration<double> timeCPU = duration_cast<duration<double>>(tpk - tpc); printf("Wynik obliczen na CPU:\n"); printMatrixCPU("%.2f ", result); printf("\n"); printf("(Host) wykonano w ciagu: %fs\n", timeCPU.count()); printf("\n"); for(unsigned i=0;i<matrix1.getRows();i++){ for(unsigned j=0;j<matrix1.getColumns();j++){ matrix1.set(i,j,result.get(i,j)); } } } catch(...){} } } break; case 0: //wyjscie exit(0); default: printf("Nie rozumiem - sprobuj ponownie!"); break; } } return 0; }
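Two hedged notes on this port rather than part of the original: detHelper allocates its idxTab/strideTab scratch with new[]/delete[] inside the kernel, which under HIP relies on device-side heap support (available with ROCm's HIP-Clang, but still a per-thread device-heap allocation); and the program never inspects a hipError_t, so a failed hipMallocManaged or kernel launch passes silently. A minimal checking helper one could wrap the calls in, with all names introduced here for illustration:

    #include <hip/hip_runtime.h>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical helper, not part of the original program: abort with a readable
    // message whenever a HIP runtime call fails instead of continuing silently.
    #define HIP_CHECK(expr)                                                    \
        do {                                                                   \
            hipError_t _e = (expr);                                            \
            if (_e != hipSuccess) {                                            \
                fprintf(stderr, "HIP error %s at %s:%d\n",                     \
                        hipGetErrorString(_e), __FILE__, __LINE__);            \
                exit(EXIT_FAILURE);                                            \
            }                                                                  \
        } while (0)

    // Usage sketch against the calls above:
    //   HIP_CHECK(hipMallocManaged(&d_1, bytes));
    //   detHelper<<<blocks, threads>>>(d_1, tab);
    //   HIP_CHECK(hipGetLastError());        // launch errors
    //   HIP_CHECK(hipDeviceSynchronize());   // asynchronous execution errors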
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <limits.h> #include <math.h> #include <cuda.h> #include <algorithm> #define BLOCK_SIZE 1024 __device__ unsigned int counter, counter_2; __constant__ const unsigned int INTMAX = 2147483647; // structure for dictionary struct huffmanDictionary{ unsigned char bitSequence[256][191]; unsigned char bitSequenceLength[256]; }; // structure for node struct huffmanNode{ unsigned char letter; unsigned int frequency; struct huffmanNode * left, * right; }; struct huffmanNode * huffmanTreeNode_head; struct huffmanDictionary huffmanDict; struct huffmanNode huffmanTreeNode[512]; unsigned char bitSequenceConstMemory[256][255]; __global__ void CalculateFrequency(unsigned char * device_inputFileData , unsigned int * device_frequency, unsigned int inputFileLength) { unsigned int id = blockIdx.x * blockDim.x + threadIdx.x ; if(id < inputFileLength){ atomicAdd(& device_frequency[device_inputFileData[id]] , 1); } } __device__ int findIndex(unsigned int *freq, unsigned int size,unsigned int search){ for(int i=0;i<size;i++){ if(freq[i] == search){ return i; } } return -1; } __global__ void findLeastFrequent(unsigned int *freq, unsigned int *min, int size, unsigned int threads, unsigned int* count, unsigned int *index){ int id = blockIdx.x*blockDim.x + threadIdx.x; counter_2 = 0; __syncthreads(); int ind; if(id<threads){ while(1){ min[counter_2] = INTMAX; atomicMin(&min[counter_2], freq[id]); // Need global barrier __syncthreads(); ind = findIndex(freq, threads, min[counter_2]); index[counter_2] = ind; // Need global barrier __syncthreads(); freq[ind] = INTMAX; if(id == 0) atomicInc(&counter_2, size); // Need global barrier __syncthreads(); min[counter_2] = INTMAX; atomicMin(&min[counter_2], freq[id]); // Need global barrier __syncthreads(); ind = findIndex(freq, threads, min[counter_2]); index[counter_2] = ind; // Need global barrier __syncthreads(); freq[ind] = min[counter_2] + min[counter_2-1]; if(id == 0) atomicInc(&counter_2, size); // Need global barrier __syncthreads(); if(min[counter_2] == INTMAX || min[counter_2-1] == INTMAX){ count[0] = counter_2; break; } } } } __global__ void searchSimilarIndex(unsigned int *index, unsigned int *resultIndex, unsigned int *cnt, int threads){ int id = blockIdx.x*blockDim.x + threadIdx.x; __syncthreads(); counter = 0; if(id != threads){ if(index[id] == index[threads]){ int temp = atomicInc(&counter, threads+1); resultIndex[temp] = id; } __syncthreads(); cnt[0] = counter; } } __global__ void compress(unsigned char * device_inputFileData, unsigned int * device_compressedDataOffset, struct huffmanDictionary * device_huffmanDictionary, unsigned char * device_byteCompressedData, unsigned int device_inputFileLength) { __shared__ struct huffmanDictionary table; memcpy(& table, device_huffmanDictionary, sizeof(struct huffmanDictionary)); unsigned int inputFileLength = device_inputFileLength; unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; for(int i = pos; i < inputFileLength; i += blockDim.x){ for(int k = 0; k < table.bitSequenceLength[device_inputFileData[i]]; k++){ device_byteCompressedData[device_compressedDataOffset[i] + k] = table.bitSequence[device_inputFileData[i]][k]; } } __syncthreads(); if(pos == inputFileLength-1){ unsigned int lastLetterOffset = device_compressedDataOffset[pos] ; unsigned int lastLetterSeqLength = table.bitSequenceLength[device_inputFileData[pos]] ; unsigned int ActualOffset = lastLetterOffset + 
lastLetterSeqLength ; unsigned int formalOffset = device_compressedDataOffset[inputFileLength] ; if(ActualOffset < formalOffset){ for(int i = ActualOffset; i < formalOffset; i++){ device_byteCompressedData[i] = 0 ; } } } } void buildHuffmanTree(int count,unsigned char *uniqueChar, unsigned int *frequency,int newIndex, int childIndex){ if(count == 0){ huffmanTreeNode[newIndex].frequency = frequency[childIndex]; huffmanTreeNode[newIndex].letter = uniqueChar[childIndex]; huffmanTreeNode[newIndex].left = NULL; huffmanTreeNode[newIndex].right = NULL; } else{ huffmanTreeNode[newIndex].frequency = huffmanTreeNode[childIndex].frequency + huffmanTreeNode[childIndex + 1].frequency; huffmanTreeNode[newIndex].left = & huffmanTreeNode[childIndex]; huffmanTreeNode[newIndex].right = & huffmanTreeNode[childIndex + 1]; huffmanTreeNode_head = & (huffmanTreeNode[newIndex]); } } void buildHuffmanDictionary(struct huffmanNode * root, unsigned char * bitSequence, unsigned char bitSequenceLength){ if(root -> left){ bitSequence[bitSequenceLength] = 0; buildHuffmanDictionary(root -> left, bitSequence, bitSequenceLength + 1); } if(root -> right){ bitSequence[bitSequenceLength] = 1; buildHuffmanDictionary(root -> right, bitSequence, bitSequenceLength + 1); } // copy the bit sequence and the length to the dictionary if(root -> right == NULL && root -> left == NULL){ huffmanDict.bitSequenceLength[root -> letter] = bitSequenceLength; memcpy(huffmanDict.bitSequence[root -> letter], bitSequence, bitSequenceLength * sizeof(unsigned char)); } } void createDataOffsetArray(unsigned int * compressedDataOffset, unsigned char * inputFileData, unsigned int inputFileLength) { compressedDataOffset[0] = 0; for(int i = 0; i < inputFileLength; i++){ compressedDataOffset[i + 1] = huffmanDict.bitSequenceLength[inputFileData[i]] + compressedDataOffset[i]; } // not a byte & remaining values if(compressedDataOffset[inputFileLength] % 8 != 0){ compressedDataOffset[inputFileLength] = compressedDataOffset[inputFileLength] + (8 - (compressedDataOffset[inputFileLength] % 8)); } } void launchCudaHuffmanCompress(unsigned char * inputFileData, unsigned int * compressedDataOffset, unsigned char *compressedData, unsigned int inputFileLength, int NumBlocks) { struct huffmanDictionary * device_huffmanDictionary; unsigned char * device_inputFileData, * device_byteCompressedData; unsigned int * device_compressedDataOffset; createDataOffsetArray(compressedDataOffset, inputFileData, inputFileLength); cudaMalloc((void **) & device_inputFileData, inputFileLength * sizeof(unsigned char)); cudaMalloc((void **) & device_compressedDataOffset, (inputFileLength + 1) * sizeof(unsigned int)); cudaMalloc((void **) & device_huffmanDictionary, sizeof(huffmanDictionary)); cudaMemcpy(device_inputFileData, inputFileData, inputFileLength * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(device_compressedDataOffset, compressedDataOffset, (inputFileLength + 1) * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(device_huffmanDictionary, & huffmanDict, sizeof(huffmanDict), cudaMemcpyHostToDevice); cudaMalloc((void **) & device_byteCompressedData, (compressedDataOffset[inputFileLength]) * sizeof(unsigned char)); cudaMemset(device_byteCompressedData, 0, compressedDataOffset[inputFileLength] * sizeof(unsigned char)); compress<<<NumBlocks, BLOCK_SIZE>>>(device_inputFileData, device_compressedDataOffset, device_huffmanDictionary, device_byteCompressedData, inputFileLength); // copy compressed data from GPU to CPU memory cudaMemcpy(compressedData, 
device_byteCompressedData, ((compressedDataOffset[inputFileLength])) * sizeof(unsigned char), cudaMemcpyDeviceToHost); // free allocated memory cudaFree(device_inputFileData); cudaFree(device_compressedDataOffset); cudaFree(device_huffmanDictionary); cudaFree(device_byteCompressedData); } int main(int argc, char ** argv){ unsigned int distinctCharacterCount, inputFileLength; unsigned int frequency[256]; unsigned char * inputFileData, bitSequenceLength = 0, bitSequence[255]; unsigned int * compressedDataOffset, cpuTimeUsed; long unsigned int memOffset; clock_t start, end; FILE * inputFile, * compressedFile; // check the arguments if(argc != 3){ printf("Arguments should be input file and output file"); return -1; } // read input file, get length and data inputFile = fopen(argv[1], "rb"); fseek(inputFile, 0, SEEK_END); inputFileLength = ftell(inputFile); printf("Input File length : %d\n", inputFileLength); fseek(inputFile, 0, SEEK_SET); inputFileData = (unsigned char *) malloc(inputFileLength * sizeof(unsigned char)); fread(inputFileData, sizeof(unsigned char), inputFileLength, inputFile); fclose(inputFile); // starting the clock, tick tick start = clock(); // find frequency of each symbols for(int i = 0; i < 256; i++) frequency[i] = 0; unsigned int *device_frequency; cudaMalloc(& device_frequency, 256*sizeof(unsigned int)); cudaMemcpy(device_frequency, frequency, 256*sizeof(unsigned int), cudaMemcpyHostToDevice); unsigned char * device_inputFileData; cudaMalloc(& device_inputFileData, inputFileLength*sizeof(unsigned char)); cudaMemcpy(device_inputFileData, inputFileData, inputFileLength*sizeof(unsigned char), cudaMemcpyHostToDevice); int NumBlocks; if( inputFileLength > 1024){ NumBlocks = ceil( (float)inputFileLength / BLOCK_SIZE ); } else{ NumBlocks = 1; } printf("Num of blocks %d\n",NumBlocks); CalculateFrequency<<< NumBlocks, BLOCK_SIZE >>>(device_inputFileData, device_frequency, inputFileLength); cudaMemcpy(frequency, device_frequency, 256*sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaFree(device_inputFileData); cudaFree(device_frequency); // initialize the nodes distinctCharacterCount = 0; for(int i = 0; i < 256; i++){ if(frequency[i] > 0){ distinctCharacterCount ++; } } int unique = 0; unsigned char *uniqueChar, *duniqueChar; uniqueChar = (unsigned char *)malloc(256*sizeof(unsigned char)); cudaMalloc(&duniqueChar, 256*sizeof(unsigned char)); for(int i = 0; i<256; i++){ if(frequency[i] > 0){ uniqueChar[unique++] = i; printf("%d ",frequency[i]); } } printf("\n"); cudaMemcpy(duniqueChar, uniqueChar, 256*sizeof(unsigned char), cudaMemcpyHostToDevice); // *** FIND MINIMUM 2 FREQUENCY FOR ADDING NEW NODE *** unsigned int *tempFreq, *tempDFreq; unsigned int *min, *dmin; unsigned int *cntMin, *dcntMin; unsigned int *indMin, *dindMin; int ctr; tempFreq = (unsigned int *)malloc(unique*sizeof(unsigned int)); min = (unsigned int *)malloc(inputFileLength*sizeof(unsigned int)); cntMin = (unsigned int *)malloc(sizeof(unsigned int)); indMin = (unsigned int *)malloc(inputFileLength*sizeof(unsigned int)); ctr = 0; for(unsigned int i=0;i<256;i++){ if(frequency[i]!=0){ tempFreq[ctr++] = frequency[i]; } } // for(unsigned int i=0;i<unique;i++) printf("%d:%c ",tempFreq[i],uniqueChar[i]); // printf("\n"); cudaMalloc(&tempDFreq, unique*sizeof(unsigned int)); cudaMalloc(&dmin, inputFileLength*sizeof(unsigned int)); cudaMalloc(&dindMin, inputFileLength*sizeof(unsigned int)); cudaMalloc(&dcntMin, sizeof(unsigned int)); cudaMemcpy(tempDFreq, tempFreq, unique*sizeof(unsigned int), cudaMemcpyHostToDevice); 
float num = (float)(unique)/(float)BLOCK_SIZE; int mod = BLOCK_SIZE; if(unique < BLOCK_SIZE) mod = unique%BLOCK_SIZE; int n = ceil(num); printf("%d %d\n",n,mod); findLeastFrequent<<<n, mod>>>(tempDFreq, dmin, inputFileLength, unique, dcntMin, dindMin); cudaDeviceSynchronize(); cudaMemcpy(min, dmin, inputFileLength*sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(indMin, dindMin, inputFileLength*sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(cntMin, dcntMin, sizeof(unsigned int), cudaMemcpyDeviceToHost); // printf("count : %d\n",cntMin[0]); // for(unsigned int i=0;i<cntMin[0];i++){ // printf("%d:%d:%d ",i,indMin[i],min[i]); // } // printf("\n"); // printf("Min:\n"); // for(unsigned int i=0;i<cntMin[0];i++) printf("%d ",min[i]); // printf("\nIndMin:\n"); // for(unsigned int i=0;i<cntMin[0];i++) printf("%d ",indMin[i]); // Get all children unsigned int *resultIndex, *dresultIndex; unsigned int *cnt, *dcnt; resultIndex = (unsigned int *)malloc(cntMin[0]*sizeof(unsigned int)); cudaMalloc(&dresultIndex, cntMin[0]*sizeof(unsigned int)); cnt = (unsigned int *)malloc(sizeof(unsigned int)); cudaMalloc(&dcnt, sizeof(unsigned int)); int indexChild; for(int i=0;i<cntMin[0]-1;i++){ num = (float)(i+1)/(float)BLOCK_SIZE; mod = BLOCK_SIZE; if(i+1 < BLOCK_SIZE) mod = (i+1)%BLOCK_SIZE; n = ceil(num); searchSimilarIndex<<<n, mod>>>(dindMin, dresultIndex, dcnt, i); cudaDeviceSynchronize(); cudaMemcpy(resultIndex, dresultIndex, cntMin[0]*sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(cnt, dcnt, sizeof(unsigned int), cudaMemcpyDeviceToHost); if(cnt[0] == 0) indexChild = indMin[i]; else indexChild = *std::max_element(resultIndex, resultIndex + cnt[0])-1; buildHuffmanTree(cnt[0], uniqueChar, tempFreq, i, indexChild); } // for(int j=0;j<cntMin[0]-1;j++){ // printf("Index %d:Frequency %u",j,huffmanTreeNode[j].frequency); // if(huffmanTreeNode[j].letter != '\0') printf(":Letter %c\n",huffmanTreeNode[j].letter); // if(huffmanTreeNode[j].left != NULL) printf(":Left %u:Right %u\n",(huffmanTreeNode[j].left)->frequency,(huffmanTreeNode[j].right)->frequency); // } if(distinctCharacterCount == 1){ huffmanTreeNode_head = & huffmanTreeNode[0]; } // build the huffman dictionary buildHuffmanDictionary(huffmanTreeNode_head, bitSequence, bitSequenceLength); // printf("HOST DICTIONARY\n"); // for(int i = 0; i < 256; i ++){ // if(frequency[i]>0){ // printf("%c\t",i); // for(int k = 0; k < huffmanDict.bitSequenceLength[i]; k++){ // printf("%u",huffmanDict.bitSequence[i][k]); // } // printf("\n"); // } // } memOffset = 0; for(int i = 0; i < 256; i++) memOffset += frequency[i] * huffmanDict.bitSequenceLength[i]; long unsigned int actualOffset = memOffset; //printf("actual offset %ld\n",actualOffset); memOffset = memOffset % 8 == 0 ? 
memOffset : memOffset + 8 - memOffset % 8; printf("Output file length : %ld\n",memOffset/8); unsigned int extra = memOffset - actualOffset ; // generate offset data array compressedDataOffset = (unsigned int * ) malloc((inputFileLength + 1) * sizeof(unsigned int)); unsigned char *compressedData = (unsigned char * ) malloc(memOffset * sizeof(unsigned char)); /* memOffset equals the final compressedDataOffset[inputFileLength]; the offsets themselves are only filled later, inside launchCudaHuffmanCompress */ // launch kernel launchCudaHuffmanCompress(inputFileData, compressedDataOffset,compressedData, inputFileLength, NumBlocks); // end the clock, tick tick end = clock(); // writing the compressed file to the output compressedFile = fopen(argv[2], "wb"); fwrite(& inputFileLength, sizeof(unsigned int), 1, compressedFile); fwrite(& extra, sizeof(unsigned int), 1, compressedFile); fwrite(frequency, sizeof(unsigned int), 256, compressedFile); fwrite(compressedData, sizeof(unsigned char), compressedDataOffset[inputFileLength], compressedFile); fclose(compressedFile); cpuTimeUsed = ((end - start)) * 1000 / CLOCKS_PER_SEC; printf("\n\nTime taken :: %d:%d s\n", cpuTimeUsed / 1000, cpuTimeUsed % 1000); free(inputFileData); free(compressedDataOffset); free(compressedData); return 0; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <limits.h> #include <math.h> #include <hip/hip_runtime.h> #include <algorithm> #define BLOCK_SIZE 1024 __device__ unsigned int counter, counter_2; __constant__ const unsigned int INTMAX = 2147483647; // structure for dictionary struct huffmanDictionary{ unsigned char bitSequence[256][191]; unsigned char bitSequenceLength[256]; }; // structure for node struct huffmanNode{ unsigned char letter; unsigned int frequency; struct huffmanNode * left, * right; }; struct huffmanNode * huffmanTreeNode_head; struct huffmanDictionary huffmanDict; struct huffmanNode huffmanTreeNode[512]; unsigned char bitSequenceConstMemory[256][255]; __global__ void CalculateFrequency(unsigned char * device_inputFileData , unsigned int * device_frequency, unsigned int inputFileLength) { unsigned int id = blockIdx.x * blockDim.x + threadIdx.x ; if(id < inputFileLength){ atomicAdd(& device_frequency[device_inputFileData[id]] , 1); } } __device__ int findIndex(unsigned int *freq, unsigned int size,unsigned int search){ for(int i=0;i<size;i++){ if(freq[i] == search){ return i; } } return -1; } __global__ void findLeastFrequent(unsigned int *freq, unsigned int *min, int size, unsigned int threads, unsigned int* count, unsigned int *index){ int id = blockIdx.x*blockDim.x + threadIdx.x; counter_2 = 0; __syncthreads(); int ind; if(id<threads){ while(1){ min[counter_2] = INTMAX; atomicMin(&min[counter_2], freq[id]); // Need global barrier __syncthreads(); ind = findIndex(freq, threads, min[counter_2]); index[counter_2] = ind; // Need global barrier __syncthreads(); freq[ind] = INTMAX; if(id == 0) atomicInc(&counter_2, size); // Need global barrier __syncthreads(); min[counter_2] = INTMAX; atomicMin(&min[counter_2], freq[id]); // Need global barrier __syncthreads(); ind = findIndex(freq, threads, min[counter_2]); index[counter_2] = ind; // Need global barrier __syncthreads(); freq[ind] = min[counter_2] + min[counter_2-1]; if(id == 0) atomicInc(&counter_2, size); // Need global barrier __syncthreads(); if(min[counter_2] == INTMAX || min[counter_2-1] == INTMAX){ count[0] = counter_2; break; } } } } __global__ void searchSimilarIndex(unsigned int *index, unsigned int *resultIndex, unsigned int *cnt, int threads){ int id = blockIdx.x*blockDim.x + threadIdx.x; __syncthreads(); counter = 0; if(id != threads){ if(index[id] == index[threads]){ int temp = atomicInc(&counter, threads+1); resultIndex[temp] = id; } __syncthreads(); cnt[0] = counter; } } __global__ void compress(unsigned char * device_inputFileData, unsigned int * device_compressedDataOffset, struct huffmanDictionary * device_huffmanDictionary, unsigned char * device_byteCompressedData, unsigned int device_inputFileLength) { __shared__ struct huffmanDictionary table; memcpy(& table, device_huffmanDictionary, sizeof(struct huffmanDictionary)); unsigned int inputFileLength = device_inputFileLength; unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x; for(int i = pos; i < inputFileLength; i += blockDim.x){ for(int k = 0; k < table.bitSequenceLength[device_inputFileData[i]]; k++){ device_byteCompressedData[device_compressedDataOffset[i] + k] = table.bitSequence[device_inputFileData[i]][k]; } } __syncthreads(); if(pos == inputFileLength-1){ unsigned int lastLetterOffset = device_compressedDataOffset[pos] ; unsigned int lastLetterSeqLength = table.bitSequenceLength[device_inputFileData[pos]] ; unsigned int ActualOffset = lastLetterOffset + lastLetterSeqLength ; unsigned int formalOffset = 
device_compressedDataOffset[inputFileLength] ; if(ActualOffset < formalOffset){ for(int i = ActualOffset; i < formalOffset; i++){ device_byteCompressedData[i] = 0 ; } } } } void buildHuffmanTree(int count,unsigned char *uniqueChar, unsigned int *frequency,int newIndex, int childIndex){ if(count == 0){ huffmanTreeNode[newIndex].frequency = frequency[childIndex]; huffmanTreeNode[newIndex].letter = uniqueChar[childIndex]; huffmanTreeNode[newIndex].left = NULL; huffmanTreeNode[newIndex].right = NULL; } else{ huffmanTreeNode[newIndex].frequency = huffmanTreeNode[childIndex].frequency + huffmanTreeNode[childIndex + 1].frequency; huffmanTreeNode[newIndex].left = & huffmanTreeNode[childIndex]; huffmanTreeNode[newIndex].right = & huffmanTreeNode[childIndex + 1]; huffmanTreeNode_head = & (huffmanTreeNode[newIndex]); } } void buildHuffmanDictionary(struct huffmanNode * root, unsigned char * bitSequence, unsigned char bitSequenceLength){ if(root -> left){ bitSequence[bitSequenceLength] = 0; buildHuffmanDictionary(root -> left, bitSequence, bitSequenceLength + 1); } if(root -> right){ bitSequence[bitSequenceLength] = 1; buildHuffmanDictionary(root -> right, bitSequence, bitSequenceLength + 1); } // copy the bit sequence and the length to the dictionary if(root -> right == NULL && root -> left == NULL){ huffmanDict.bitSequenceLength[root -> letter] = bitSequenceLength; memcpy(huffmanDict.bitSequence[root -> letter], bitSequence, bitSequenceLength * sizeof(unsigned char)); } } void createDataOffsetArray(unsigned int * compressedDataOffset, unsigned char * inputFileData, unsigned int inputFileLength) { compressedDataOffset[0] = 0; for(int i = 0; i < inputFileLength; i++){ compressedDataOffset[i + 1] = huffmanDict.bitSequenceLength[inputFileData[i]] + compressedDataOffset[i]; } // not a byte & remaining values if(compressedDataOffset[inputFileLength] % 8 != 0){ compressedDataOffset[inputFileLength] = compressedDataOffset[inputFileLength] + (8 - (compressedDataOffset[inputFileLength] % 8)); } } void launchCudaHuffmanCompress(unsigned char * inputFileData, unsigned int * compressedDataOffset, unsigned char *compressedData, unsigned int inputFileLength, int NumBlocks) { struct huffmanDictionary * device_huffmanDictionary; unsigned char * device_inputFileData, * device_byteCompressedData; unsigned int * device_compressedDataOffset; createDataOffsetArray(compressedDataOffset, inputFileData, inputFileLength); hipMalloc((void **) & device_inputFileData, inputFileLength * sizeof(unsigned char)); hipMalloc((void **) & device_compressedDataOffset, (inputFileLength + 1) * sizeof(unsigned int)); hipMalloc((void **) & device_huffmanDictionary, sizeof(huffmanDictionary)); hipMemcpy(device_inputFileData, inputFileData, inputFileLength * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(device_compressedDataOffset, compressedDataOffset, (inputFileLength + 1) * sizeof(unsigned int), hipMemcpyHostToDevice); hipMemcpy(device_huffmanDictionary, & huffmanDict, sizeof(huffmanDict), hipMemcpyHostToDevice); hipMalloc((void **) & device_byteCompressedData, (compressedDataOffset[inputFileLength]) * sizeof(unsigned char)); hipMemset(device_byteCompressedData, 0, compressedDataOffset[inputFileLength] * sizeof(unsigned char)); compress<<<NumBlocks, BLOCK_SIZE>>>(device_inputFileData, device_compressedDataOffset, device_huffmanDictionary, device_byteCompressedData, inputFileLength); // copy compressed data from GPU to CPU memory hipMemcpy(compressedData, device_byteCompressedData, ((compressedDataOffset[inputFileLength])) * 
sizeof(unsigned char), hipMemcpyDeviceToHost); // free allocated memory hipFree(device_inputFileData); hipFree(device_compressedDataOffset); hipFree(device_huffmanDictionary); hipFree(device_byteCompressedData); } int main(int argc, char ** argv){ unsigned int distinctCharacterCount, inputFileLength; unsigned int frequency[256]; unsigned char * inputFileData, bitSequenceLength = 0, bitSequence[255]; unsigned int * compressedDataOffset, cpuTimeUsed; long unsigned int memOffset; clock_t start, end; FILE * inputFile, * compressedFile; // check the arguments if(argc != 3){ printf("Arguments should be input file and output file"); return -1; } // read input file, get length and data inputFile = fopen(argv[1], "rb"); fseek(inputFile, 0, SEEK_END); inputFileLength = ftell(inputFile); printf("Input File length : %d\n", inputFileLength); fseek(inputFile, 0, SEEK_SET); inputFileData = (unsigned char *) malloc(inputFileLength * sizeof(unsigned char)); fread(inputFileData, sizeof(unsigned char), inputFileLength, inputFile); fclose(inputFile); // starting the clock, tick tick start = clock(); // find frequency of each symbols for(int i = 0; i < 256; i++) frequency[i] = 0; unsigned int *device_frequency; hipMalloc(& device_frequency, 256*sizeof(unsigned int)); hipMemcpy(device_frequency, frequency, 256*sizeof(unsigned int), hipMemcpyHostToDevice); unsigned char * device_inputFileData; hipMalloc(& device_inputFileData, inputFileLength*sizeof(unsigned char)); hipMemcpy(device_inputFileData, inputFileData, inputFileLength*sizeof(unsigned char), hipMemcpyHostToDevice); int NumBlocks; if( inputFileLength > 1024){ NumBlocks = ceil( (float)inputFileLength / BLOCK_SIZE ); } else{ NumBlocks = 1; } printf("Num of blocks %d\n",NumBlocks); CalculateFrequency<<< NumBlocks, BLOCK_SIZE >>>(device_inputFileData, device_frequency, inputFileLength); hipMemcpy(frequency, device_frequency, 256*sizeof(unsigned int), hipMemcpyDeviceToHost); hipFree(device_inputFileData); hipFree(device_frequency); // initialize the nodes distinctCharacterCount = 0; for(int i = 0; i < 256; i++){ if(frequency[i] > 0){ distinctCharacterCount ++; } } int unique = 0; unsigned char *uniqueChar, *duniqueChar; uniqueChar = (unsigned char *)malloc(256*sizeof(unsigned char)); hipMalloc(&duniqueChar, 256*sizeof(unsigned char)); for(int i = 0; i<256; i++){ if(frequency[i] > 0){ uniqueChar[unique++] = i; printf("%d ",frequency[i]); } } printf("\n"); hipMemcpy(duniqueChar, uniqueChar, 256*sizeof(unsigned char), hipMemcpyHostToDevice); // *** FIND MINIMUM 2 FREQUENCY FOR ADDING NEW NODE *** unsigned int *tempFreq, *tempDFreq; unsigned int *min, *dmin; unsigned int *cntMin, *dcntMin; unsigned int *indMin, *dindMin; int ctr; tempFreq = (unsigned int *)malloc(unique*sizeof(unsigned int)); min = (unsigned int *)malloc(inputFileLength*sizeof(unsigned int)); cntMin = (unsigned int *)malloc(sizeof(unsigned int)); indMin = (unsigned int *)malloc(inputFileLength*sizeof(unsigned int)); ctr = 0; for(unsigned int i=0;i<256;i++){ if(frequency[i]!=0){ tempFreq[ctr++] = frequency[i]; } } // for(unsigned int i=0;i<unique;i++) printf("%d:%c ",tempFreq[i],uniqueChar[i]); // printf("\n"); hipMalloc(&tempDFreq, unique*sizeof(unsigned int)); hipMalloc(&dmin, inputFileLength*sizeof(unsigned int)); hipMalloc(&dindMin, inputFileLength*sizeof(unsigned int)); hipMalloc(&dcntMin, sizeof(unsigned int)); hipMemcpy(tempDFreq, tempFreq, unique*sizeof(unsigned int), hipMemcpyHostToDevice); float num = (float)(unique)/(float)BLOCK_SIZE; int mod = BLOCK_SIZE; if(unique < BLOCK_SIZE) mod = 
unique%BLOCK_SIZE; int n = ceil(num); printf("%d %d\n",n,mod); findLeastFrequent<<<n, mod>>>(tempDFreq, dmin, inputFileLength, unique, dcntMin, dindMin); hipDeviceSynchronize(); hipMemcpy(min, dmin, inputFileLength*sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(indMin, dindMin, inputFileLength*sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(cntMin, dcntMin, sizeof(unsigned int), hipMemcpyDeviceToHost); // printf("count : %d\n",cntMin[0]); // for(unsigned int i=0;i<cntMin[0];i++){ // printf("%d:%d:%d ",i,indMin[i],min[i]); // } // printf("\n"); // printf("Min:\n"); // for(unsigned int i=0;i<cntMin[0];i++) printf("%d ",min[i]); // printf("\nIndMin:\n"); // for(unsigned int i=0;i<cntMin[0];i++) printf("%d ",indMin[i]); // Get all children unsigned int *resultIndex, *dresultIndex; unsigned int *cnt, *dcnt; resultIndex = (unsigned int *)malloc(cntMin[0]*sizeof(unsigned int)); hipMalloc(&dresultIndex, cntMin[0]*sizeof(unsigned int)); cnt = (unsigned int *)malloc(sizeof(unsigned int)); hipMalloc(&dcnt, sizeof(unsigned int)); int indexChild; for(int i=0;i<cntMin[0]-1;i++){ num = (float)(i+1)/(float)BLOCK_SIZE; mod = BLOCK_SIZE; if(i+1 < BLOCK_SIZE) mod = (i+1)%BLOCK_SIZE; n = ceil(num); searchSimilarIndex<<<n, mod>>>(dindMin, dresultIndex, dcnt, i); hipDeviceSynchronize(); hipMemcpy(resultIndex, dresultIndex, cntMin[0]*sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(cnt, dcnt, sizeof(unsigned int), hipMemcpyDeviceToHost); if(cnt[0] == 0) indexChild = indMin[i]; else indexChild = *std::max_element(resultIndex, resultIndex + cnt[0])-1; buildHuffmanTree(cnt[0], uniqueChar, tempFreq, i, indexChild); } // for(int j=0;j<cntMin[0]-1;j++){ // printf("Index %d:Frequency %u",j,huffmanTreeNode[j].frequency); // if(huffmanTreeNode[j].letter != '\0') printf(":Letter %c\n",huffmanTreeNode[j].letter); // if(huffmanTreeNode[j].left != NULL) printf(":Left %u:Right %u\n",(huffmanTreeNode[j].left)->frequency,(huffmanTreeNode[j].right)->frequency); // } if(distinctCharacterCount == 1){ huffmanTreeNode_head = & huffmanTreeNode[0]; } // build the huffman dictionary buildHuffmanDictionary(huffmanTreeNode_head, bitSequence, bitSequenceLength); // printf("HOST DICTIONARY\n"); // for(int i = 0; i < 256; i ++){ // if(frequency[i]>0){ // printf("%c\t",i); // for(int k = 0; k < huffmanDict.bitSequenceLength[i]; k++){ // printf("%u",huffmanDict.bitSequence[i][k]); // } // printf("\n"); // } // } memOffset = 0; for(int i = 0; i < 256; i++) memOffset += frequency[i] * huffmanDict.bitSequenceLength[i]; long unsigned int actualOffset = memOffset; //printf("actual offset %ld\n",actualOffset); memOffset = memOffset % 8 == 0 ? 
memOffset : memOffset + 8 - memOffset % 8; printf("Output file length : %ld\n",memOffset/8); unsigned int extra = memOffset - actualOffset ; // generate offset data array compressedDataOffset = (unsigned int * ) malloc((inputFileLength + 1) * sizeof(unsigned int)); unsigned char *compressedData = (unsigned char * ) malloc(memOffset * sizeof(unsigned char)); /* memOffset equals the final compressedDataOffset[inputFileLength]; the offsets themselves are only filled later, inside launchCudaHuffmanCompress */ // launch kernel launchCudaHuffmanCompress(inputFileData, compressedDataOffset,compressedData, inputFileLength, NumBlocks); // end the clock, tick tick end = clock(); // writing the compressed file to the output compressedFile = fopen(argv[2], "wb"); fwrite(& inputFileLength, sizeof(unsigned int), 1, compressedFile); fwrite(& extra, sizeof(unsigned int), 1, compressedFile); fwrite(frequency, sizeof(unsigned int), 256, compressedFile); fwrite(compressedData, sizeof(unsigned char), compressedDataOffset[inputFileLength], compressedFile); fclose(compressedFile); cpuTimeUsed = ((end - start)) * 1000 / CLOCKS_PER_SEC; printf("\n\nTime taken :: %d:%d s\n", cpuTimeUsed / 1000, cpuTimeUsed % 1000); free(inputFileData); free(compressedDataOffset); free(compressedData); return 0; }
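The converted code keeps CUDA's triple-chevron launch syntax, which hipcc also accepts; HIP additionally offers the hipLaunchKernelGGL macro form. As a sketch, the compress launch from the answer could equivalently be written as follows (0 bytes of dynamic shared memory, default stream):

hipLaunchKernelGGL(compress, dim3(NumBlocks), dim3(BLOCK_SIZE), 0, 0,
                   device_inputFileData, device_compressedDataOffset,
                   device_huffmanDictionary, device_byteCompressedData,
                   inputFileLength);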
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void rotate2(float*a,float b, float * c, int sx,int sy,int sz, int dx, int dy, int dz) { int ids=(blockIdx.x*blockDim.x+threadIdx.x); // id of this processor int x=(ids + dx)%sx; // advance by the offset steps along the chain int y=(ids/sx + dy)%sy; int z=(ids/(sx*sy) + dz)%sz; int idd=x+sx*y+sx*sy*z; if(ids>=sx*sy*sz) return; // float tmp = a[ids]; // __syncthreads(); // nice try but does not work ! c[idd] = b*a[ids]; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void rotate2(float*a,float b, float * c, int sx,int sy,int sz, int dx, int dy, int dz) { int ids=(blockIdx.x*blockDim.x+threadIdx.x); // id of this processor int x=(ids + dx)%sx; // advance by the offset steps along the chain int y=(ids/sx + dy)%sy; int z=(ids/(sx*sy) + dz)%sz; int idd=x+sx*y+sx*sy*z; if(ids>=sx*sy*sz) return; // float tmp = a[ids]; // __syncthreads(); // nice try but does not work ! c[idd] = b*a[ids]; }
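Since the kernel body is identical in CUDA and HIP, adding the runtime header is the whole conversion here. A hedged host-side driver for rotate2 (the grid geometry, scale factor, and offsets below are illustrative assumptions, not from the source):

void runRotate2() {
    int sx = 32, sy = 32, sz = 32, n = sx * sy * sz; // illustrative dimensions
    float *a, *c;
    hipMalloc((void**)&a, n * sizeof(float));
    hipMalloc((void**)&c, n * sizeof(float));
    int threads = 256, blocks = (n + threads - 1) / threads;
    rotate2<<<blocks, threads>>>(a, 2.0f, c, sx, sy, sz, 1, 0, 0); // shift one cell along x
    hipDeviceSynchronize();
    hipFree(a); hipFree(c);
}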
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #define ROWS 2 #define COLS 3 // F1 // __global__ void add(int *a, int *b, int *c){ // int idx = blockIdx.x*blockDim.x + threadIdx.x; // if (idx<ROWS*COLS){ // c[idx] = a[idx] + b[idx]; // } // } // F2 __global__ void add(int *a, int *b, int *c){ int col = blockIdx.x*blockDim.x; int row = blockIdx.y*blockDim.y; if (row<ROWS && col<COLS){ int idx = row*COLS + col; c[idx] = a[idx] + b[idx]; } } int main(){ int a[ROWS][COLS], b[ROWS][COLS], c[ROWS][COLS]; for (int i=0; i<ROWS; i++){ for (int j=0; j<COLS; j++){ a[i][j] = rand()%100; b[i][j] = rand()%100; } } int *d1, *d2, *d3; int size = sizeof(int); cudaMalloc((void **)&d1, ROWS*COLS*size); cudaMalloc((void **)&d2, ROWS*COLS*size); cudaMalloc((void **)&d3, ROWS*COLS*size); cudaMemcpy(d1, a, ROWS*COLS*size, cudaMemcpyHostToDevice); cudaMemcpy(d2, b, ROWS*COLS*size, cudaMemcpyHostToDevice); // add<<<ROWS,COLS>>>(d1, d2, d3); //use with F1 dim3 blocks_per_grid(COLS, ROWS); //2-D grid add<<<blocks_per_grid,1>>>(d1, d2, d3); //use with F2 cudaMemcpy(c, d3, ROWS*COLS*size, cudaMemcpyDeviceToHost); cudaFree(d1); cudaFree(d2); cudaFree(d3); for (int i=0; i<ROWS; i++){ for (int j=0; j<COLS; j++){ printf("Sum of %d and %d = %d\n", a[i][j], b[i][j], c[i][j]); } } return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define ROWS 2 #define COLS 3 // F1 // __global__ void add(int *a, int *b, int *c){ // int idx = blockIdx.x*blockDim.x + threadIdx.x; // if (idx<ROWS*COLS){ // c[idx] = a[idx] + b[idx]; // } // } // F2 __global__ void add(int *a, int *b, int *c){ int col = blockIdx.x*blockDim.x; int row = blockIdx.y*blockDim.y; if (row<ROWS && col<COLS){ int idx = row*COLS + col; c[idx] = a[idx] + b[idx]; } } int main(){ int a[ROWS][COLS], b[ROWS][COLS], c[ROWS][COLS]; for (int i=0; i<ROWS; i++){ for (int j=0; j<COLS; j++){ a[i][j] = rand()%100; b[i][j] = rand()%100; } } int *d1, *d2, *d3; int size = sizeof(int); hipMalloc((void **)&d1, ROWS*COLS*size); hipMalloc((void **)&d2, ROWS*COLS*size); hipMalloc((void **)&d3, ROWS*COLS*size); hipMemcpy(d1, a, ROWS*COLS*size, hipMemcpyHostToDevice); hipMemcpy(d2, b, ROWS*COLS*size, hipMemcpyHostToDevice); // add<<<ROWS,COLS>>>(d1, d2, d3); //use with F1 dim3 blocks_per_grid(COLS, ROWS); //2-D grid add<<<blocks_per_grid,1>>>(d1, d2, d3); //use with F2 hipMemcpy(c, d3, ROWS*COLS*size, hipMemcpyDeviceToHost); hipFree(d1); hipFree(d2); hipFree(d3); for (int i=0; i<ROWS; i++){ for (int j=0; j<COLS; j++){ printf("Sum of %d and %d = %d\n", a[i][j], b[i][j], c[i][j]); } } return 0; }
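F2 runs one thread per block (<<<blocks_per_grid,1>>>), which leaves the SIMD lanes of each block idle; the usual pattern (a sketch, not part of the source) uses 2D thread blocks with the full blockIdx*blockDim+threadIdx index math:

// Sketch: the same addition with 16x16 thread blocks; the bounds check is unchanged.
__global__ void add2d(int *a, int *b, int *c){
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (row<ROWS && col<COLS){
        int idx = row*COLS + col;
        c[idx] = a[idx] + b[idx];
    }
}
// launch: dim3 block(16,16); dim3 grid((COLS+15)/16, (ROWS+15)/16); add2d<<<grid,block>>>(d1, d2, d3);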
Convert the following CUDA code to AMD GPU code: cuda //nvcc -ptx "E:\семестр 7\НИР\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" //nvcc -ptx "E:\семестр 7\НИР\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" -gencode arch=compute_35,code=sm_35 -rdc=true __device__ float getneighbor(float3 *A, const unsigned int N,const unsigned int M, const unsigned int x, const unsigned int y){ if (A[x+y*M].z == 0.0) { return A[x+y*M].x; } return 0.0;//A[x+y*M].x; } __global__ void kernel(float *U, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; } __global__ void kernelAndSetIR(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernelAndSetIRAndSource(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const int x_s,const int y_s,float* F,const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; if (x==x_s && y==y_s) val+=F[t-1]; // Source val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? 
//float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernel2(float *U, float *U1, const unsigned int N,const unsigned int M,const float v,const float d_x,const float d_t) { float3 *A=(float3*) U; float3 *A1=(float3*) U1; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) { A1[x+y*M] = A[x+y*M]; return;} //A[i].x = i; float pos = A[x+y*M].x; float vel = A[x+y*M].y; float wall = A[x+y*M].z; if (wall == 0.0){ float m = A[x+1+y*M].x;//getneighbor(A,N,M,x+1,y); m += A[x-1+y*M].x;//getneighbor(A,N,M,x-1,y); m += A[x+y*M+M].x;//getneighbor(A,N,M,x,y+1); m += A[x+y*M-M].x;//getneighbor(A,N,M,x,y-1); m *= .25; vel = 1.5*(1.0-wall)*(m-pos)+vel; pos=pos+vel; } else {pos = 0.0;vel = 0.0;} A1[x+y*M].x = pos;//+0.1; A1[x+y*M].y = vel; A1[x+y*M].z = wall; } // __global__ void kernel2(float *U, const unsigned int N,const unsigned int M,int t,const float v,const float d_x,const float d_t) // { // float3 *A1=(float3*) U; // float3 *A2=(float3*) (U+1); // float3 *A3=(float3*) (U+2); // float3 *wall=(float3*) (U+3); // float3* A[3] = {A1,A2,A3}; // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i >= N*M) return; // unsigned int x = i % M; // unsigned int y = i / M; // if (x+1>=M || y+1>=N || x<1 || y<1) {return;} // // //for (t:=0;t<1000;++t){ // float val; // if (wall[x+y*M].x == 0.0){ // val = A[t%3][x-1+y*M].x-4*A[t%3][x+y*M].x+A[t%3][x+1+y*M].x+A[t%3][x+y*M+M].x+A[t%3][x+y*M-M].x; // val*=v*v*d_t*d_t/(d_x*d_x); // val += 2*A[t%3][x+y*M].x-A[(t-1)%3][x+y*M].x; // } else {val = 0.0;} // A[(t+1)%3][x+y*M].x=val; // // }
#include <hip/hip_runtime.h> //nvcc -ptx "E:\семестр 7\НИР\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" //nvcc -ptx "E:\семестр 7\НИР\kernel.cu" -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Tools\MSVC\14.16.27023\bin\Hostx64\x64" -gencode arch=compute_35,code=sm_35 -rdc=true __device__ float getneighbor(float3 *A, const unsigned int N,const unsigned int M, const unsigned int x, const unsigned int y){ if (A[x+y*M].z == 0.0) { return A[x+y*M].x; } return 0.0;//A[x+y*M].x; } __global__ void kernel(float *U, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; } __global__ void kernelAndSetIR(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? //float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernelAndSetIRAndSource(float *U, float *IR, const unsigned int N,const unsigned int M,const int t,const float v,const float d_x,const float d_t,const int x_ir,const int y_ir, const int x_s,const int y_s,float* F,const float b=20) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) {U[(x+y*M)*4+(t+1)%3]=0.0;return;} float val; if (U[(x+y*M)*4+3] > 0.0){ val = -4*U[(x+y*M)*4+t%3]; val+= (U[(x-1+y*M)*4+3]>0.0)? U[(x-1+y*M)*4+t%3]:0;//U[(x+1+y*M)*4+t%3]; val+= (U[(x+1+y*M)*4+3]>0.0)? U[(x+1+y*M)*4+t%3]:0;//U[(x-1+y*M)*4+t%3]; val+= (U[(x+M+y*M)*4+3]>0.0)? U[(x+M+y*M)*4+t%3]:0;//U[(x-M+y*M)*4+t%3]; val+= (U[(x-M+y*M)*4+3]>0.0)? U[(x-M+y*M)*4+t%3]:0;//U[(x+M+y*M)*4+t%3]; if (x==x_s && y==y_s) val+=F[t-1]; // Source val*=(U[(x+y*M)*4+3]*U[(x+y*M)*4+3])*v*v*d_t*d_t/(d_x*d_x); // ??? 
//float b=20; val += 2*U[(x+y*M)*4+t%3]-U[(x+y*M)*4+(t-1)%3]*(1-d_t*b*0.5); val/=(1+d_t*b*0.5); } else {val = 0.0;} U[(x+y*M)*4+(t+1)%3]=val; if (x==x_ir && y==y_ir) IR[t-1]=val; // (t-1) because we run with t+1 } __global__ void kernel2(float *U, float *U1, const unsigned int N,const unsigned int M,const float v,const float d_x,const float d_t) { float3 *A=(float3*) U; float3 *A1=(float3*) U1; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= N*M) return; unsigned int x = i % M; unsigned int y = i / M; if (x+1>=M || y+1>=N || x<1 || y<1) { A1[x+y*M] = A[x+y*M]; return;} //A[i].x = i; float pos = A[x+y*M].x; float vel = A[x+y*M].y; float wall = A[x+y*M].z; if (wall == 0.0){ float m = A[x+1+y*M].x;//getneighbor(A,N,M,x+1,y); m += A[x-1+y*M].x;//getneighbor(A,N,M,x-1,y); m += A[x+y*M+M].x;//getneighbor(A,N,M,x,y+1); m += A[x+y*M-M].x;//getneighbor(A,N,M,x,y-1); m *= .25; vel = 1.5*(1.0-wall)*(m-pos)+vel; pos=pos+vel; } else {pos = 0.0;vel = 0.0;} A1[x+y*M].x = pos;//+0.1; A1[x+y*M].y = vel; A1[x+y*M].z = wall; } // __global__ void kernel2(float *U, const unsigned int N,const unsigned int M,int t,const float v,const float d_x,const float d_t) // { // float3 *A1=(float3*) U; // float3 *A2=(float3*) (U+1); // float3 *A3=(float3*) (U+2); // float3 *wall=(float3*) (U+3); // float3* A[3] = {A1,A2,A3}; // unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // if (i >= N*M) return; // unsigned int x = i % M; // unsigned int y = i / M; // if (x+1>=M || y+1>=N || x<1 || y<1) {return;} // // //for (t:=0;t<1000;++t){ // float val; // if (wall[x+y*M].x == 0.0){ // val = A[t%3][x-1+y*M].x-4*A[t%3][x+y*M].x+A[t%3][x+1+y*M].x+A[t%3][x+y*M+M].x+A[t%3][x+y*M-M].x; // val*=v*v*d_t*d_t/(d_x*d_x); // val += 2*A[t%3][x+y*M].x-A[(t-1)%3][x+y*M].x; // } else {val = 0.0;} // A[(t+1)%3][x+y*M].x=val; // // }
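Each of these kernels guards with if (i >= N*M) return;, so a 1D launch only has to cover the grid once per time step. A hedged stepping helper for `kernel` (the 256-thread block size is an assumption, not from the source):

void stepWave(float *U_dev, unsigned int N, unsigned int M, int t,
              float v, float d_x, float d_t) {
    int threads = 256;                                   // assumed block size
    int blocks = (int)((N * M + threads - 1) / threads); // cover all N*M cells
    kernel<<<blocks, threads>>>(U_dev, N, M, t, v, d_x, d_t, 20.0f);
    hipDeviceSynchronize();                              // each step reads the previous one
}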
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <chrono> #include <iostream> #include <cmath> #define TPB 256 /* array size 5000: CPU computing time:1.21e-05 GPU computing time:0.167205 The difference is 0.000000 array size 50000: CPU computing time:0.0001292 GPU computing time:0.158399 The difference is 0.000000 array size 500000: CPU computing time:0.0013506 GPU computing time:0.167526 The difference is 0.000000 array size 5000000: CPU computing time:0.0176518 GPU computing time:0.173352 The difference is 0.000000 array size 50000000: CPU computing time:0.199978 GPU computing time:0.274253 The difference is 0.000000 array size 500000000: CPU computing time:2.09953 GPU computing time:1.18089 The difference is 0.000000 */ __global__ void saxpyKernel(float *x, float *y, const float a, const int n) { const int id = blockIdx.x*blockDim.x + threadIdx.x; if(id>=n) { return; } y[id] = x[id] * y[id] + a; } void cpuSaxpy(float *x, float *y, const float a, const int n) { for(size_t i=0; i<n;++i) { y[i] = x[i] * y[i] + a; } } float difference(float *a, float *b, const int n) { float result = 0.0; for(size_t i=0; i<n;++i) { result += std::abs(a[i] - b[i]); } return result; } int main() { int array_size; std::cin >> array_size; const float a = 1.0; // switch to heap memory to allow for larger array size float *x1 = new float[array_size]; float *y1 = new float[array_size]; for(size_t i = 0; i<array_size;++i) { x1[i] = 3.3+1e-10; y1[i] = 3.4+1e-10; } auto start_time_cpu = std::chrono::high_resolution_clock::now(); cpuSaxpy(x1,y1,a, array_size); auto end_time_cpu = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_cpu = end_time_cpu-start_time_cpu; printf("CPU SAXPY completed!\n"); std::cout<<"CPU computing time:"<<time_cpu.count()<<std::endl; float *x2= new float[array_size]; float *y2= new float[array_size]; for(size_t i = 0; i<array_size;++i) { x2[i] = 3.3+1e-10; y2[i] = 3.4+1e-10; } auto start_time_gpu = std::chrono::high_resolution_clock::now(); float *dx2 = nullptr; float *dy2 = nullptr; auto byteSize = array_size * sizeof(float); cudaMalloc(&dx2, byteSize); cudaMalloc(&dy2, byteSize); cudaMemcpy(dx2, x2, byteSize, cudaMemcpyHostToDevice); cudaMemcpy(dy2, y2, byteSize, cudaMemcpyHostToDevice); saxpyKernel<<<(array_size+TPB-1)/TPB, TPB>>>(dx2,dy2,a, array_size); cudaDeviceSynchronize(); cudaMemcpy(y2, dy2, byteSize, cudaMemcpyDeviceToHost); cudaFree(dx2); cudaFree(dy2); auto end_time_gpu = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_gpu = end_time_gpu-start_time_gpu; printf("GPU SAXPY completed!\n"); std::cout<<"GPU computing time:"<<time_gpu.count()<<std::endl; float diff = difference(y1, y2,array_size); printf("Comparison completed! The difference is %f\n", diff); delete[] x1; delete[] x2; delete[] y1; delete[] y2; return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <chrono> #include <iostream> #include <cmath> #define TPB 256 /* array size 5000: CPU computing time:1.21e-05 GPU computing time:0.167205 The difference is 0.000000 array size 50000: CPU computing time:0.0001292 GPU computing time:0.158399 The difference is 0.000000 array size 500000: CPU computing time:0.0013506 GPU computing time:0.167526 The difference is 0.000000 array size 5000000: CPU computing time:0.0176518 GPU computing time:0.173352 The difference is 0.000000 array size 50000000: CPU computing time:0.199978 GPU computing time:0.274253 The difference is 0.000000 array size 500000000: CPU computing time:2.09953 GPU computing time:1.18089 The difference is 0.000000 */ __global__ void saxpyKernel(float *x, float *y, const float a, const int n) { const int id = blockIdx.x*blockDim.x + threadIdx.x; if(id>=n) { return; } y[id] = x[id] * y[id] + a; } void cpuSaxpy(float *x, float *y, const float a, const int n) { for(size_t i=0; i<n;++i) { y[i] = x[i] * y[i] + a; } } float difference(float *a, float *b, const int n) { float result = 0.0; for(size_t i=0; i<n;++i) { result += std::abs(a[i] - b[i]); } return result; } int main() { int array_size; std::cin >> array_size; const float a = 1.0; // switch to heap memory to allow for larger array size float *x1 = new float[array_size]; float *y1 = new float[array_size]; for(size_t i = 0; i<array_size;++i) { x1[i] = 3.3+1e-10; y1[i] = 3.4+1e-10; } auto start_time_cpu = std::chrono::high_resolution_clock::now(); cpuSaxpy(x1,y1,a, array_size); auto end_time_cpu = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_cpu = end_time_cpu-start_time_cpu; printf("CPU SAXPY completed!\n"); std::cout<<"CPU computing time:"<<time_cpu.count()<<std::endl; float *x2= new float[array_size]; float *y2= new float[array_size]; for(size_t i = 0; i<array_size;++i) { x2[i] = 3.3+1e-10; y2[i] = 3.4+1e-10; } auto start_time_gpu = std::chrono::high_resolution_clock::now(); float *dx2 = nullptr; float *dy2 = nullptr; auto byteSize = array_size * sizeof(float); hipMalloc(&dx2, byteSize); hipMalloc(&dy2, byteSize); hipMemcpy(dx2, x2, byteSize, hipMemcpyHostToDevice); hipMemcpy(dy2, y2, byteSize, hipMemcpyHostToDevice); saxpyKernel<<<(array_size+TPB-1)/TPB, TPB>>>(dx2,dy2,a, array_size); hipDeviceSynchronize(); hipMemcpy(y2, dy2, byteSize, hipMemcpyDeviceToHost); hipFree(dx2); hipFree(dy2); auto end_time_gpu = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_gpu = end_time_gpu-start_time_gpu; printf("GPU SAXPY completed!\n"); std::cout<<"GPU computing time:"<<time_gpu.count()<<std::endl; float diff = difference(y1, y2,array_size); printf("Comparison completed! The difference is %f\n", diff); delete[] x1; delete[] x2; delete[] y1; delete[] y2; return 0; }
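Both timings above include allocation and both transfers, which dominate at the smaller sizes listed in the comment block. A sketch of kernel-only timing with HIP events, bracketing the same launch as in the answer:

hipEvent_t beg, end;
hipEventCreate(&beg);
hipEventCreate(&end);
hipEventRecord(beg, 0);
saxpyKernel<<<(array_size+TPB-1)/TPB, TPB>>>(dx2, dy2, a, array_size);
hipEventRecord(end, 0);
hipEventSynchronize(end);
float ms = 0.0f;
hipEventElapsedTime(&ms, beg, end); // elapsed milliseconds between the two records
hipEventDestroy(beg);
hipEventDestroy(end);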
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void big_add(int *a, int *b, int *c, unsigned int N){ int tid; tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; while(tid < N){ c[tid] = a[tid] + b[tid]; tid += stride; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void big_add(int *a, int *b, int *c, unsigned int N){ int tid; tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; while(tid < N){ c[tid] = a[tid] + b[tid]; tid += stride; } }
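big_add is a grid-stride loop: tid advances by blockDim.x * gridDim.x, so a fixed launch size covers any N. An illustrative driver (the sizes are assumptions, not from the source):

void runBigAdd(unsigned int N) {
    int *a, *b, *c;
    hipMalloc((void**)&a, N * sizeof(int));
    hipMalloc((void**)&b, N * sizeof(int));
    hipMalloc((void**)&c, N * sizeof(int));
    big_add<<<128, 256>>>(a, b, c, N); // 32768 threads stride over all N elements
    hipDeviceSynchronize();
    hipFree(a); hipFree(b); hipFree(c);
}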
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> // Kernel definition __global__ void MatAdd(int N, float *A, float *B, float *C){ int i = threadIdx.x; int j = threadIdx.y; C[N*i + j] = A[N*i + j] + B[N*i + j]; } int main(){ float *A, *B, *C; int N = 100; cudaMalloc((void**)&A, N*N*sizeof(float)); cudaMalloc((void**)&B, N*N*sizeof(float)); cudaMalloc((void**)&C, N*N*sizeof(float)); auto *a = malloc(N*N*sizeof(float)); auto *b = malloc(N*N*sizeof(float)); auto *c = malloc(N*N*sizeof(float)); cudaMemcpy(A, a, N*N*sizeof(*A), cudaMemcpyHostToDevice); cudaMemcpy(B, b, N*N*sizeof(*B), cudaMemcpyHostToDevice); // Kernel invocation with one block of N*N*1 threads int numBlocks = 1; dim3 threadsPerBlock(N, N, 1); MatAdd <<< numBlocks, threadsPerBlock >>>(N, (float*)A, (float*)B, (float*)C); cudaMemcpy(c, C, N*N*sizeof(*C), cudaMemcpyDeviceToHost); cudaFree(A); cudaFree(B); cudaFree(C); std::cout << "Done" << std::endl; }
#include <hip/hip_runtime.h> #include <iostream> // Kernel definition __global__ void MatAdd(int N, float *A, float *B, float *C){ int i = threadIdx.x; int j = threadIdx.y; C[N*i + j] = A[N*i + j] + B[N*i + j]; } int main(){ float *A, *B, *C; int N = 100; hipMalloc((void**)&A, N*N*sizeof(float)); hipMalloc((void**)&B, N*N*sizeof(float)); hipMalloc((void**)&C, N*N*sizeof(float)); auto *a = malloc(N*N*sizeof(float)); auto *b = malloc(N*N*sizeof(float)); auto *c = malloc(N*N*sizeof(float)); hipMemcpy(A, a, N*N*sizeof(*A), hipMemcpyHostToDevice); hipMemcpy(B, b, N*N*sizeof(*B), hipMemcpyHostToDevice); // Kernel invocation with one block of N*N*1 threads int numBlocks = 1; dim3 threadsPerBlock(N, N, 1); MatAdd <<< numBlocks, threadsPerBlock >>>(N, (float*)A, (float*)B, (float*)C); hipMemcpy(c, C, N*N*sizeof(*C), hipMemcpyDeviceToHost); hipFree(A); hipFree(B); hipFree(C); std::cout << "Done" << std::endl; }
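With N = 100, threadsPerBlock(N, N, 1) requests 10,000 threads in a single block, which exceeds the 1,024-threads-per-block limit of current NVIDIA and AMD GPUs, so this launch fails on both platforms. A hedged fix (not from the source) tiles the matrix across 2D blocks instead:

// Sketch: tiled launch that stays within the 1024-threads-per-block limit.
__global__ void MatAddTiled(int N, float *A, float *B, float *C){
    int i = blockIdx.y*blockDim.y + threadIdx.y; // row
    int j = blockIdx.x*blockDim.x + threadIdx.x; // column
    if (i < N && j < N) C[N*i + j] = A[N*i + j] + B[N*i + j];
}
// launch: dim3 block(16,16); dim3 grid((N+15)/16, (N+15)/16); MatAddTiled<<<grid, block>>>(N, A, B, C);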
Convert the following CUDA code to AMD GPU code: cuda // *---------------------------------------------- // Author Contact Information: // Hao Gao // [email protected] || [email protected] // Department of Mathematics and Computer Science, Emory University // Department of Radiology and Imaging Sciences, Emory University // // Copyright (c) Hao Gao 2012 // ----------------------------------------------*/ // // If you find this code useful, you may cite the following reference: // H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012). // The full source codes are available at https://sites.google.com/site/fastxraytransform #include <math.h> #include <malloc.h> #define ABS(a) (a>0?a:-(a)) #define BLOCK_SIZE_x 16 #define BLOCK_SIZE_y 16 extern "C" void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt); __global__ void Ax_fan_mf_gpu_new_kernel(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X) // Please note that this version has O(Nx) per thread, since GPU threads are already saturated. // O(1) per thread can be achieved by parallelizing the "for" loop here, given sufficient number of GPU threads. { int bx=blockIdx.x; int by=blockIdx.y; int tx0=threadIdx.x; int ty0=threadIdx.y; int iv=bx*BLOCK_SIZE_x+tx0; int id=by*BLOCK_SIZE_y+ty0; if(iv<nv&&id<nd) { int n,nx2,ny2,ix,iy,c1,c2; float *x,cos_phi,sin_phi,x1,y1,x2,y2,xx1,yy1,xx2,yy2,slope,l,d; nx2=nx/2;ny2=ny/2; n=nx*ny; x=&X[id_X[iv]*n]; cos_phi=(float)cos(sd_phi[iv]);sin_phi=(float)sin(sd_phi[iv]); x1=cos_phi*(-SO); y1=sin_phi*(-SO); x2=cos_phi*OD-sin_phi*y_det[id]; y2=sin_phi*OD+cos_phi*y_det[id]; y[iv*nd+id]=0; if(ABS(x1-x2)>ABS(y1-y2)) { slope=(y2-y1)/(x2-x1); for(ix=0;ix<nx;ix++) { xx1=(float)(ix-nx2);xx2=xx1+1; if(slope>=0) { yy1=y1+slope*(xx1-x1)+ny2; yy2=y1+slope*(xx2-x1)+ny2; } else { yy1=y1+slope*(xx2-x1)+ny2; yy2=y1+slope*(xx1-x1)+ny2; } c1=(int)floor(yy1); c2=(int)floor(yy2); if(c2==c1)// c1 and c2 differ by less than 1 { if(c1>=0&&c1<=ny-1) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c1;y[iv*nd+id]+=l*x[iy*nx+ix]; } } else { if(c2>0&&c2<ny) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix]; iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix]; } else { if(c2==0) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix]; } if(c2==ny) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix]; } } } } } else { slope=(x2-x1)/(y2-y1); for(iy=0;iy<ny;iy++) { yy1=(float)(iy-ny2);yy2=yy1+1; if(slope>=0) { xx1=x1+slope*(yy1-y1)+nx2; xx2=x1+slope*(yy2-y1)+nx2; } else { xx1=x1+slope*(yy2-y1)+nx2; xx2=x1+slope*(yy1-y1)+nx2; } c1=(int)floor(xx1); c2=(int)floor(xx2); if(c2==c1)// c1 and c2 differ by less than 1 { if(c1>=0&&c1<=nx-1) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c1;y[iv*nd+id]+=l*x[iy*nx+ix]; } } else { if(c2>0&&c2<nx) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix]; ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix]; } else { if(c2==0) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix]; } if(c2==nx) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix]; } } } } } y[iv*nd+id]*=scale; } } void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt) // A new method for computing the X-ray transform (infinitely-narrow beam) // The
algorithm details are available in // H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012). { float *y_d,*X_d,*sd_phi_d,*y_det_d; int *id_X_d; cudaMalloc(&y_d,nv*nd*sizeof(float)); cudaMalloc(&X_d,nx*ny*nt*sizeof(float)); cudaMalloc(&sd_phi_d,nv*sizeof(float)); cudaMalloc(&y_det_d,nd*sizeof(float)); cudaMalloc(&id_X_d,nv*sizeof(int)); cudaMemcpy(X_d,X,nx*ny*nt*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(sd_phi_d,sd_phi,nv*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(y_det_d,y_det,nd*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(id_X_d,id_X,nv*sizeof(int),cudaMemcpyHostToDevice); dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y); dim3 dimGrid_t((nv+dimBlock.x-1)/dimBlock.x,(nd+dimBlock.y-1)/dimBlock.y); Ax_fan_mf_gpu_new_kernel<<<dimGrid_t, dimBlock>>>(X_d,y_d,SO,OD,scale,nx,ny,nv,sd_phi_d,nd,y_det_d,id_X_d); cudaMemcpy(y,y_d,nv*nd*sizeof(float),cudaMemcpyDeviceToHost); cudaFree(y_d);cudaFree(X_d);cudaFree(sd_phi_d);cudaFree(y_det_d);cudaFree(id_X_d); }
// *---------------------------------------------- // Author Contact Information: // Hao Gao // [email protected] || [email protected] // Department of Mathematics and Computer Science, Emory University // Department of Radiology and Imaging Sciences, Emory University // // Copyright (c) Hao Gao 2012 // ----------------------------------------------*/ // // If you find this code useful, you may cite the following reference: // H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012). // The full source codes are available at https://sites.google.com/site/fastxraytransform #include <hip/hip_runtime.h> #include <math.h> #include <malloc.h> #define ABS(a) (a>0?a:-(a)) #define BLOCK_SIZE_x 16 #define BLOCK_SIZE_y 16 extern "C" void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt); __global__ void Ax_fan_mf_gpu_new_kernel(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X) // Please note that this version has O(Nx) per thread, since GPU threads are already saturated. // O(1) per thread can be achieved by parallelizing the "for" loop here, given sufficient number of GPU threads. { int bx=blockIdx.x; int by=blockIdx.y; int tx0=threadIdx.x; int ty0=threadIdx.y; int iv=bx*BLOCK_SIZE_x+tx0; int id=by*BLOCK_SIZE_y+ty0; if(iv<nv&&id<nd) { int n,nx2,ny2,ix,iy,c1,c2; float *x,cos_phi,sin_phi,x1,y1,x2,y2,xx1,yy1,xx2,yy2,slope,l,d; nx2=nx/2;ny2=ny/2; n=nx*ny; x=&X[id_X[iv]*n]; cos_phi=(float)cos(sd_phi[iv]);sin_phi=(float)sin(sd_phi[iv]); x1=cos_phi*(-SO); y1=sin_phi*(-SO); x2=cos_phi*OD-sin_phi*y_det[id]; y2=sin_phi*OD+cos_phi*y_det[id]; y[iv*nd+id]=0; if(ABS(x1-x2)>ABS(y1-y2)) { slope=(y2-y1)/(x2-x1); for(ix=0;ix<nx;ix++) { xx1=(float)(ix-nx2);xx2=xx1+1; if(slope>=0) { yy1=y1+slope*(xx1-x1)+ny2; yy2=y1+slope*(xx2-x1)+ny2; } else { yy1=y1+slope*(xx2-x1)+ny2; yy2=y1+slope*(xx1-x1)+ny2; } c1=(int)floor(yy1); c2=(int)floor(yy2); if(c2==c1)// c1 and c2 differ by less than 1 { if(c1>=0&&c1<=ny-1) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c1;y[iv*nd+id]+=l*x[iy*nx+ix]; } } else { if(c2>0&&c2<ny) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix]; iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix]; } else { if(c2==0) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c2;y[iv*nd+id]+=((yy2-c2)/d)*l*x[iy*nx+ix]; } if(c2==ny) { d=yy2-yy1;l=(float)sqrt(d*d+1); iy=c1;y[iv*nd+id]+=((c2-yy1)/d)*l*x[iy*nx+ix]; } } } } } else { slope=(x2-x1)/(y2-y1); for(iy=0;iy<ny;iy++) { yy1=(float)(iy-ny2);yy2=yy1+1; if(slope>=0) { xx1=x1+slope*(yy1-y1)+nx2; xx2=x1+slope*(yy2-y1)+nx2; } else { xx1=x1+slope*(yy2-y1)+nx2; xx2=x1+slope*(yy1-y1)+nx2; } c1=(int)floor(xx1); c2=(int)floor(xx2); if(c2==c1)// c1 and c2 differ by less than 1 { if(c1>=0&&c1<=nx-1) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c1;y[iv*nd+id]+=l*x[iy*nx+ix]; } } else { if(c2>0&&c2<nx) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix]; ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix]; } else { if(c2==0) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c2;y[iv*nd+id]+=((xx2-c2)/d)*l*x[iy*nx+ix]; } if(c2==nx) { d=xx2-xx1;l=(float)sqrt(d*d+1); ix=c1;y[iv*nd+id]+=((c2-xx1)/d)*l*x[iy*nx+ix]; } } } } } y[iv*nd+id]*=scale; } } void Ax_fan_mf_gpu_new(float *X,float *y,float SO,float OD,float scale,int nx,int ny,int nv,float *sd_phi,int nd,float *y_det,int *id_X,int nt) // A new method for computing the X-ray transform (infinitely-narrow beam) // The algorithm details are
available in // H. Gao. "Fast parallel algorithms for the X-ray transform and its adjoint", Medical Physics (2012). { float *y_d,*X_d,*sd_phi_d,*y_det_d; int *id_X_d; hipMalloc(&y_d,nv*nd*sizeof(float)); hipMalloc(&X_d,nx*ny*nt*sizeof(float)); hipMalloc(&sd_phi_d,nv*sizeof(float)); hipMalloc(&y_det_d,nd*sizeof(float)); hipMalloc(&id_X_d,nv*sizeof(int)); hipMemcpy(X_d,X,nx*ny*nt*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(sd_phi_d,sd_phi,nv*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(y_det_d,y_det,nd*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(id_X_d,id_X,nv*sizeof(int),hipMemcpyHostToDevice); dim3 dimBlock(BLOCK_SIZE_x,BLOCK_SIZE_y); dim3 dimGrid_t((nv+dimBlock.x-1)/dimBlock.x,(nd+dimBlock.y-1)/dimBlock.y); Ax_fan_mf_gpu_new_kernel<<<dimGrid_t, dimBlock>>>(X_d,y_d,SO,OD,scale,nx,ny,nv,sd_phi_d,nd,y_det_d,id_X_d); hipMemcpy(y,y_d,nv*nd*sizeof(float),hipMemcpyDeviceToHost); hipFree(y_d);hipFree(X_d);hipFree(sd_phi_d);hipFree(y_det_d);hipFree(id_X_d); }
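None of the hip* return codes in the answer are inspected; a small checking macro (a common pattern, not part of the source, and assuming <stdio.h> and <stdlib.h> are included) makes allocation or copy failures visible:

#define HIP_CHECK(call)                                           \
    do {                                                          \
        hipError_t err_ = (call);                                 \
        if (err_ != hipSuccess) {                                 \
            fprintf(stderr, "HIP error %s at %s:%d\n",            \
                    hipGetErrorString(err_), __FILE__, __LINE__); \
            exit(1);                                              \
        }                                                         \
    } while (0)
// usage: HIP_CHECK(hipMalloc(&y_d, nv*nd*sizeof(float)));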
Convert the following CUDA code to AMD GPU code: cuda #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/transform.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/random.h> #include <thrust/inner_product.h> // This example shows how thrust::zip_iterator can be used to create a // 'virtual' array of structures. In this case the structure is a 3d // vector type (Float3) whose (x,y,z) components will be stored in // three separate float arrays. The zip_iterator "zips" these arrays // into a single virtual Float3 array. // We'll use a 3-tuple to store our 3d vector type typedef thrust::tuple<float,float,float> Float3; // This functor implements the dot product between 3d vectors struct Float3Product : public thrust::binary_function<Float3,Float3,float> { __host__ __device__ Float3 operator()(const Float3& a, const Float3& b) const { return Float3(thrust::get<0>(a) * thrust::get<0>(b), // x components thrust::get<1>(a) * thrust::get<1>(b), // y components thrust::get<2>(a) * thrust::get<2>(b)); // z components } }; // This functor implements the dot product between 3d vectors struct Float3Add : public thrust::binary_function<Float3,Float3,float> { __host__ __device__ Float3 operator()(const Float3& a, const Float3& b) const { return Float3(thrust::get<0>(a) + thrust::get<0>(b), // x components thrust::get<1>(a) + thrust::get<1>(b), // y components thrust::get<2>(a) + thrust::get<2>(b)); // z components } }; int main(void) { thrust::device_vector<float> A0(3); // x components of the 'A' vectors thrust::device_vector<float> A1(3); // y components of the 'A' vectors thrust::device_vector<float> A2(3); // z components of the 'A' vectors A0[0] = 1; A1[0] = 4; A2[0] = 7; A0[1] = 2; A1[1] = 5; A2[1] = 8; A0[2] = 3; A1[2] = 6; A2[2] = 9; thrust::device_vector<float> B0(3); // x components of the 'B' vectors thrust::device_vector<float> B1(3); // y components of the 'B' vectors thrust::device_vector<float> B2(3); // z components of the 'B' vectors B0[0] = 10; B1[0] = 13; B2[0] = 16; B0[1] = 11; B1[1] = 14; B2[1] = 17; B0[2] = 12; B1[2] = 15; B2[2] = 18; // Storage for result of each dot product Float3 init(0, 0, 0); Float3 result = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(A0.begin(), A1.begin(), A2.begin())), thrust::make_zip_iterator(thrust::make_tuple(A0.end(), A1.end(), A2.end())), thrust::make_zip_iterator(thrust::make_tuple(B0.begin(), B1.begin(), B2.begin())), init, Float3Add(), Float3Product()); std::cout << "result: "; std::cout << "(" << thrust::get<0>(result) << "," << thrust::get<1>(result) << "," << thrust::get<2>(result) << ")"; std::cout << "\n"; return 0; }
#include <hip/hip_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/transform.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/random.h> #include <thrust/inner_product.h> // This example shows how thrust::zip_iterator can be used to create a // 'virtual' array of structures. In this case the structure is a 3d // vector type (Float3) whose (x,y,z) components will be stored in // three separate float arrays. The zip_iterator "zips" these arrays // into a single virtual Float3 array. // We'll use a 3-tuple to store our 3d vector type typedef thrust::tuple<float,float,float> Float3; // This functor implements the dot product between 3d vectors struct Float3Product : public thrust::binary_function<Float3,Float3,float> { __host__ __device__ Float3 operator()(const Float3& a, const Float3& b) const { return Float3(thrust::get<0>(a) * thrust::get<0>(b), // x components thrust::get<1>(a) * thrust::get<1>(b), // y components thrust::get<2>(a) * thrust::get<2>(b)); // z components } }; // This functor implements the dot product between 3d vectors struct Float3Add : public thrust::binary_function<Float3,Float3,float> { __host__ __device__ Float3 operator()(const Float3& a, const Float3& b) const { return Float3(thrust::get<0>(a) + thrust::get<0>(b), // x components thrust::get<1>(a) + thrust::get<1>(b), // y components thrust::get<2>(a) + thrust::get<2>(b)); // z components } }; int main(void) { thrust::device_vector<float> A0(3); // x components of the 'A' vectors thrust::device_vector<float> A1(3); // y components of the 'A' vectors thrust::device_vector<float> A2(3); // z components of the 'A' vectors A0[0] = 1; A1[0] = 4; A2[0] = 7; A0[1] = 2; A1[1] = 5; A2[1] = 8; A0[2] = 3; A1[2] = 6; A2[2] = 9; thrust::device_vector<float> B0(3); // x components of the 'B' vectors thrust::device_vector<float> B1(3); // y components of the 'B' vectors thrust::device_vector<float> B2(3); // z components of the 'B' vectors B0[0] = 10; B1[0] = 13; B2[0] = 16; B0[1] = 11; B1[1] = 14; B2[1] = 17; B0[2] = 12; B1[2] = 15; B2[2] = 18; // Storage for result of each dot product Float3 init(0, 0, 0); Float3 result = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(A0.begin(), A1.begin(), A2.begin())), thrust::make_zip_iterator(thrust::make_tuple(A0.end(), A1.end(), A2.end())), thrust::make_zip_iterator(thrust::make_tuple(B0.begin(), B1.begin(), B2.begin())), init, Float3Add(), Float3Product()); std::cout << "result: "; std::cout << "(" << thrust::get<0>(result) << "," << thrust::get<1>(result) << "," << thrust::get<2>(result) << ")"; std::cout << "\n"; return 0; }
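One detail carried over from the CUDA original: both functors declare thrust::binary_function<Float3,Float3,float> yet return Float3, so the declared result type is wrong (and binary_function is deprecated in recent Thrust/rocThrust releases anyway). A corrected declaration for one of them, as a sketch:

// The result type is Float3, not float; with C++11 and later the
// binary_function base class can simply be omitted.
struct Float3Add
{
    __host__ __device__
    Float3 operator()(const Float3& a, const Float3& b) const
    {
        return Float3(thrust::get<0>(a) + thrust::get<0>(b),
                      thrust::get<1>(a) + thrust::get<1>(b),
                      thrust::get<2>(a) + thrust::get<2>(b));
    }
};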
Convert the following CUDA code to AMD GPU code: cuda #include <cuda_runtime_api.h> __global__ void batchmap_add_kernel( float *xs, int frame_len, int batch_size, float alpha, const float *scalars) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int i = idx % frame_len; int batch_idx = idx / frame_len; if ((i < frame_len) && (batch_idx < batch_size)) { float x = xs[idx] + alpha * scalars[batch_idx]; xs[idx] = x; } } extern "C" void neuralops_cuda_batchmap_add( float *xs, size_t frame_len, size_t batch_size, float alpha, const float *scalars, cudaStream_t stream) { int n = frame_len * batch_size; batchmap_add_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(xs, frame_len, batch_size, alpha, scalars); } __global__ void batchmap_div_kernel( float *xs, int frame_len, int batch_size, const float *scalars) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int i = idx % frame_len; int batch_idx = idx / frame_len; if ((i < frame_len) && (batch_idx < batch_size)) { float x = xs[idx] / scalars[batch_idx]; xs[idx] = x; } } extern "C" void neuralops_cuda_batchmap_div( float *xs, size_t frame_len, size_t batch_size, const float *scalars, cudaStream_t stream) { int n = frame_len * batch_size; batchmap_div_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(xs, frame_len, batch_size, scalars); }
#include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> __global__ void batchmap_add_kernel( float *xs, int frame_len, int batch_size, float alpha, const float *scalars) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int i = idx % frame_len; int batch_idx = idx / frame_len; if ((i < frame_len) && (batch_idx < batch_size)) { float x = xs[idx] + alpha * scalars[batch_idx]; xs[idx] = x; } } extern "C" void neuralops_cuda_batchmap_add( float *xs, size_t frame_len, size_t batch_size, float alpha, const float *scalars, hipStream_t stream) { int n = frame_len * batch_size; batchmap_add_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(xs, frame_len, batch_size, alpha, scalars); } __global__ void batchmap_div_kernel( float *xs, int frame_len, int batch_size, const float *scalars) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int i = idx % frame_len; int batch_idx = idx / frame_len; if ((i < frame_len) && (batch_idx < batch_size)) { float x = xs[idx] / scalars[batch_idx]; xs[idx] = x; } } extern "C" void neuralops_cuda_batchmap_div( float *xs, size_t frame_len, size_t batch_size, const float *scalars, hipStream_t stream) { int n = frame_len * batch_size; batchmap_div_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(xs, frame_len, batch_size, scalars); }
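hipcc accepts the triple-chevron launch syntax used above; the macro form is an equivalent alternative style. A sketch of the first launch rewritten with hipLaunchKernelGGL (the explicit int casts mirror the implicit size_t-to-int conversions in the original):

hipLaunchKernelGGL(batchmap_add_kernel,
                   dim3((n + 1024 - 1) / 1024), dim3(1024), // grid, block
                   0, stream,                               // shared mem bytes, stream
                   xs, (int)frame_len, (int)batch_size, alpha, scalars);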
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda.h" #include "device_atomic_functions.h" #include <stdio.h> #include "Bfs.cuh" __global__ void bfs_kernel(unsigned int* current_set, unsigned int* new_set, int current_set_size, int* current_set_size_new, Node* node_list, Edge* edge_list, int* color, int* cost, int level) { // get tread number int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int j = tid; j<current_set_size; j += blockDim.x*gridDim.x) { unsigned int index = current_set[j];// get one from the current set current_set[j] = 0; // make it not visited cost[index] = level; Node cur_node = node_list[index]; // all adjacent vertices for (int i = cur_node.start; i < cur_node.start + cur_node.edge_num; i++) { unsigned int id = edge_list[i].dest; int old_color = atomicExch((int*)&color[id], BLACK);// visit adjacent node if (old_color == WHITE) { // was not visited int write_position = atomicAdd((int*) &(*current_set_size_new), 1); new_set[write_position] = id; // add to set of the next level } } } } void callBFSKernel(const unsigned int blocks, const unsigned int threadsPerBlock, unsigned int* current_set, unsigned int* new_set, int current_set_size, int* current_set_size_new, Node* node_list, Edge* edge_list, int* color, int* cost, int level) { bfs_kernel << <blocks, threadsPerBlock >> > (current_set, new_set, current_set_size, current_set_size_new, node_list, edge_list, color, cost, level); }
#include <hip/hip_runtime.h> #include <stdio.h> #include "Bfs.cuh" __global__ void bfs_kernel(unsigned int* current_set, unsigned int* new_set, int current_set_size, int* current_set_size_new, Node* node_list, Edge* edge_list, int* color, int* cost, int level) { // get thread number int tid = blockIdx.x * blockDim.x + threadIdx.x; for (int j = tid; j<current_set_size; j += blockDim.x*gridDim.x) { unsigned int index = current_set[j];// get one from the current set current_set[j] = 0; // make it not visited cost[index] = level; Node cur_node = node_list[index]; // all adjacent vertices for (int i = cur_node.start; i < cur_node.start + cur_node.edge_num; i++) { unsigned int id = edge_list[i].dest; int old_color = atomicExch((int*)&color[id], BLACK);// visit adjacent node if (old_color == WHITE) { // was not visited int write_position = atomicAdd((int*) &(*current_set_size_new), 1); new_set[write_position] = id; // add to set of the next level } } } } void callBFSKernel(const unsigned int blocks, const unsigned int threadsPerBlock, unsigned int* current_set, unsigned int* new_set, int current_set_size, int* current_set_size_new, Node* node_list, Edge* edge_list, int* color, int* cost, int level) { bfs_kernel << <blocks, threadsPerBlock >> > (current_set, new_set, current_set_size, current_set_size_new, node_list, edge_list, color, cost, level); } /* Bfs.cuh contains no CUDA-specific code and carries over unchanged: */ #ifndef BFS_CUH #define BFS_CUH #define WHITE 0 /* not visited */ #define GREY 1 /* visiting */ #define BLACK 2 /* visited */ #define INF 2147483647 /* infinity distance */ #define MAX_LEVEL 20000 #define THREAD_PER_BLOCK 512 /* can depend on number of nodes*/ #define BLOCK_NUM 256 typedef struct node_t { unsigned int start; /* starting index of edges */ unsigned int edge_num; } Node; typedef struct edge_t { unsigned int dest; /* index of nodes */ unsigned int cost; } Edge; void callBFSKernel(const unsigned int blocks, const unsigned int threadsPerBlock, unsigned int* current_set, unsigned int* new_set, int current_set_size, int* current_set_size_new, Node* node_list, Edge* edge_list, int* color, int* cost, int level); #endif // BFS_CUH
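The host-side traversal loop is not part of the snippet. A minimal driver sketch that swaps frontiers level by level (the names d_cur, d_new, d_new_size, d_nodes, d_edges, d_color, d_cost are hypothetical, not from the original):

int level = 0, frontier_size = 1; // assumes the source vertex is already in d_cur
while (frontier_size > 0 && level < MAX_LEVEL) {
    int zero = 0;
    hipMemcpy(d_new_size, &zero, sizeof(int), hipMemcpyHostToDevice); // reset counter
    callBFSKernel(BLOCK_NUM, THREAD_PER_BLOCK, d_cur, d_new, frontier_size,
                  d_new_size, d_nodes, d_edges, d_color, d_cost, level);
    hipDeviceSynchronize();
    hipMemcpy(&frontier_size, d_new_size, sizeof(int), hipMemcpyDeviceToHost);
    unsigned int* tmp = d_cur; d_cur = d_new; d_new = tmp; // swap frontiers
    level++;
}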
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> __global__ void kernel(unsigned char *p) { // int i = blockIdx.x; // int j = blockIdx.y; int i = threadIdx.x; int j = threadIdx.y; p[i * 9 + j] = (i + 1) * (j + 1); } int main( void ) { unsigned char table[81]; unsigned char *mem; cudaMalloc((void **)&mem, 81); // dim3 b(9, 9); dim3 t(9, 9); kernel<<<1,t>>>(mem); cudaMemcpy(table, mem, 81, cudaMemcpyDeviceToHost); cudaFree(mem); for (int i = 0; i < 9; i++) { for (int j = 0; j < 9; j++) { printf(" %2d", table[i * 9 + j]); } putchar('\n'); } return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> __global__ void kernel(unsigned char *p) { // int i = blockIdx.x; // int j = blockIdx.y; int i = threadIdx.x; int j = threadIdx.y; p[i * 9 + j] = (i + 1) * (j + 1); } int main( void ) { unsigned char table[81]; unsigned char *mem; hipMalloc((void **)&mem, 81); // dim3 b(9, 9); dim3 t(9, 9); kernel<<<1,t>>>(mem); hipMemcpy(table, mem, 81, hipMemcpyDeviceToHost); hipFree(mem); for (int i = 0; i < 9; i++) { for (int j = 0; j < 9; j++) { printf(" %2d", table[i * 9 + j]); } putchar('\n'); } return 0; }
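The commented-out dim3 b(9, 9) hints at a one-thread-per-block variant of this kernel. As a sketch, the same multiplication table can be produced with a 9x9 grid of single-thread blocks by indexing on blockIdx instead:

__global__ void kernel_grid(unsigned char *p)
{
    int i = blockIdx.x;
    int j = blockIdx.y;
    p[i * 9 + j] = (i + 1) * (j + 1);
}
// launch: dim3 b(9, 9); kernel_grid<<<b, 1>>>(mem);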
Convert the following CUDA code to AMD GPU code: cuda #include "Backpropogation.cuh" #include "NeuralNetwork.cuh" /** * Gets the errors associated with each neuron's output * Parameter nn: the neural network to get the error for * Parameter errors: the matrix to put the errors in * Parameter layer: the layer to get the error for * Return: nothing */ __global__ void getErrorKernel(NeuralNet* nn, double** errors, double** outputs, double* expected, int layer){ for(int neuron1 = threadIdx.x + (blockDim.x * blockIdx.x); neuron1 < nn ->neurons[layer]; neuron1 += blockDim.x*gridDim.x){ // The last layer's error if(layer == nn->layers - 1){ //printf("actual - expected = %f - %f = %f\n", outputs[nn->layers - 1][neuron1], expected[neuron1], outputs[nn->layers - 1][neuron1]-expected[neuron1]); errors[layer][neuron1] = outputs[nn->layers - 1][neuron1] - expected[neuron1]; } // The remaining layers' errors else{ // Zeros the error errors[layer][neuron1] = 0; // Uses the derivative of the outputs with respect to the inputs for(int neuron2 = 0; neuron2 < nn->neurons[layer+1]; neuron2++){ switch(nn->activations[layer][neuron2]){ case LINEAR: errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2]); break; case BINARY_STEP: break; case LOGISTIC: errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2] * outputs[layer + 1][neuron2] * (1 - outputs[layer + 1][neuron2])); break; case TANH: errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2] * (1 - (outputs[layer + 1][neuron2] * outputs[layer + 1][neuron2]))); break; case RELU: if(outputs[layer + 1][neuron2] > 0){ errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2]); } break; case LEAKYRELU: if(outputs[layer + 1][neuron2] < 0){ errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2] * .01); } else{ errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2]); } break; } } } } } /** * Backpropogates the weights in the neural net * Parameter nn: the neural network to backpropogate * Parameter layer: the layer that is being backpropogated * Returns: nothing */ __global__ void backpropogationWeightsKernel(NeuralNet* nn, double** outputs, double** error, double*** deltaWeights, int layer){ for(int neuron1 = threadIdx.x + (blockDim.x* blockIdx.x); neuron1 < nn -> neurons[layer]; neuron1 += (blockDim.x * gridDim.x)){ for(int neuron2 = threadIdx.y + (blockDim.y * blockIdx.y); neuron2 < nn->neurons[layer + 1]; neuron2 += (blockDim.y * gridDim.y)){ switch(nn->activations[layer][neuron2]){ case LINEAR: deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1]); break; case BINARY_STEP: // This has a derivative of 0 everywhere so // nothing needs to be added break; case LOGISTIC: deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1] * (outputs[layer][neuron2] * (1 - outputs[layer][neuron2]))); break; case TANH: deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1] * (1 - (outputs[layer][neuron2] * outputs[layer][neuron2]))); break; case RELU: if(outputs[layer][neuron2] > 0){ deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1]); } break; case LEAKYRELU: if(outputs[layer + 1][neuron2] < 0){ 
deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1] * .01); } else{ deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1]); } break; } } } } /** * Backpropogates the biases in the neural net * Parameter nn: the neural network to backpropogate * Parameter layer: the layer that is being backpropogated * Returns: nothing */ __global__ void backpropogationBiasesKernel(NeuralNet* nn, double** outputs, double** error, double** deltaBiases, int layer){ for(int neuron = threadIdx.x + (blockDim.x * blockIdx.x); neuron < nn->neurons[layer]; neuron += (blockDim.x * gridDim.x)){ switch(nn->activations[layer][neuron]){ case LINEAR: deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron]); break; case BINARY_STEP: // This has a derivative of 0 everywhere // so nothing needs to be added break; case LOGISTIC: deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron] * outputs[layer][neuron] * (1 - outputs[layer][neuron])); break; case TANH: deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron] * (1 - (outputs[layer][neuron] * outputs[layer][neuron]))); break; case RELU: if(outputs[layer][neuron] > 0){ deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron]); } break; case LEAKYRELU: if(outputs[layer][neuron] < 0){ deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron] * .01); } else{ deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron]); } break; } } } /** * Changes the weights in the neural net * Parameter nn: the neural network to change the weights in * Parameter deltaWeights: the matrix of total unaveraged weight changes * Parameter numOutputs: the number of outputs to average * Returns: nothing */ __global__ void changeWeights(NeuralNet* nn, double*** deltaWeights, int numOutputs){ for(int layer = threadIdx.x + (blockDim.x * blockIdx.x); layer < nn->layers-1; layer += (blockDim.x * gridDim.x)){ for(int neuron1 = threadIdx.y + (blockDim.y * blockIdx.y); neuron1 < nn->neurons[layer]; neuron1 += (blockDim.y * gridDim.y)){ for(int neuron2 = threadIdx.z + (blockDim.z * blockIdx.z); neuron2 < nn->neurons[layer + 1]; neuron2 += (blockDim.z * gridDim.z)){ double delta=deltaWeights[layer][neuron1][neuron2]/numOutputs; if(delta<GRADIENT_CEILING && delta>-GRADIENT_CEILING){ nn->weights[layer][neuron1][neuron2] -= (deltaWeights[layer][neuron1][neuron2] / numOutputs); } else{ if(delta<0){ nn->weights[layer][neuron1][neuron2]-=(-GRADIENT_CEILING); } else{ nn->weights[layer][neuron1][neuron2]-=(GRADIENT_CEILING); } } } } } } /** * Changes the biases in the neural net * Parameter nn: the neural network to change the biases in * Parameter deltaBiases: the matrix of total unaveraged bias changes * Parameter numOutputs: the total number of outputs to average * Returns: nothing */ __global__ void changeBiases(NeuralNet* nn, double** deltaBiases, int numOutputs){ for(int layer = threadIdx.x + (blockDim.x * blockIdx.x); layer < nn->layers - 1; layer += (blockDim.x * gridDim.x)){ for(int neuron = threadIdx.y + (blockDim.y * blockIdx.y); neuron < nn->neurons[layer]; neuron += blockDim.y * gridDim.y){ double delta=deltaBiases[layer][neuron]/numOutputs; if(delta<GRADIENT_CEILING && delta>-GRADIENT_CEILING){ nn->biases[layer][neuron] -= (deltaBiases[layer][neuron] / numOutputs); } else{ if(delta<0){ nn->biases[layer][neuron] -= (-GRADIENT_CEILING); } else{ nn->biases[layer][neuron] -= (GRADIENT_CEILING); } } } } } /** * Backpropogates the 
neural network with the actual and expected outputs * Parameter nn: the neural network to backpropogate * Parameter actual: the actual outputs given by the neural network * Parameter expected: the expected outputs given by the neural network * Parameter numOutputs: the number of outputs * Returns: nothing */ void backpropogate(NeuralNet* nn, double*** outputs, double** expected, int numOutputs){ // Gets an matrix for the error double** error = getNeuralMatrix(nn); double** deltaBiasMatrix=getNeuralMatrix(nn); double*** deltaWeightMatrix=getNeuralWeightMatrix(nn); // Loops through the outputs for(int output = 0; output < numOutputs; output++){ for(int layer=nn->layers - 1; layer >= 0; layer--){ // Gets the error getErrorKernel<<<NUMBLOCKS, BLOCKSIZE>>>(nn, error, outputs[output], expected[output], layer); cudaDeviceSynchronize(); // Backpropgate the weights/biases if(layer != nn->layers - 1){ backpropogationWeightsKernel<<<dim3(NUMBLOCKS, NUMBLOCKS), dim3(BLOCKSIZE/4, BLOCKSIZE/4)>>>(nn, outputs[output], error, deltaWeightMatrix, layer); cudaDeviceSynchronize(); backpropogationBiasesKernel<<<NUMBLOCKS, BLOCKSIZE>>>(nn, outputs[output], error, deltaBiasMatrix, layer); cudaDeviceSynchronize(); } } } // Changes the weights in the neural net changeWeights<<<dim3(NUMBLOCKS, NUMBLOCKS, NUMBLOCKS), dim3(BLOCKSIZE/16, BLOCKSIZE/16, BLOCKSIZE/16)>>>(nn, deltaWeightMatrix, numOutputs); cudaDeviceSynchronize(); // Changes the biases in the neural net changeBiases<<<dim3(NUMBLOCKS, NUMBLOCKS), dim3(BLOCKSIZE/4, BLOCKSIZE/4)>>>(nn, deltaBiasMatrix, numOutputs); cudaDeviceSynchronize(); // Frees the error matrix freeNeuralMatrix(error, nn); // Frees the bias matrix freeNeuralMatrix(deltaBiasMatrix, nn); // Frees the weight matrix freeNeuralWeightMatrix(deltaWeightMatrix, nn); } /** * Allocates memory for and zeros a matrix of the same size as the neural network's nodes * Parameter nn: the neural network to get the dimensions from * Returns: a matrix of the same size as the neural network's nodes */ double** getNeuralMatrix(NeuralNet* nn){ double** matrix; cudaMallocManaged(&matrix, nn->layers*sizeof(double*)); for(int layer = 0; layer < nn->layers; layer++){ cudaMallocManaged(&matrix[layer], nn->neurons[layer]*sizeof(double)); for(int neuron = 0; neuron < nn->neurons[layer]; neuron++){ matrix[layer][neuron] = 0; } } return matrix; } /** * Frees the neural matrix * Parameter matrix: the matrix to feree * Parameter nn: the neural network to get the dimensions from * Returns: nothing */ void freeNeuralMatrix(double** matrix, NeuralNet* nn){ for(int layer = 0; layer < nn->layers; layer++){ cudaFree(matrix[layer]); } cudaFree(matrix); } /** * Allocates memory and zeros a matrix of the same size as the neural network's weight matrix * Parameter nn: the neural network to get the weight matrix from * Returns: a matrix of the same size as the neural network's weight matrix */ double*** getNeuralWeightMatrix(NeuralNet* nn){ double*** matrix; cudaMallocManaged(&matrix, nn->layers * sizeof(double**)); for(int layer = 0; layer < nn->layers-1; layer++){ cudaMallocManaged(&matrix[layer], nn->neurons[layer] * sizeof(double*)); for(int neuron1 = 0; neuron1 < nn->neurons[layer]; neuron1++){ cudaMallocManaged(&matrix[layer][neuron1], nn->neurons[layer+1] * sizeof(double)); for(int neuron2 = 0; neuron2 < nn->neurons[layer + 1]; neuron2++){ matrix[layer][neuron1][neuron2] = 0; } } } return matrix; } /** * Frees the neural weight matrix * Parameter matrix: the neural weight matrix * Parameter nn: the neural network to get the 
dimensions from * Returns: nothing */ void freeNeuralWeightMatrix(double*** matrix, NeuralNet* nn){ for(int layer = 0; layer < nn->layers - 1; layer++){ for(int neuron1 = 0; neuron1 < nn->neurons[layer]; neuron1++){ cudaFree(matrix[layer][neuron1]); } cudaFree(matrix[layer]); } cudaFree(matrix); }
#include <hip/hip_runtime.h> #include "Backpropogation.cuh" #include "NeuralNetwork.cuh" /** * Gets the errors associated with each neuron's output * Parameter nn: the neural network to get the error for * Parameter errors: the matrix to put the errors in * Parameter layer: the layer to get the error for * Return: nothing */ __global__ void getErrorKernel(NeuralNet* nn, double** errors, double** outputs, double* expected, int layer){ for(int neuron1 = threadIdx.x + (blockDim.x * blockIdx.x); neuron1 < nn ->neurons[layer]; neuron1 += blockDim.x*gridDim.x){ // The last layer's error if(layer == nn->layers - 1){ //printf("actual - expected = %f - %f = %f\n", outputs[nn->layers - 1][neuron1], expected[neuron1], outputs[nn->layers - 1][neuron1]-expected[neuron1]); errors[layer][neuron1] = outputs[nn->layers - 1][neuron1] - expected[neuron1]; } // The remaining layers' errors else{ // Zeros the error errors[layer][neuron1] = 0; // Uses the derivative of the outputs with respect to the inputs for(int neuron2 = 0; neuron2 < nn->neurons[layer+1]; neuron2++){ switch(nn->activations[layer][neuron2]){ case LINEAR: errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2]); break; case BINARY_STEP: break; case LOGISTIC: errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2] * outputs[layer + 1][neuron2] * (1 - outputs[layer + 1][neuron2])); break; case TANH: errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2] * (1 - (outputs[layer + 1][neuron2] * outputs[layer + 1][neuron2]))); break; case RELU: if(outputs[layer + 1][neuron2] > 0){ errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2]); } break; case LEAKYRELU: if(outputs[layer + 1][neuron2] < 0){ errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2] * .01); } else{ errors[layer][neuron1] += (errors[layer + 1][neuron2] * nn -> weights[layer][neuron1][neuron2]); } break; } } } } } /** * Backpropogates the weights in the neural net * Parameter nn: the neural network to backpropogate * Parameter layer: the layer that is being backpropogated * Returns: nothing */ __global__ void backpropogationWeightsKernel(NeuralNet* nn, double** outputs, double** error, double*** deltaWeights, int layer){ for(int neuron1 = threadIdx.x + (blockDim.x* blockIdx.x); neuron1 < nn -> neurons[layer]; neuron1 += (blockDim.x * gridDim.x)){ for(int neuron2 = threadIdx.y + (blockDim.y * blockIdx.y); neuron2 < nn->neurons[layer + 1]; neuron2 += (blockDim.y * gridDim.y)){ switch(nn->activations[layer][neuron2]){ case LINEAR: deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1]); break; case BINARY_STEP: // This has a derivative of 0 everywhere so // nothing needs to be added break; case LOGISTIC: deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1] * (outputs[layer][neuron2] * (1 - outputs[layer][neuron2]))); break; case TANH: deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1] * (1 - (outputs[layer][neuron2] * outputs[layer][neuron2]))); break; case RELU: if(outputs[layer][neuron2] > 0){ deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1]); } break; case LEAKYRELU: if(outputs[layer + 1][neuron2] < 0){ deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * 
error[layer + 1][neuron2] * outputs[layer][neuron1] * .01); } else{ deltaWeights[layer][neuron1][neuron2] += (LEARNING_RATE * error[layer + 1][neuron2] * outputs[layer][neuron1]); } break; } } } } /** * Backpropogates the biases in the neural net * Parameter nn: the neural network to backpropogate * Parameter layer: the layer that is being backpropogated * Returns: nothing */ __global__ void backpropogationBiasesKernel(NeuralNet* nn, double** outputs, double** error, double** deltaBiases, int layer){ for(int neuron = threadIdx.x + (blockDim.x * blockIdx.x); neuron < nn->neurons[layer]; neuron += (blockDim.x * gridDim.x)){ switch(nn->activations[layer][neuron]){ case LINEAR: deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron]); break; case BINARY_STEP: // This has a derivative of 0 everywhere // so nothing needs to be added break; case LOGISTIC: deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron] * outputs[layer][neuron] * (1 - outputs[layer][neuron])); break; case TANH: deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron] * (1 - (outputs[layer][neuron] * outputs[layer][neuron]))); break; case RELU: if(outputs[layer][neuron] > 0){ deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron]); } break; case LEAKYRELU: if(outputs[layer][neuron] < 0){ deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron] * .01); } else{ deltaBiases[layer][neuron] += (LEARNING_RATE * error[layer][neuron]); } break; } } } /** * Changes the weights in the neural net * Parameter nn: the neural network to change the weights in * Parameter deltaWeights: the matrix of total unaveraged weight changes * Parameter numOutputs: the number of outputs to average * Returns: nothing */ __global__ void changeWeights(NeuralNet* nn, double*** deltaWeights, int numOutputs){ for(int layer = threadIdx.x + (blockDim.x * blockIdx.x); layer < nn->layers-1; layer += (blockDim.x * gridDim.x)){ for(int neuron1 = threadIdx.y + (blockDim.y * blockIdx.y); neuron1 < nn->neurons[layer]; neuron1 += (blockDim.y * gridDim.y)){ for(int neuron2 = threadIdx.z + (blockDim.z * blockIdx.z); neuron2 < nn->neurons[layer + 1]; neuron2 += (blockDim.z * gridDim.z)){ double delta=deltaWeights[layer][neuron1][neuron2]/numOutputs; if(delta<GRADIENT_CEILING && delta>-GRADIENT_CEILING){ nn->weights[layer][neuron1][neuron2] -= (deltaWeights[layer][neuron1][neuron2] / numOutputs); } else{ if(delta<0){ nn->weights[layer][neuron1][neuron2]-=(-GRADIENT_CEILING); } else{ nn->weights[layer][neuron1][neuron2]-=(GRADIENT_CEILING); } } } } } } /** * Changes the biases in the neural net * Parameter nn: the neural network to change the biases in * Parameter deltaBiases: the matrix of total unaveraged bias changes * Parameter numOutputs: the total number of outputs to average * Returns: nothing */ __global__ void changeBiases(NeuralNet* nn, double** deltaBiases, int numOutputs){ for(int layer = threadIdx.x + (blockDim.x * blockIdx.x); layer < nn->layers - 1; layer += (blockDim.x * gridDim.x)){ for(int neuron = threadIdx.y + (blockDim.y * blockIdx.y); neuron < nn->neurons[layer]; neuron += blockDim.y * gridDim.y){ double delta=deltaBiases[layer][neuron]/numOutputs; if(delta<GRADIENT_CEILING && delta>-GRADIENT_CEILING){ nn->biases[layer][neuron] -= (deltaBiases[layer][neuron] / numOutputs); } else{ if(delta<0){ nn->biases[layer][neuron] -= (-GRADIENT_CEILING); } else{ nn->biases[layer][neuron] -= (GRADIENT_CEILING); } } } } } /** * Backpropogates the neural network with the actual and expected outputs * 
Parameter nn: the neural network to backpropogate * Parameter actual: the actual outputs given by the neural network * Parameter expected: the expected outputs given by the neural network * Parameter numOutputs: the number of outputs * Returns: nothing */ void backpropogate(NeuralNet* nn, double*** outputs, double** expected, int numOutputs){ // Gets an matrix for the error double** error = getNeuralMatrix(nn); double** deltaBiasMatrix=getNeuralMatrix(nn); double*** deltaWeightMatrix=getNeuralWeightMatrix(nn); // Loops through the outputs for(int output = 0; output < numOutputs; output++){ for(int layer=nn->layers - 1; layer >= 0; layer--){ // Gets the error getErrorKernel<<<NUMBLOCKS, BLOCKSIZE>>>(nn, error, outputs[output], expected[output], layer); hipDeviceSynchronize(); // Backpropgate the weights/biases if(layer != nn->layers - 1){ backpropogationWeightsKernel<<<dim3(NUMBLOCKS, NUMBLOCKS), dim3(BLOCKSIZE/4, BLOCKSIZE/4)>>>(nn, outputs[output], error, deltaWeightMatrix, layer); hipDeviceSynchronize(); backpropogationBiasesKernel<<<NUMBLOCKS, BLOCKSIZE>>>(nn, outputs[output], error, deltaBiasMatrix, layer); hipDeviceSynchronize(); } } } // Changes the weights in the neural net changeWeights<<<dim3(NUMBLOCKS, NUMBLOCKS, NUMBLOCKS), dim3(BLOCKSIZE/16, BLOCKSIZE/16, BLOCKSIZE/16)>>>(nn, deltaWeightMatrix, numOutputs); hipDeviceSynchronize(); // Changes the biases in the neural net changeBiases<<<dim3(NUMBLOCKS, NUMBLOCKS), dim3(BLOCKSIZE/4, BLOCKSIZE/4)>>>(nn, deltaBiasMatrix, numOutputs); hipDeviceSynchronize(); // Frees the error matrix freeNeuralMatrix(error, nn); // Frees the bias matrix freeNeuralMatrix(deltaBiasMatrix, nn); // Frees the weight matrix freeNeuralWeightMatrix(deltaWeightMatrix, nn); } /** * Allocates memory for and zeros a matrix of the same size as the neural network's nodes * Parameter nn: the neural network to get the dimensions from * Returns: a matrix of the same size as the neural network's nodes */ double** getNeuralMatrix(NeuralNet* nn){ double** matrix; hipMallocManaged(&matrix, nn->layers*sizeof(double*)); for(int layer = 0; layer < nn->layers; layer++){ hipMallocManaged(&matrix[layer], nn->neurons[layer]*sizeof(double)); for(int neuron = 0; neuron < nn->neurons[layer]; neuron++){ matrix[layer][neuron] = 0; } } return matrix; } /** * Frees the neural matrix * Parameter matrix: the matrix to feree * Parameter nn: the neural network to get the dimensions from * Returns: nothing */ void freeNeuralMatrix(double** matrix, NeuralNet* nn){ for(int layer = 0; layer < nn->layers; layer++){ hipFree(matrix[layer]); } hipFree(matrix); } /** * Allocates memory and zeros a matrix of the same size as the neural network's weight matrix * Parameter nn: the neural network to get the weight matrix from * Returns: a matrix of the same size as the neural network's weight matrix */ double*** getNeuralWeightMatrix(NeuralNet* nn){ double*** matrix; hipMallocManaged(&matrix, nn->layers * sizeof(double**)); for(int layer = 0; layer < nn->layers-1; layer++){ hipMallocManaged(&matrix[layer], nn->neurons[layer] * sizeof(double*)); for(int neuron1 = 0; neuron1 < nn->neurons[layer]; neuron1++){ hipMallocManaged(&matrix[layer][neuron1], nn->neurons[layer+1] * sizeof(double)); for(int neuron2 = 0; neuron2 < nn->neurons[layer + 1]; neuron2++){ matrix[layer][neuron1][neuron2] = 0; } } } return matrix; } /** * Frees the neural weight matrix * Parameter matrix: the neural weight matrix * Parameter nn: the neural network to get the dimensions from * Returns: nothing */ void 
freeNeuralWeightMatrix(double*** matrix, NeuralNet* nn){ for(int layer = 0; layer < nn->layers - 1; layer++){ for(int neuron1 = 0; neuron1 < nn->neurons[layer]; neuron1++){ hipFree(matrix[layer][neuron1]); } hipFree(matrix[layer]); } hipFree(matrix); }
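hipMallocManaged compiles everywhere, but not every AMD device (or driver/OS combination) supports managed memory at run time. A defensive capability check before relying on it, as a sketch:

int managed = 0;
hipDeviceGetAttribute(&managed, hipDeviceAttributeManagedMemory, 0); // device 0
if (!managed) {
    // fall back to hipMalloc plus explicit hipMemcpy staging
}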
Convert the following CUDA code to AMD GPU code: cuda //add 2 arrays in parallel, often this is faster on CPU than GPU //reason is computation is not intense, require more data than //computation #include <cstdlib> #include <ctime> #include <iostream> #define BSZ 2048 #define TSZ 1024 #define TEST_SIZE BSZ * TSZ #define TT float #define EPS 10e-6 using namespace std; template <typename T> __global__ void add1d_cuda(T* c, T* a, T* b){ int idx = blockIdx.x * blockDim.x + threadIdx.x; c[idx] = a[idx] + b[idx]; } template <typename T> clock_t add1d(T* c, T* a, T* b, size_t sz){ for (size_t i = 0; i < sz; ++i) c[i] = a[i] + b[i]; return clock(); } template <typename T> void random_array(T* array, size_t sz){ srand(time(0)); for (size_t i = 0; i < sz; ++i) array[i] = (TT)rand() / 100.F; } int main(){ TT *a = new TT[TEST_SIZE], *b = new TT[TEST_SIZE], *c = new TT[TEST_SIZE], *d = new TT[TEST_SIZE]; TT* da, *db, *dc; random_array(a, TEST_SIZE); random_array(b, TEST_SIZE); cudaMalloc((void**)&da, sizeof(TT) * TEST_SIZE); cudaMalloc((void**)&db, sizeof(TT) * TEST_SIZE); cudaMalloc((void**)&dc, sizeof(TT) * TEST_SIZE); clock_t timing_start = clock(); cudaMemcpy(da, a, sizeof(TT) * TEST_SIZE, cudaMemcpyHostToDevice); cudaMemcpy(db, b, sizeof(TT) * TEST_SIZE, cudaMemcpyHostToDevice); add1d_cuda<<<BSZ, TSZ>>>(dc, da, db); cudaMemcpy(c, dc, sizeof(TT) * TEST_SIZE, cudaMemcpyDeviceToHost); cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; cudaFree(da); cudaFree(db); cudaFree(dc); timing_start = clock(); clock_t timing_end = add1d(d, a, b, TEST_SIZE); cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; bool match = true; for (size_t i = 0; i < TEST_SIZE; ++i) if (c[i] - d[i] > EPS){ cout << "value does not match" << endl; match = false; } if (match) cout << "All values match" << endl; }
//add 2 arrays in parallel, often this is faster on CPU than GPU //reason is computation is not intense, require more data than //computation #include <hip/hip_runtime.h> #include <cstdlib> #include <ctime> #include <iostream> #define BSZ 2048 #define TSZ 1024 #define TEST_SIZE BSZ * TSZ #define TT float #define EPS 10e-6 using namespace std; template <typename T> __global__ void add1d_cuda(T* c, T* a, T* b){ int idx = blockIdx.x * blockDim.x + threadIdx.x; c[idx] = a[idx] + b[idx]; } template <typename T> clock_t add1d(T* c, T* a, T* b, size_t sz){ for (size_t i = 0; i < sz; ++i) c[i] = a[i] + b[i]; return clock(); } template <typename T> void random_array(T* array, size_t sz){ srand(time(0)); for (size_t i = 0; i < sz; ++i) array[i] = (TT)rand() / 100.F; } int main(){ TT *a = new TT[TEST_SIZE], *b = new TT[TEST_SIZE], *c = new TT[TEST_SIZE], *d = new TT[TEST_SIZE]; TT* da, *db, *dc; random_array(a, TEST_SIZE); random_array(b, TEST_SIZE); hipMalloc((void**)&da, sizeof(TT) * TEST_SIZE); hipMalloc((void**)&db, sizeof(TT) * TEST_SIZE); hipMalloc((void**)&dc, sizeof(TT) * TEST_SIZE); clock_t timing_start = clock(); hipMemcpy(da, a, sizeof(TT) * TEST_SIZE, hipMemcpyHostToDevice); hipMemcpy(db, b, sizeof(TT) * TEST_SIZE, hipMemcpyHostToDevice); add1d_cuda<<<BSZ, TSZ>>>(dc, da, db); hipMemcpy(c, dc, sizeof(TT) * TEST_SIZE, hipMemcpyDeviceToHost); cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; hipFree(da); hipFree(db); hipFree(dc); timing_start = clock(); clock_t timing_end = add1d(d, a, b, TEST_SIZE); cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl; bool match = true; for (size_t i = 0; i < TEST_SIZE; ++i) if (c[i] - d[i] > EPS){ cout << "value does not match" << endl; match = false; } if (match) cout << "All values match" << endl; }
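The clock()-based span above mixes transfer time and kernel time into one "CUDA time" number. HIP events isolate the kernel itself, as a sketch:

hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
add1d_cuda<<<BSZ, TSZ>>>(dc, da, db);
hipEventRecord(stop, 0);
hipEventSynchronize(stop); // wait for the kernel to finish
float ms = 0.0f;
hipEventElapsedTime(&ms, start, stop); // kernel-only time in milliseconds
hipEventDestroy(start);
hipEventDestroy(stop);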
Convert the following CUDA code to AMD GPU code: cuda /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */ #include <stdio.h> #include <stdlib.h> #define N (1000*1000*8) /* Calculate SAXPY, single-precision vector math */ /* y[i]=a*x[i]+y[i] */ __global__ void saxpy (int n, float a, float *x, float *y) { int i=blockIdx.x*blockDim.x+threadIdx.x; /* Only run calculation if we are in range */ /* where i is valid. It can be out of range */ /* if our vector is shorter than a */ /* multiple of the blocksize */ if (i<n) { y[i]=a*x[i]+y[i]; } } int main(int argc, char **argv) { int i; float *x, *y, *dev_x, *dev_y; float a; float loops=1.0; if (argc>1) { loops=atof(argv[1]); } /* Allocate vectors on CPU */ x=(float *)malloc(N*sizeof(float)); y=(float *)malloc(N*sizeof(float)); /* Allocate vectors on GPU */ cudaMalloc((void **)&dev_x,N*sizeof(float)); cudaMalloc((void **)&dev_y,N*sizeof(float)); /* Initialize the host vectors */ for(i=0;i<N;i++) { x[i]=(float)i; y[i]=(float)(10.0*i); } cudaMemcpy(dev_x,x,N*sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(dev_y,y,N*sizeof(float),cudaMemcpyHostToDevice); printf("Size: %d\n",(N+255)/256); /* Perform SAXPY */ for(a=0;a<loops;a+=1.0) { saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y); } cudaMemcpy(y,dev_y,N*sizeof(float),cudaMemcpyDeviceToHost); /* results */ i=100; printf("y[%d]=%f %f\n",i,y[i],y[i+1]); /* 0: a=0, x=100, y=1000 ::::::: y=1000 */ /* 1: a=1, x=100, y=1000 ::::::: y=1100 */ /* 2: a=2, x=100, y=1100 ::::::: y=1300 */ /* 3: a=3, x=100, y=1300 ::::::: y=1600 */ /* 4: a=4, x=100, y=1600 ::::::: y=2000 */ cudaFree(dev_x); cudaFree(dev_y); return 0; }
/* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define N (1000*1000*8) /* Calculate SAXPY, single-precision vector math */ /* y[i]=a*x[i]+y[i] */ __global__ void saxpy (int n, float a, float *x, float *y) { int i=blockIdx.x*blockDim.x+threadIdx.x; /* Only run calculation if we are in range */ /* where i is valid. It can be out of range */ /* if our vector is shorter than a */ /* multiple of the blocksize */ if (i<n) { y[i]=a*x[i]+y[i]; } } int main(int argc, char **argv) { int i; float *x, *y, *dev_x, *dev_y; float a; float loops=1.0; if (argc>1) { loops=atof(argv[1]); } /* Allocate vectors on CPU */ x=(float *)malloc(N*sizeof(float)); y=(float *)malloc(N*sizeof(float)); /* Allocate vectors on GPU */ hipMalloc((void **)&dev_x,N*sizeof(float)); hipMalloc((void **)&dev_y,N*sizeof(float)); /* Initialize the host vectors */ for(i=0;i<N;i++) { x[i]=(float)i; y[i]=(float)(10.0*i); } hipMemcpy(dev_x,x,N*sizeof(float),hipMemcpyHostToDevice); hipMemcpy(dev_y,y,N*sizeof(float),hipMemcpyHostToDevice); printf("Size: %d\n",(N+255)/256); /* Perform SAXPY */ for(a=0;a<loops;a+=1.0) { saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y); } hipMemcpy(y,dev_y,N*sizeof(float),hipMemcpyDeviceToHost); /* results */ i=100; printf("y[%d]=%f %f\n",i,y[i],y[i+1]); /* 0: a=0, x=100, y=1000 ::::::: y=1000 */ /* 1: a=1, x=100, y=1000 ::::::: y=1100 */ /* 2: a=2, x=100, y=1100 ::::::: y=1300 */ /* 3: a=3, x=100, y=1300 ::::::: y=1600 */ /* 4: a=4, x=100, y=1600 ::::::: y=2000 */ hipFree(dev_x); hipFree(dev_y); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include <emmintrin.h> #include <sys/time.h> #include <stdio.h> //#include <> int N = 64000000; int nTrapsPow2 = 2046; int nSumsPow2 = 10; int nTraps; int nSums; int doPrint = 0; /////////////////////////////////////////////////////////////////////////////////////////////////////////// // HELPER CODE TO INITIALIZE, PRINT AND TIME struct timeval start, end; void initialize(float *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = pow(rand() % 10, 2); } } void print(float* a, int N) { if (doPrint) { int i; for (i = 0; i < N; ++i) printf("%f ", a[i]); printf("\n"); } } void starttime() { gettimeofday( &start, 0 ); } void endtime(const char* c) { gettimeofday( &end, 0 ); double elapsed = ( end.tv_sec - start.tv_sec ) * 1000.0 + ( end.tv_usec - start.tv_usec ) / 1000.0; printf("%s: %f ms\n", c, elapsed); } void init(const char* c) { printf("***************** %s **********************\n", c); //initialize(a, N); //print(a, N); starttime(); } void finish(const char* c) { endtime(c); //print(a, N); printf("***************************************************\n"); } ////////////////////////////////////////////////////////////////////////////////////////////////////////// // Normal C function to square root values void normal(float* a, int N) { int i; for (i = 0; i < N; ++i) a[i] = sqrt(a[i]); } // GPU function to square root values __global__ void gpu_sqrt(float* a, int N) { int element = blockIdx.x*blockDim.x + threadIdx.x; if (element < N) a[element] = sqrt(a[element]); } void gpu(float* a, int N) { int numThreads = 1024; // This can vary, up to 1024 int numCores = N / 1024 + 1; float* gpuA; cudaMalloc(&gpuA, N*sizeof(float)); // Allocate enough memory on the GPU cudaMemcpy(gpuA, a, N*sizeof(float), cudaMemcpyHostToDevice); // Copy array from CPU to GPU gpu_sqrt<<<numCores, numThreads>>>(gpuA, N); // Call GPU Sqrt cudaMemcpy(a, gpuA, N*sizeof(float), cudaMemcpyDeviceToHost); // Copy array from GPU to CPU cudaFree(&gpuA); // Free the memory on the GPU } ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// __device__ double myCurve(double x) { return x * x; } __device__ double Sum(int trapStart, int trapEnd, double* area) { int i; double total = 0; if(((trapEnd+1) - trapStart) <= 0) return -1; else { for(i = trapStart; i <= trapEnd; i++) { total += area[i]; } } return total; } __global__ void trapAreaPow2(int x1, int x2, double* area, int numTraps) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index < numTraps) { int n, t; double d, x; n = index + 2; n = log2((double) n); if(index == 6 || index == 62) { // printf("%d) log2(%f) = %d\n", index,(double) (index+2), n); n++; } n = exp2((double) n); d = (x2 - x1) / (double)n; t = index - n + 3; x = ((t - 1) * d) + x1; area[index] = ((myCurve(x) + myCurve(x+d)) / 2) * d; if(index >= 1022 && index <= 2045) { //printf("index = %d:\nn = %d\nd = %f\nt = %d\nx = %f\n", index, n, d, t, x); //printf("area[%d] = %f\n", index, area[index]); //printf("index = %d, n = %d\n", index, n); } } } __global__ void trapSumPow2(double* sum, int numSums, double* area) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index < numSums) { int n, trapStart, trapEnd; n = exp2((double) (index+1)); trapStart = n - 2; trapEnd = (n * 2) - 3; sum[index] = Sum(trapStart, trapEnd, area); } } __global__ void trapArea(int x1, int x2, int S, double* area, int numTraps) { int index = 
blockIdx.x*blockDim.x + threadIdx.x; if(index < numTraps) { int a, b, t, n, i=0; double d, c; t = index; while(t >= 0) { t = t - S - i; i++; } a = i - 1; n = S + a; d = (x2 - x1) / (double) n; b = ((a * a) - a) / 2; c = index - ((a * S) + b); c = x1 + (c * d); area[index] = ((myCurve(c) + myCurve(c+d)) / 2) * d; // printf("index: %d\na: %d\nn: %d\nd: %f\nc: %f\narea[%d] = %f\n", index, a, n, d, c, index, area[index]); } } __global__ void trapSum(int S, double* sum, int numSums, double* area) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index < numSums) { int trapStart, trapEnd; trapStart = (index * S) + (((index*index) - index) / 2); trapEnd = trapStart + S + index - 1; sum[index] = Sum(trapStart, trapEnd, area); } } int main() { init("Trapezoidal Sum"); int S = 0, x1 = 1, x2 = 4; int i, x = 0; double epsilon = 0.000001; double answer = 21; double* areaPow2; double* sumPow2; double* answersPow2; double* areaLinear; double* sumLinear; int numThreads = 1024; int numCores = nTrapsPow2 / numThreads + 1; double* answersLinear; // printf("no fault here\n"); answersPow2 = (double*)malloc(nSumsPow2*sizeof(double)); cudaMalloc(&areaPow2, nTrapsPow2*sizeof(double)); cudaMalloc(&sumPow2, nSumsPow2*sizeof(double)); // printf("so far so good\n"); trapAreaPow2<<<numCores, numThreads>>>(x1, x2, areaPow2, nTrapsPow2); // printf("got past trapAreaPow2\n"); numCores = nSumsPow2 / numThreads + 1; trapSumPow2<<<numCores, numThreads>>>(sumPow2, nSumsPow2, areaPow2); // printf("made it past trapSumPow2\n"); cudaMemcpy(answersPow2, sumPow2, nSumsPow2*sizeof(double), cudaMemcpyDeviceToHost); // printf("cudaMemcpy was sucessful\n"); // printf("nSumsPow2 = %d\n", nSumsPow2); for(i = nSumsPow2 - 1; i >= 0; i--) { printf("answersPow2[%d] = %f\n", i, answersPow2[i]); if((answer - answersPow2[i]) >= -epsilon && (answer - answersPow2[i]) <= epsilon) { x = i + 1; } } printf("loop sucessful\n"); S = exp2((double) x) / 2; printf("x = %d and S = %d\n", x, S); printf("S = %d, the sum with %d trapazoids was %f\n", S, S*2, answersPow2[x-1]); if(S == 1) printf("done\n"); else if(S == 0) printf("1024 trapazoids is too few to get an answer within epsilon.\n"); else { nTraps = ((S + 1) * S) + (((S * S) + S) / 2); nSums = S + 1; printf("nSums = %d\n", nSums); cudaMalloc(&sumLinear, nSums * sizeof(double)); cudaMalloc(&areaLinear, nTraps * sizeof(double)); numCores = (nTraps/numThreads) + 1; trapArea<<<numCores, numThreads>>>(x1, x2, S, areaLinear, nTraps); printf("this works\n"); numCores = (nSums/numThreads) + 1; trapSum<<<numCores, numThreads>>>(S, sumLinear, nSums, areaLinear); answersLinear = (double*) malloc(nSums * sizeof(double)); cudaMemcpy(answersLinear, sumLinear, nSums*sizeof(double), cudaMemcpyDeviceToHost); for(i=nSums-1; i >= 0; i--) { printf("answersLinear[%d] = %f\n", i, answersLinear[i]); if((answer - answersLinear[i]) >= -epsilon && (answer - answersLinear[i]) <= epsilon) x = i; } x = S + x; printf("This is the minimunm number of trapezoids needed to compute a trapezoidal sum that is within our epsilon of the actual answer: %d\n", x); } finish("Trapezoidal Sum"); ////////////////////////////////////////////////////////////////////////// // Necessary if you are doing SSE. 
Align on a 128-bit boundary (16 bytes) float* a; posix_memalign((void**)&a, 16, N * sizeof(float)); ///////////////////////////////////////////////////////////////////////// // Test 1: Sequential For Loop // init(a, N, "Normal"); // normal(a, N); // finish(a, N, "Normal"); // Test 2: Vectorization // init(a, N, "GPU"); // gpu(a, N); // finish(a, N, "GPU"); return 0; }
#include <hip/hip_runtime.h> #include <emmintrin.h> #include <sys/time.h> #include <stdio.h> //#include <> int N = 64000000; int nTrapsPow2 = 2046; int nSumsPow2 = 10; int nTraps; int nSums; int doPrint = 0; /////////////////////////////////////////////////////////////////////////////////////////////////////////// // HELPER CODE TO INITIALIZE, PRINT AND TIME struct timeval start, end; void initialize(float *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = pow(rand() % 10, 2); } } void print(float* a, int N) { if (doPrint) { int i; for (i = 0; i < N; ++i) printf("%f ", a[i]); printf("\n"); } } void starttime() { gettimeofday( &start, 0 ); } void endtime(const char* c) { gettimeofday( &end, 0 ); double elapsed = ( end.tv_sec - start.tv_sec ) * 1000.0 + ( end.tv_usec - start.tv_usec ) / 1000.0; printf("%s: %f ms\n", c, elapsed); } void init(const char* c) { printf("***************** %s **********************\n", c); //initialize(a, N); //print(a, N); starttime(); } void finish(const char* c) { endtime(c); //print(a, N); printf("***************************************************\n"); } ////////////////////////////////////////////////////////////////////////////////////////////////////////// // Normal C function to square root values void normal(float* a, int N) { int i; for (i = 0; i < N; ++i) a[i] = sqrt(a[i]); } // GPU function to square root values __global__ void gpu_sqrt(float* a, int N) { int element = blockIdx.x*blockDim.x + threadIdx.x; if (element < N) a[element] = sqrt(a[element]); } void gpu(float* a, int N) { int numThreads = 1024; // This can vary, up to 1024 int numCores = N / 1024 + 1; float* gpuA; hipMalloc(&gpuA, N*sizeof(float)); // Allocate enough memory on the GPU hipMemcpy(gpuA, a, N*sizeof(float), hipMemcpyHostToDevice); // Copy array from CPU to GPU gpu_sqrt<<<numCores, numThreads>>>(gpuA, N); // Call GPU Sqrt hipMemcpy(a, gpuA, N*sizeof(float), hipMemcpyDeviceToHost); // Copy array from GPU to CPU hipFree(&gpuA); // Free the memory on the GPU } ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// __device__ double myCurve(double x) { return x * x; } __device__ double Sum(int trapStart, int trapEnd, double* area) { int i; double total = 0; if(((trapEnd+1) - trapStart) <= 0) return -1; else { for(i = trapStart; i <= trapEnd; i++) { total += area[i]; } } return total; } __global__ void trapAreaPow2(int x1, int x2, double* area, int numTraps) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index < numTraps) { int n, t; double d, x; n = index + 2; n = log2((double) n); if(index == 6 || index == 62) { // printf("%d) log2(%f) = %d\n", index,(double) (index+2), n); n++; } n = exp2((double) n); d = (x2 - x1) / (double)n; t = index - n + 3; x = ((t - 1) * d) + x1; area[index] = ((myCurve(x) + myCurve(x+d)) / 2) * d; if(index >= 1022 && index <= 2045) { //printf("index = %d:\nn = %d\nd = %f\nt = %d\nx = %f\n", index, n, d, t, x); //printf("area[%d] = %f\n", index, area[index]); //printf("index = %d, n = %d\n", index, n); } } } __global__ void trapSumPow2(double* sum, int numSums, double* area) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index < numSums) { int n, trapStart, trapEnd; n = exp2((double) (index+1)); trapStart = n - 2; trapEnd = (n * 2) - 3; sum[index] = Sum(trapStart, trapEnd, area); } } __global__ void trapArea(int x1, int x2, int S, double* area, int numTraps) { int index = blockIdx.x*blockDim.x + 
threadIdx.x; if(index < numTraps) { int a, b, t, n, i=0; double d, c; t = index; while(t >= 0) { t = t - S - i; i++; } a = i - 1; n = S + a; d = (x2 - x1) / (double) n; b = ((a * a) - a) / 2; c = index - ((a * S) + b); c = x1 + (c * d); area[index] = ((myCurve(c) + myCurve(c+d)) / 2) * d; // printf("index: %d\na: %d\nn: %d\nd: %f\nc: %f\narea[%d] = %f\n", index, a, n, d, c, index, area[index]); } } __global__ void trapSum(int S, double* sum, int numSums, double* area) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index < numSums) { int trapStart, trapEnd; trapStart = (index * S) + (((index*index) - index) / 2); trapEnd = trapStart + S + index - 1; sum[index] = Sum(trapStart, trapEnd, area); } } int main() { init("Trapezoidal Sum"); int S = 0, x1 = 1, x2 = 4; int i, x = 0; double epsilon = 0.000001; double answer = 21; double* areaPow2; double* sumPow2; double* answersPow2; double* areaLinear; double* sumLinear; int numThreads = 1024; int numCores = nTrapsPow2 / numThreads + 1; double* answersLinear; // printf("no fault here\n"); answersPow2 = (double*)malloc(nSumsPow2*sizeof(double)); hipMalloc(&areaPow2, nTrapsPow2*sizeof(double)); hipMalloc(&sumPow2, nSumsPow2*sizeof(double)); // printf("so far so good\n"); trapAreaPow2<<<numCores, numThreads>>>(x1, x2, areaPow2, nTrapsPow2); // printf("got past trapAreaPow2\n"); numCores = nSumsPow2 / numThreads + 1; trapSumPow2<<<numCores, numThreads>>>(sumPow2, nSumsPow2, areaPow2); // printf("made it past trapSumPow2\n"); hipMemcpy(answersPow2, sumPow2, nSumsPow2*sizeof(double), hipMemcpyDeviceToHost); // printf("cudaMemcpy was sucessful\n"); // printf("nSumsPow2 = %d\n", nSumsPow2); for(i = nSumsPow2 - 1; i >= 0; i--) { printf("answersPow2[%d] = %f\n", i, answersPow2[i]); if((answer - answersPow2[i]) >= -epsilon && (answer - answersPow2[i]) <= epsilon) { x = i + 1; } } printf("loop sucessful\n"); S = exp2((double) x) / 2; printf("x = %d and S = %d\n", x, S); printf("S = %d, the sum with %d trapazoids was %f\n", S, S*2, answersPow2[x-1]); if(S == 1) printf("done\n"); else if(S == 0) printf("1024 trapazoids is too few to get an answer within epsilon.\n"); else { nTraps = ((S + 1) * S) + (((S * S) + S) / 2); nSums = S + 1; printf("nSums = %d\n", nSums); hipMalloc(&sumLinear, nSums * sizeof(double)); hipMalloc(&areaLinear, nTraps * sizeof(double)); numCores = (nTraps/numThreads) + 1; trapArea<<<numCores, numThreads>>>(x1, x2, S, areaLinear, nTraps); printf("this works\n"); numCores = (nSums/numThreads) + 1; trapSum<<<numCores, numThreads>>>(S, sumLinear, nSums, areaLinear); answersLinear = (double*) malloc(nSums * sizeof(double)); hipMemcpy(answersLinear, sumLinear, nSums*sizeof(double), hipMemcpyDeviceToHost); for(i=nSums-1; i >= 0; i--) { printf("answersLinear[%d] = %f\n", i, answersLinear[i]); if((answer - answersLinear[i]) >= -epsilon && (answer - answersLinear[i]) <= epsilon) x = i; } x = S + x; printf("This is the minimunm number of trapezoids needed to compute a trapezoidal sum that is within our epsilon of the actual answer: %d\n", x); } finish("Trapezoidal Sum"); ////////////////////////////////////////////////////////////////////////// // Necessary if you are doing SSE. Align on a 128-bit boundary (16 bytes) float* a; posix_memalign((void**)&a, 16, N * sizeof(float)); ///////////////////////////////////////////////////////////////////////// // Test 1: Sequential For Loop // init(a, N, "Normal"); // normal(a, N); // finish(a, N, "Normal"); // Test 2: Vectorization // init(a, N, "GPU"); // gpu(a, N); // finish(a, N, "GPU"); return 0; }
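One bug survives the conversion in the helper gpu(): hipFree(&gpuA) passes the address of the host pointer variable rather than the device pointer it holds (the CUDA original makes the same cudaFree(&gpuA) mistake). The corrected call:

hipFree(gpuA); // free the device allocation, not the host variable holding it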
Convert the following CUDA code to AMD GPU code: cuda /*************************************************** * Module that applay the function sigmoid to all the elements of the matrix * Author: Alonso Vidales <[email protected]> * * To be compiled with nvcc -ptx matrix_sum_all.cu * Debug: nvcc -arch=sm_20 -ptx matrix_sum_all.cu * **************************************************/ #include <stdio.h> #ifdef __cplusplus extern "C" { #endif // CUDA Kernel __global__ void matrixSumAll(double* A, int wA, int size, double* sum) { __shared__ double res[1024]; double result = 0; for (int bx = 0; bx < wA; bx++) { int pos = (threadIdx.x * wA) + bx; if (pos < size) { result += A[pos]; //printf("Thread %d Pos %d Val: %f\n", threadIdx.x, pos, res[threadIdx.x]); } } res[threadIdx.x] = result; __syncthreads(); if(threadIdx.x == 0) { for (int i = 1; i < 1024; i++) { res[0] += res[i]; } sum[0] = res[0]; } } #ifdef __cplusplus } #endif
/*************************************************** * Module that applay the function sigmoid to all the elements of the matrix * Author: Alonso Vidales <[email protected]> * * To be compiled with nvcc -ptx matrix_sum_all.cu * Debug: nvcc -arch=sm_20 -ptx matrix_sum_all.cu * **************************************************/ #include <hip/hip_runtime.h> #include <stdio.h> #ifdef __cplusplus extern "C" { #endif // CUDA Kernel __global__ void matrixSumAll(double* A, int wA, int size, double* sum) { __shared__ double res[1024]; double result = 0; for (int bx = 0; bx < wA; bx++) { int pos = (threadIdx.x * wA) + bx; if (pos < size) { result += A[pos]; //printf("Thread %d Pos %d Val: %f\n", threadIdx.x, pos, res[threadIdx.x]); } } res[threadIdx.x] = result; __syncthreads(); if(threadIdx.x == 0) { for (int i = 1; i < 1024; i++) { res[0] += res[i]; } sum[0] = res[0]; } } #ifdef __cplusplus } #endif
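The kernel above serializes the final 1024-way sum in thread 0. A standard tree reduction (a sketch, assuming blockDim.x stays a power of two, as the fixed 1024 here is) finishes in log2(1024) = 10 synchronized steps:

for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (threadIdx.x < stride)
        res[threadIdx.x] += res[threadIdx.x + stride];
    __syncthreads();
}
if (threadIdx.x == 0)
    sum[0] = res[0];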
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> /* CUDA C keyword __global__ indicates that a function - runs on the device - and is called from host code */ // define kernel for add two integers __global__ void add_kernel(int *dev_c, int const *dev_a, int const *dev_b) { // assign to the value in address 'c' the addition of values in addresses 'a' and 'b' *dev_c = *dev_a + *dev_b; } int main() { int a, b, c; // host copies of a, b, c int *dev_a, *dev_b, *dev_c; // device copies of a, b, c int size = sizeof(int); // we need space for an integer // allocates device memory for a, b, c cudaMalloc((void**)&dev_a, size); cudaMalloc((void**)&dev_b, size); cudaMalloc((void**)&dev_c, size); // assign values to host a, b a = 2; b = 3; // assign values to device a, b cudaMemcpy((void*)dev_a, &a, size, cudaMemcpyKind::cudaMemcpyHostToDevice); cudaMemcpy(dev_b, &b, size, cudaMemcpyKind::cudaMemcpyHostToDevice); // launch kernel for addition add_kernel<<<1, 1>>>(dev_c, dev_a, dev_b); // copy result from device to host copy of c cudaMemcpy(&c, dev_c, size, cudaMemcpyKind::cudaMemcpyDeviceToHost); // log results printf("c: %d\n", c); // de-allocates device memory cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
#include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> /* CUDA C keyword __global__ indicates that a function - runs on the device - and is called from host code */ // define kernel for add two integers __global__ void add_kernel(int *dev_c, int const *dev_a, int const *dev_b) { // assign to the value in address 'c' the addition of values in addresses 'a' and 'b' *dev_c = *dev_a + *dev_b; } int main() { int a, b, c; // host copies of a, b, c int *dev_a, *dev_b, *dev_c; // device copies of a, b, c int size = sizeof(int); // we need space for an integer // allocates device memory for a, b, c hipMalloc((void**)&dev_a, size); hipMalloc((void**)&dev_b, size); hipMalloc((void**)&dev_c, size); // assign values to host a, b a = 2; b = 3; // assign values to device a, b hipMemcpy((void*)dev_a, &a, size, hipMemcpyKind::hipMemcpyHostToDevice); hipMemcpy(dev_b, &b, size, hipMemcpyKind::hipMemcpyHostToDevice); // launch kernel for addition add_kernel<<<1, 1>>>(dev_c, dev_a, dev_b); // copy result from device to host copy of c hipMemcpy(&c, dev_c, size, hipMemcpyKind::hipMemcpyDeviceToHost); // log results printf("c: %d\n", c); // de-allocates device memory hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include "LinkTest.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void addKernel(int* c, const int* a, const int* b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } void addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; cudaSetDevice(0); cudaMalloc((void**)&dev_c, size * sizeof(int)); cudaMalloc((void**)&dev_a, size * sizeof(int)); cudaMalloc((void**)&dev_b, size * sizeof(int)); cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); addKernel << <1, size >> > (dev_c, dev_a, dev_b); cudaDeviceSynchronize(); cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); } template<unsigned int D>void AHH<D>::set() { gg[0] = 1; }
#include <hip/hip_runtime.h> /* declarations formerly in LinkTest.cuh */ void addWithCuda(int* c, const int* a, const int* b, unsigned int size); template<unsigned int D>struct AHH { int gg[D]; void set(); }; __global__ void addKernel(int* c, const int* a, const int* b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } void addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; hipSetDevice(0); hipMalloc((void**)&dev_c, size * sizeof(int)); hipMalloc((void**)&dev_a, size * sizeof(int)); hipMalloc((void**)&dev_b, size * sizeof(int)); hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); addKernel << <1, size >> > (dev_c, dev_a, dev_b); hipDeviceSynchronize(); hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); } template<unsigned int D>void AHH<D>::set() { gg[0] = 1; }
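A hedged caller sketch for the function above (array contents are illustrative):

int a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40}, c[4];
addWithCuda(c, a, b, 4); // c becomes {11, 22, 33, 44}
// note: as in the original CUDA source, dev_a/dev_b/dev_c are never freed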
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void device_ll () { __shared__ double partial_ll[REDUC_THREADS] ; int i, n, ntarg, index ; double sum_ll ; index = threadIdx.x ; n = d_ncases ; ntarg = d_ntarg ; sum_ll = 0.0 ; for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) sum_ll -= log ( d_output[i*ntarg+d_class[i]] + 1.e-30 ) ; partial_ll[index] = sum_ll ; __syncthreads() ; for (i=blockDim.x>>1 ; i ; i>>=1) { if (index < i) partial_ll[index] += partial_ll[index+i] ; __syncthreads() ; } if (index == 0) d_mse_out[blockIdx.x] = partial_ll[0] ; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void device_ll () { __shared__ double partial_ll[REDUC_THREADS] ; int i, n, ntarg, index ; double sum_ll ; index = threadIdx.x ; n = d_ncases ; ntarg = d_ntarg ; sum_ll = 0.0 ; for (i=blockIdx.x*blockDim.x+index ; i<n ; i+=blockDim.x*gridDim.x) sum_ll -= log ( d_output[i*ntarg+d_class[i]] + 1.e-30 ) ; partial_ll[index] = sum_ll ; __syncthreads() ; for (i=blockDim.x>>1 ; i ; i>>=1) { if (index < i) partial_ll[index] += partial_ll[index+i] ; __syncthreads() ; } if (index == 0) d_mse_out[blockIdx.x] = partial_ll[0] ; }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdint.h> #include <chrono> // Constant values. #define TB_SIZE 256 uint64_t ARRAY_SIZE = 300'000'000; // Check if the given pointer is NULL. #define PTR_CHECK(cmd) if ((x) == NULL) { \ printf("ERROR: null pointer at line %d\n", __LINE__); abort(); } // Check if the given command has returned an error. #define CUDA_CHECK(cmd) if ((cmd) != cudaSuccess) { \ printf("ERROR: cuda error at line %d\n", __LINE__); abort(); } // Kind of benchmark to do. enum BenchKind { kBoth, kOnlyCPU, kOnlyGPU }; // Run SAXPY on GPU. __global__ void SAXPY_gpu(float *x, float *y, float a, uint64_t ARRAY_SIZE) { auto index = blockDim.x * blockIdx.x + threadIdx.x; if (index < ARRAY_SIZE) y[index] += a * x[index]; } // Run SAXPY on CPU. void SAXPY_cpu(float *x, float *y, float a) { for (uint64_t index = 0; index < ARRAY_SIZE; index++) y[index] += a * x[index]; } // Initialize input array. void initArray(float *array) { for (uint64_t index = 0; index < ARRAY_SIZE; index++) array[index] = (float)index / 1.42; } // Entry point of the program. int main(int argc, const char **argv) { // When the program is ran with -h, show usage. if (argc == 2 && !strcmp(argv[1], "-h")) { printf("Usage: ./exercise_2 [array size] [kind]\n"); exit(0); } // Read data from CLI. if (argc >= 2) ARRAY_SIZE = atoll(argv[1]); printf("Using array size of %llu\n", ARRAY_SIZE); BenchKind kind = kBoth; if (argc >= 3 && !strcmp(argv[2], "cpu")) kind = kOnlyCPU; if (argc >= 3 && !strcmp(argv[2], "gpu")) kind = kOnlyGPU; // Allocate data elements. float a = 12.f; float *x = (float *)malloc(sizeof(float) * ARRAY_SIZE); PTR_CHECK(x); float *y = (float *)malloc(sizeof(float) * ARRAY_SIZE); PTR_CHECK(x); // Run SAXPY on GPU. if (kind != kOnlyCPU) { printf("Starting GPU test ...\n"); initArray(x); initArray(y); float *gpux, *gpuy; CUDA_CHECK(cudaMalloc(&gpux, sizeof(float) * ARRAY_SIZE)); CUDA_CHECK(cudaMalloc(&gpuy, sizeof(float) * ARRAY_SIZE)); auto start = std::chrono::system_clock::now(); CUDA_CHECK(cudaMemcpy(gpux, x, sizeof(float) * ARRAY_SIZE, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(gpuy, y, sizeof(float) * ARRAY_SIZE, cudaMemcpyHostToDevice)); SAXPY_gpu<<<((ARRAY_SIZE + TB_SIZE - 1) / TB_SIZE), TB_SIZE>>>(gpux, gpuy, a, ARRAY_SIZE); cudaDeviceSynchronize(); CUDA_CHECK(cudaMemcpy(y, gpuy, sizeof(float) * ARRAY_SIZE, cudaMemcpyDeviceToHost)); auto end = std::chrono::system_clock::now(); int ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(); printf("GPU time: %d ms\n", ms); } // Run SAXPY on CPU. if (kind != kOnlyGPU) { printf("Starting CPU test ...\n"); initArray(x); initArray(y); auto start2 = std::chrono::system_clock::now(); SAXPY_cpu(x, y, a); auto end2 = std::chrono::system_clock::now(); int ms2 = std::chrono::duration_cast<std::chrono::milliseconds>(end2 - start2).count(); printf("CPU time: %d ms\n", ms2); } printf("Done\n", y[13]); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdint.h> #include <chrono> // Constant values. #define TB_SIZE 256 uint64_t ARRAY_SIZE = 300'000'000; // Check if the given pointer is NULL. #define PTR_CHECK(cmd) if ((x) == NULL) { \ printf("ERROR: null pointer at line %d\n", __LINE__); abort(); } // Check if the given command has returned an error. #define CUDA_CHECK(cmd) if ((cmd) != hipSuccess) { \ printf("ERROR: cuda error at line %d\n", __LINE__); abort(); } // Kind of benchmark to do. enum BenchKind { kBoth, kOnlyCPU, kOnlyGPU }; // Run SAXPY on GPU. __global__ void SAXPY_gpu(float *x, float *y, float a, uint64_t ARRAY_SIZE) { auto index = blockDim.x * blockIdx.x + threadIdx.x; if (index < ARRAY_SIZE) y[index] += a * x[index]; } // Run SAXPY on CPU. void SAXPY_cpu(float *x, float *y, float a) { for (uint64_t index = 0; index < ARRAY_SIZE; index++) y[index] += a * x[index]; } // Initialize input array. void initArray(float *array) { for (uint64_t index = 0; index < ARRAY_SIZE; index++) array[index] = (float)index / 1.42; } // Entry point of the program. int main(int argc, const char **argv) { // When the program is ran with -h, show usage. if (argc == 2 && !strcmp(argv[1], "-h")) { printf("Usage: ./exercise_2 [array size] [kind]\n"); exit(0); } // Read data from CLI. if (argc >= 2) ARRAY_SIZE = atoll(argv[1]); printf("Using array size of %llu\n", ARRAY_SIZE); BenchKind kind = kBoth; if (argc >= 3 && !strcmp(argv[2], "cpu")) kind = kOnlyCPU; if (argc >= 3 && !strcmp(argv[2], "gpu")) kind = kOnlyGPU; // Allocate data elements. float a = 12.f; float *x = (float *)malloc(sizeof(float) * ARRAY_SIZE); PTR_CHECK(x); float *y = (float *)malloc(sizeof(float) * ARRAY_SIZE); PTR_CHECK(x); // Run SAXPY on GPU. if (kind != kOnlyCPU) { printf("Starting GPU test ...\n"); initArray(x); initArray(y); float *gpux, *gpuy; CUDA_CHECK(hipMalloc(&gpux, sizeof(float) * ARRAY_SIZE)); CUDA_CHECK(hipMalloc(&gpuy, sizeof(float) * ARRAY_SIZE)); auto start = std::chrono::system_clock::now(); CUDA_CHECK(hipMemcpy(gpux, x, sizeof(float) * ARRAY_SIZE, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(gpuy, y, sizeof(float) * ARRAY_SIZE, hipMemcpyHostToDevice)); SAXPY_gpu<<<((ARRAY_SIZE + TB_SIZE - 1) / TB_SIZE), TB_SIZE>>>(gpux, gpuy, a, ARRAY_SIZE); hipDeviceSynchronize(); CUDA_CHECK(hipMemcpy(y, gpuy, sizeof(float) * ARRAY_SIZE, hipMemcpyDeviceToHost)); auto end = std::chrono::system_clock::now(); int ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(); printf("GPU time: %d ms\n", ms); } // Run SAXPY on CPU. if (kind != kOnlyGPU) { printf("Starting CPU test ...\n"); initArray(x); initArray(y); auto start2 = std::chrono::system_clock::now(); SAXPY_cpu(x, y, a); auto end2 = std::chrono::system_clock::now(); int ms2 = std::chrono::duration_cast<std::chrono::milliseconds>(end2 - start2).count(); printf("CPU time: %d ms\n", ms2); } printf("Done\n", y[13]); }
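The ported CUDA_CHECK macro still prints only a line number; a hedged variant that also reports the HIP error string (a sketch, not part of the original):

// reports the error text via hipGetErrorString; do/while makes it safe in if/else
#define HIP_CHECK(cmd) do { hipError_t e = (cmd); if (e != hipSuccess) { \
    printf("ERROR: %s at line %d\n", hipGetErrorString(e), __LINE__); abort(); } } while (0)

The kernel launch itself returns no status, so checking it would require a hipGetLastError() call after the launch.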
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void kernel_test0_global_write(char* _ptr, char* _end_ptr) { unsigned int* ptr = (unsigned int*)_ptr; unsigned int* end_ptr = (unsigned int*)_end_ptr; unsigned int* orig_ptr = ptr; unsigned int pattern = 1; unsigned long mask = 4; *ptr = pattern; while(ptr < end_ptr){ ptr = (unsigned int*) ( ((unsigned long)orig_ptr) | mask); if (ptr == orig_ptr){ mask = mask <<1; continue; } if (ptr >= end_ptr){ break; } *ptr = pattern; pattern = pattern << 1; mask = mask << 1; } return; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void kernel_test0_global_write(char* _ptr, char* _end_ptr) { unsigned int* ptr = (unsigned int*)_ptr; unsigned int* end_ptr = (unsigned int*)_end_ptr; unsigned int* orig_ptr = ptr; unsigned int pattern = 1; unsigned long mask = 4; *ptr = pattern; while(ptr < end_ptr){ ptr = (unsigned int*) ( ((unsigned long)orig_ptr) | mask); if (ptr == orig_ptr){ mask = mask <<1; continue; } if (ptr >= end_ptr){ break; } *ptr = pattern; pattern = pattern << 1; mask = mask << 1; } return; }
Convert the following CUDA code to AMD GPU code: cuda // This example demonstrates the use of shared per-block arrays // implement an optimized dense matrix multiplication algorithm. // Like the shared_variables.cu example, a per-block __shared__ // array acts as a "bandwidth multiplier" by eliminating redundant // loads issued by neighboring threads. #include <stdlib.h> #include <stdio.h> #include <vector> #include <algorithm> #include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #define TILE_WIDTH 16 // a simple version of matrix_multiply which issues redundant loads from off-chip global memory __global__ void matrix_multiply_simple(int *a, int *b, int *ab, size_t width) { // calculate the row & column index of the element int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int result = 0; // do dot product between row of a and column of b for(int k = 0; k < width; ++k) { result += a[row*width+k] * b[k*width+col]; } // write out this thread's result ab[row*width+col] = result; } void MatrixMulOnHost(int* M, int* N, int* P, int Width) { for (int i = 0; i < Width; ++i) { for (int j = 0; j < Width; ++j) { double sum = 0; for (int k = 0; k < Width; ++k) { double a = M[i * Width + k]; double b = N[k * Width + j]; sum += a * b; } P[i * Width + j] = sum; } } } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements //const size_t n = 1<<10; const size_t n = 1024; std::cout << "Total element is " << n << "\n"; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n); for(int i = 0; i < n*n; ++i) { h_a[i] = static_cast<int>(rand()) / RAND_MAX; h_b[i] = static_cast<int>(rand()) / RAND_MAX; } // allocate storage for the device int *d_a = 0, *d_b = 0, *d_c = 0; cudaMalloc((void**)&d_a, sizeof(int) * n * n); cudaMalloc((void**)&d_b, sizeof(int) * n * n); cudaMalloc((void**)&d_c, sizeof(int) * n * n); // copy input to the device cudaMemcpy(d_a, &h_a[0], sizeof(int) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(d_b, &h_b[0], sizeof(int) * n * n, cudaMemcpyHostToDevice); // to get accurate timings, launch a single "warm-up" kernel matrix_multiply_simple<<<num_blocks,block_size>>>(d_a, d_b, d_c, n); cudaMemcpy(&h_c[0], d_c, sizeof(int) * n * n, cudaMemcpyDeviceToHost); //------------------ int* h_r; h_r = (int*)malloc(sizeof(int) * n * n); MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n); for (int i=0; i<(n*n); i++) { if (h_r[i] != h_c[i]) { std::cout << "Failed at i " << i << "h_r=" << h_r[i] << ",h_c=" << h_c[i] << "\n"; exit(1); } } std::cout << "Result is correct."; // deallocate device memory cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
// This example demonstrates the use of shared per-block arrays // implement an optimized dense matrix multiplication algorithm. // Like the shared_variables.cu example, a per-block __shared__ // array acts as a "bandwidth multiplier" by eliminating redundant // loads issued by neighboring threads. #include <stdlib.h> #include <stdio.h> #include <vector> #include <algorithm> #include <iostream> #include "hip/hip_runtime.h" #define TILE_WIDTH 16 // a simple version of matrix_multiply which issues redundant loads from off-chip global memory __global__ void matrix_multiply_simple(int *a, int *b, int *ab, size_t width) { // calculate the row & column index of the element int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; int result = 0; // do dot product between row of a and column of b for(int k = 0; k < width; ++k) { result += a[row*width+k] * b[k*width+col]; } // write out this thread's result ab[row*width+col] = result; } void MatrixMulOnHost(int* M, int* N, int* P, int Width) { for (int i = 0; i < Width; ++i) { for (int j = 0; j < Width; ++j) { double sum = 0; for (int k = 0; k < Width; ++k) { double a = M[i * Width + k]; double b = N[k * Width + j]; sum += a * b; } P[i * Width + j] = sum; } } } int main(void) { // create a large workload so we can easily measure the // performance difference of both implementations // note that n measures the width of the matrix, not the number of total elements //const size_t n = 1<<10; const size_t n = 1024; std::cout << "Total element is " << n << "\n"; const dim3 block_size(TILE_WIDTH,TILE_WIDTH); const dim3 num_blocks(n / block_size.x, n / block_size.y); // generate random input on the host std::vector<int> h_a(n*n), h_b(n*n), h_c(n*n); for(int i = 0; i < n*n; ++i) { h_a[i] = static_cast<int>(rand()) / RAND_MAX; h_b[i] = static_cast<int>(rand()) / RAND_MAX; } // allocate storage for the device int *d_a = 0, *d_b = 0, *d_c = 0; hipMalloc((void**)&d_a, sizeof(int) * n * n); hipMalloc((void**)&d_b, sizeof(int) * n * n); hipMalloc((void**)&d_c, sizeof(int) * n * n); // copy input to the device hipMemcpy(d_a, &h_a[0], sizeof(int) * n * n, hipMemcpyHostToDevice); hipMemcpy(d_b, &h_b[0], sizeof(int) * n * n, hipMemcpyHostToDevice); // to get accurate timings, launch a single "warm-up" kernel matrix_multiply_simple<<<num_blocks,block_size>>>(d_a, d_b, d_c, n); hipMemcpy(&h_c[0], d_c, sizeof(int) * n * n, hipMemcpyDeviceToHost); //------------------ int* h_r; h_r = (int*)malloc(sizeof(int) * n * n); MatrixMulOnHost(&h_a[0], &h_b[0], h_r, n); for (int i=0; i<(n*n); i++) { if (h_r[i] != h_c[i]) { std::cout << "Failed at i " << i << "h_r=" << h_r[i] << ",h_c=" << h_c[i] << "\n"; exit(1); } } std::cout << "Result is correct."; // deallocate device memory hipFree(d_a); hipFree(d_b); hipFree(d_c); return 0; }
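The file's opening comment promises a shared-memory tiled variant, but only the simple kernel appears; a hedged sketch of what that tiled version could look like in the same HIP code (it assumes width is a multiple of TILE_WIDTH, which holds for n = 1024 here):

__global__ void matrix_multiply_tiled(int *a, int *b, int *ab, size_t width)
{
  // per-block staging tiles act as the "bandwidth multiplier"
  __shared__ int s_a[TILE_WIDTH][TILE_WIDTH];
  __shared__ int s_b[TILE_WIDTH][TILE_WIDTH];
  int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
  int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
  int result = 0;
  for (size_t t = 0; t < width / TILE_WIDTH; ++t) {
    // each thread loads one element of each tile
    s_a[threadIdx.y][threadIdx.x] = a[row * width + t * TILE_WIDTH + threadIdx.x];
    s_b[threadIdx.y][threadIdx.x] = b[(t * TILE_WIDTH + threadIdx.y) * width + col];
    __syncthreads();
    for (int k = 0; k < TILE_WIDTH; ++k)
      result += s_a[threadIdx.y][k] * s_b[k][threadIdx.x];
    __syncthreads(); // keep tiles live until every thread is done with them
  }
  ab[row * width + col] = result;
}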
Convert the following CUDA code to AMD GPU code: cuda /////// ///////////////////////////////////////////////////////////////////// // Calculate scalar products of VectorN vectors of ElementN elements on CPU. // Straight accumulation in double precision. //////////////////////////////////////////////////////////////////////////// #include <iostream> #include <cmath> using namespace std; void Kernel_5_CPU (int* B, int* Kernel_5_output, int* Kernel_4_output, int* Length_Seq_K4, int K3_Length, int K3_Report, int K3_Safety, int K5R, int K5_Length, int K4_S1, int K4_S2,int K4_S3) { int *Sum, *Center_Seq, *Center_Rev, *Kernel_5_out_shared, *D, *L, *Kernel_5_Temp; int Center_Loc; Sum = new int [K3_Report]; Center_Seq = new int [K5_Length]; Center_Rev = new int [K5_Length]; Kernel_5_out_shared = new int [K5_Length]; D = new int [(K5_Length+1)*(K5_Length+1)]; L = new int [(K5_Length+1)*(K5_Length+1)]; Kernel_5_Temp = new int [K3_Report*K3_Report*K5R]; for (int Sub_Thread=0; Sub_Thread<K5R; Sub_Thread++) { int counter = Sub_Thread*K3_Report*K3_Report; int Loc = counter; for (int i=0; i<K3_Report; i++) { for (int j=i+1; j<K3_Report; j++) { Kernel_5_Temp [i*K3_Report + j + Loc] = Kernel_4_output[counter]; Kernel_5_Temp [j*K3_Report + i + Loc] = Kernel_4_output[counter]; counter++; } } } // ---------------------------- finding summation of each row of matrix (minimum is a Center) -------------------------------- for(int Sub_Block =0; Sub_Block < K5R; Sub_Block ++) { for (int Sub_Thread=0; Sub_Thread<K3_Report; Sub_Thread++) { int iStart = Sub_Thread * K3_Report + Sub_Block * K3_Report * K3_Report; Sum[Sub_Thread] = 0; for (int j=0; j<K3_Report; j++) { Sum[Sub_Thread] = Sum[Sub_Thread] + Kernel_5_Temp [j + iStart] ; } } int Minimum_Sum = 10000; for (int i=0; i<K3_Report; i++) { if (Sum[i]<Minimum_Sum) { Minimum_Sum = Sum[i]; Center_Loc = i; } } //printf("--------- %i %i \n", Minimum_Sum,Center_Loc); //--------------------------------------------- Extract Center Sequence ---------------------------------- for (int i=0; i<K3_Length; i++) { Center_Seq [i]= B[i + Center_Loc * K3_Length * K3_Safety + Sub_Block * K3_Length * K3_Safety * K3_Report]; Center_Rev[i]=0; // printf("--------- %i %i \n", i,Center_Seq [i]); // Kernel_5_Temp[i+K5_Length*K3_Report*Sub_Block]=Center_Seq [i]; } //--------------------------------------------- Initialize D & L arrays ---------------------------------- L[0]=0; for (int i=0; i<K5_Length+1; i++) { D[i] = i * K4_S3; D[i*(K5_Length+1)] = i * K4_S3; if (i>0) { L[i] = i - 1; L[i*(K5_Length+1)] = (i-1)*(K5_Length+1); } } //--------------------------------------------- Start Multiple Alignment ---------------------------------- int Length_Center_Seq = Length_Seq_K4[Center_Loc + Sub_Block * K3_Report]; int Temp[6]; for (int k=0; k<K3_Report; k++) { int B_Loc = k * K3_Safety * K3_Length + Sub_Block * K3_Safety * K3_Length * K3_Report; int End_A = Length_Center_Seq; int End_B = Length_Seq_K4[k + Sub_Block * K3_Report]; for (int i = 0; i<End_B; i++ ) { for (int j = 0; j<End_A; j++) { int D_Sim; int Num = i *(K5_Length+1) + j ; int Num1 = (i+1)*(K5_Length+1) + j ; // First E then F then Similarity if (Center_Seq [j]==B[B_Loc + i]) D_Sim = D[Num]+K4_S1; else D_Sim=D[Num]+K4_S2; Temp[4]= D_Sim; Temp[5]= Num; Temp[0] = D[Num1] + K4_S3; Temp[1] = Num1; Temp[2] = D[Num+1] + K4_S3; Temp[3] = Num+1; int minD =1000; int minL = 0; for (int n=0; n<6; n=n+2) { if (Temp[n]<minD) { minD = Temp[n]; minL = Temp[n+1]; } } D[Num1+1] = minD; L[Num1+1] = minL; } } 
//------------------------------------------------ Trace Back ----------------------------------------------------------------------- int Loc_Temp = (End_B)*(K5_Length+1) + End_A; int Loc_Path = Loc_Temp; int Check = 0; int Index_A = Length_Center_Seq-1; int Index_B = B_Loc + End_B-1; int cnt = 0; int update = 0; while (Loc_Path != Check) { Loc_Path = L[Loc_Temp]; int Dif = Loc_Temp - Loc_Path; if (Dif==(K5_Length+1+1)) { Center_Rev [cnt] = Center_Seq [Index_A]; Kernel_5_output[cnt + K5_Length*k + K5_Length*K3_Report*Sub_Block] = B[Index_B]; Index_B -=1; Index_A -=1; } else { if (Dif==(K5_Length+1)) { Center_Rev [cnt] = 666; Kernel_5_output[cnt + K5_Length*k + K5_Length*K3_Report*Sub_Block] = B[Index_B]; Index_B -=1; update=1; // update previous sequences for (int j=0; j<k; j++) { for (int i=cnt; i<K5_Length; i++) { Kernel_5_out_shared[i] = Kernel_5_output[i + K5_Length*j + K5_Length*K3_Report*Sub_Block]; } Kernel_5_output[cnt + K5_Length*j + K5_Length*K3_Report*Sub_Block] = 666; for (int i=cnt+1; i<K5_Length; i++) { Kernel_5_output[i + K5_Length*j + K5_Length*K3_Report*Sub_Block] = Kernel_5_out_shared[i-1] ; } } } else { Center_Rev [cnt] = Center_Seq [Index_A]; Kernel_5_output[cnt + K5_Length*k + K5_Length*K3_Report*Sub_Block] = 666; Index_A -=1; } } Loc_Temp = Loc_Path; cnt++; } // While End if (update == 1) { Length_Center_Seq = cnt; for (int i=0; i<Length_Center_Seq; i++) { Center_Seq [i] = Center_Rev [Length_Center_Seq-i-1]; // Kernel_5_output2[i+ K5_Length*k + K5_Length*K3_Report*Sub_Block]=Center_Rev [i]; } } } // End K (Report) // Bring Center Sequence to the first of array for (int i=0; i<K5_Length; i++) { int Tmp = Kernel_5_output[i + K5_Length*0 + K5_Length*K3_Report*Sub_Block]; Kernel_5_output[i + K5_Length*0 + K5_Length*K3_Report*Sub_Block] = Center_Rev [i]; Kernel_5_output[i + K5_Length*Center_Loc + K5_Length*K3_Report*Sub_Block] = Tmp; } } for (int Sub_Thread=0; Sub_Thread<(K3_Report*K3_Report*K5R); Sub_Thread++) Kernel_4_output [Sub_Thread] = Kernel_5_Temp[Sub_Thread]; }
/////// ///////////////////////////////////////////////////////////////////// // Calculate scalar products of VectorN vectors of ElementN elements on CPU. // Straight accumulation in double precision. //////////////////////////////////////////////////////////////////////////// #include <hip/hip_runtime.h> #include <iostream> #include <cmath> using namespace std; void Kernel_5_CPU (int* B, int* Kernel_5_output, int* Kernel_4_output, int* Length_Seq_K4, int K3_Length, int K3_Report, int K3_Safety, int K5R, int K5_Length, int K4_S1, int K4_S2,int K4_S3) { int *Sum, *Center_Seq, *Center_Rev, *Kernel_5_out_shared, *D, *L, *Kernel_5_Temp; int Center_Loc; Sum = new int [K3_Report]; Center_Seq = new int [K5_Length]; Center_Rev = new int [K5_Length]; Kernel_5_out_shared = new int [K5_Length]; D = new int [(K5_Length+1)*(K5_Length+1)]; L = new int [(K5_Length+1)*(K5_Length+1)]; Kernel_5_Temp = new int [K3_Report*K3_Report*K5R]; for (int Sub_Thread=0; Sub_Thread<K5R; Sub_Thread++) { int counter = Sub_Thread*K3_Report*K3_Report; int Loc = counter; for (int i=0; i<K3_Report; i++) { for (int j=i+1; j<K3_Report; j++) { Kernel_5_Temp [i*K3_Report + j + Loc] = Kernel_4_output[counter]; Kernel_5_Temp [j*K3_Report + i + Loc] = Kernel_4_output[counter]; counter++; } } } // ---------------------------- finding summation of each row of matrix (minimum is a Center) -------------------------------- for(int Sub_Block =0; Sub_Block < K5R; Sub_Block ++) { for (int Sub_Thread=0; Sub_Thread<K3_Report; Sub_Thread++) { int iStart = Sub_Thread * K3_Report + Sub_Block * K3_Report * K3_Report; Sum[Sub_Thread] = 0; for (int j=0; j<K3_Report; j++) { Sum[Sub_Thread] = Sum[Sub_Thread] + Kernel_5_Temp [j + iStart] ; } } int Minimum_Sum = 10000; for (int i=0; i<K3_Report; i++) { if (Sum[i]<Minimum_Sum) { Minimum_Sum = Sum[i]; Center_Loc = i; } } //printf("--------- %i %i \n", Minimum_Sum,Center_Loc); //--------------------------------------------- Extract Center Sequence ---------------------------------- for (int i=0; i<K3_Length; i++) { Center_Seq [i]= B[i + Center_Loc * K3_Length * K3_Safety + Sub_Block * K3_Length * K3_Safety * K3_Report]; Center_Rev[i]=0; // printf("--------- %i %i \n", i,Center_Seq [i]); // Kernel_5_Temp[i+K5_Length*K3_Report*Sub_Block]=Center_Seq [i]; } //--------------------------------------------- Initialize D & L arrays ---------------------------------- L[0]=0; for (int i=0; i<K5_Length+1; i++) { D[i] = i * K4_S3; D[i*(K5_Length+1)] = i * K4_S3; if (i>0) { L[i] = i - 1; L[i*(K5_Length+1)] = (i-1)*(K5_Length+1); } } //--------------------------------------------- Start Multiple Alignment ---------------------------------- int Length_Center_Seq = Length_Seq_K4[Center_Loc + Sub_Block * K3_Report]; int Temp[6]; for (int k=0; k<K3_Report; k++) { int B_Loc = k * K3_Safety * K3_Length + Sub_Block * K3_Safety * K3_Length * K3_Report; int End_A = Length_Center_Seq; int End_B = Length_Seq_K4[k + Sub_Block * K3_Report]; for (int i = 0; i<End_B; i++ ) { for (int j = 0; j<End_A; j++) { int D_Sim; int Num = i *(K5_Length+1) + j ; int Num1 = (i+1)*(K5_Length+1) + j ; // First E then F then Similarity if (Center_Seq [j]==B[B_Loc + i]) D_Sim = D[Num]+K4_S1; else D_Sim=D[Num]+K4_S2; Temp[4]= D_Sim; Temp[5]= Num; Temp[0] = D[Num1] + K4_S3; Temp[1] = Num1; Temp[2] = D[Num+1] + K4_S3; Temp[3] = Num+1; int minD =1000; int minL = 0; for (int n=0; n<6; n=n+2) { if (Temp[n]<minD) { minD = Temp[n]; minL = Temp[n+1]; } } D[Num1+1] = minD; L[Num1+1] = minL; } } //------------------------------------------------ Trace Back 
----------------------------------------------------------------------- int Loc_Temp = (End_B)*(K5_Length+1) + End_A; int Loc_Path = Loc_Temp; int Check = 0; int Index_A = Length_Center_Seq-1; int Index_B = B_Loc + End_B-1; int cnt = 0; int update = 0; while (Loc_Path != Check) { Loc_Path = L[Loc_Temp]; int Dif = Loc_Temp - Loc_Path; if (Dif==(K5_Length+1+1)) { Center_Rev [cnt] = Center_Seq [Index_A]; Kernel_5_output[cnt + K5_Length*k + K5_Length*K3_Report*Sub_Block] = B[Index_B]; Index_B -=1; Index_A -=1; } else { if (Dif==(K5_Length+1)) { Center_Rev [cnt] = 666; Kernel_5_output[cnt + K5_Length*k + K5_Length*K3_Report*Sub_Block] = B[Index_B]; Index_B -=1; update=1; // update previous sequences for (int j=0; j<k; j++) { for (int i=cnt; i<K5_Length; i++) { Kernel_5_out_shared[i] = Kernel_5_output[i + K5_Length*j + K5_Length*K3_Report*Sub_Block]; } Kernel_5_output[cnt + K5_Length*j + K5_Length*K3_Report*Sub_Block] = 666; for (int i=cnt+1; i<K5_Length; i++) { Kernel_5_output[i + K5_Length*j + K5_Length*K3_Report*Sub_Block] = Kernel_5_out_shared[i-1] ; } } } else { Center_Rev [cnt] = Center_Seq [Index_A]; Kernel_5_output[cnt + K5_Length*k + K5_Length*K3_Report*Sub_Block] = 666; Index_A -=1; } } Loc_Temp = Loc_Path; cnt++; } // While End if (update == 1) { Length_Center_Seq = cnt; for (int i=0; i<Length_Center_Seq; i++) { Center_Seq [i] = Center_Rev [Length_Center_Seq-i-1]; // Kernel_5_output2[i+ K5_Length*k + K5_Length*K3_Report*Sub_Block]=Center_Rev [i]; } } } // End K (Report) // Bring Center Sequence to the first of array for (int i=0; i<K5_Length; i++) { int Tmp = Kernel_5_output[i + K5_Length*0 + K5_Length*K3_Report*Sub_Block]; Kernel_5_output[i + K5_Length*0 + K5_Length*K3_Report*Sub_Block] = Center_Rev [i]; Kernel_5_output[i + K5_Length*Center_Loc + K5_Length*K3_Report*Sub_Block] = Tmp; } } for (int Sub_Thread=0; Sub_Thread<(K3_Report*K3_Report*K5R); Sub_Thread++) Kernel_4_output [Sub_Thread] = Kernel_5_Temp[Sub_Thread]; }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void Kernel(int* a,int* b,int *c,int n){ int i = blockIdx.x*blockDim.x + threadIdx.x; __shared__ extern int shared_mem[]; int reg; if(i>= n) return; reg = a[i] + b[i]; shared_mem[i] = reg; c[i] = shared_mem[i]; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void Kernel(int* a,int* b,int *c,int n){ int i = blockIdx.x*blockDim.x + threadIdx.x; __shared__ extern int shared_mem[]; int reg; if(i>= n) return; reg = a[i] + b[i]; shared_mem[i] = reg; c[i] = shared_mem[i]; }
Convert the following CUDA code to AMD GPU code: cuda #include "sha5.cuh" #include <string.h> __device__ static const uint64_t rhash_k512[80] = { I64(0x428a2f98d728ae22), I64(0x7137449123ef65cd), I64(0xb5c0fbcfec4d3b2f), I64(0xe9b5dba58189dbbc), I64(0x3956c25bf348b538), I64(0x59f111f1b605d019), I64(0x923f82a4af194f9b), I64(0xab1c5ed5da6d8118), I64(0xd807aa98a3030242), I64(0x12835b0145706fbe), I64(0x243185be4ee4b28c), I64(0x550c7dc3d5ffb4e2), I64(0x72be5d74f27b896f), I64(0x80deb1fe3b1696b1), I64(0x9bdc06a725c71235), I64(0xc19bf174cf692694), I64(0xe49b69c19ef14ad2), I64(0xefbe4786384f25e3), I64(0x0fc19dc68b8cd5b5), I64(0x240ca1cc77ac9c65), I64(0x2de92c6f592b0275), I64(0x4a7484aa6ea6e483), I64(0x5cb0a9dcbd41fbd4), I64(0x76f988da831153b5), I64(0x983e5152ee66dfab), I64(0xa831c66d2db43210), I64(0xb00327c898fb213f), I64(0xbf597fc7beef0ee4), I64(0xc6e00bf33da88fc2), I64(0xd5a79147930aa725), I64(0x06ca6351e003826f), I64(0x142929670a0e6e70), I64(0x27b70a8546d22ffc), I64(0x2e1b21385c26c926), I64(0x4d2c6dfc5ac42aed), I64(0x53380d139d95b3df), I64(0x650a73548baf63de), I64(0x766a0abb3c77b2a8), I64(0x81c2c92e47edaee6), I64(0x92722c851482353b), I64(0xa2bfe8a14cf10364), I64(0xa81a664bbc423001), I64(0xc24b8b70d0f89791), I64(0xc76c51a30654be30), I64(0xd192e819d6ef5218), I64(0xd69906245565a910), I64(0xf40e35855771202a), I64(0x106aa07032bbd1b8), I64(0x19a4c116b8d2d0c8), I64(0x1e376c085141ab53), I64(0x2748774cdf8eeb99), I64(0x34b0bcb5e19b48a8), I64(0x391c0cb3c5c95a63), I64(0x4ed8aa4ae3418acb), I64(0x5b9cca4f7763e373), I64(0x682e6ff3d6b2b8a3), I64(0x748f82ee5defb2fc), I64(0x78a5636f43172f60), I64(0x84c87814a1f0ab72), I64(0x8cc702081a6439ec), I64(0x90befffa23631e28), I64(0xa4506cebde82bde9), I64(0xbef9a3f7b2c67915), I64(0xc67178f2e372532b), I64(0xca273eceea26619c), I64(0xd186b8c721c0c207), I64(0xeada7dd6cde0eb1e), I64(0xf57d4f7fee6ed178), I64(0x06f067aa72176fba), I64(0x0a637dc5a2c898a6), I64(0x113f9804bef90dae), I64(0x1b710b35131c471b), I64(0x28db77f523047d84), I64(0x32caab7b40c72493), I64(0x3c9ebe0a15c9bebc), I64(0x431d67c49c100d4c), I64(0x4cc5d4becb3e42b6), I64(0x597f299cfc657e2a), I64(0x5fcb6fab3ad6faec), I64(0x6c44198c4a475817) }; /* The SHA512/384 functions defined by FIPS 180-3, 4.1.3 */ /* Optimized version of Ch(x,y,z)=((x & y) | (~x & z)) */ #define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* Optimized version of Maj(x,y,z)=((x & y) ^ (x & z) ^ (y & z)) */ #define Maj(x,y,z) (((x) & (y)) ^ ((z) & ((x) ^ (y)))) #define Sigma0(x) (ROTR64((x), 28) ^ ROTR64((x), 34) ^ ROTR64((x), 39)) #define Sigma1(x) (ROTR64((x), 14) ^ ROTR64((x), 18) ^ ROTR64((x), 41)) #define sigma0(x) (ROTR64((x), 1) ^ ROTR64((x), 8) ^ ((x) >> 7)) #define sigma1(x) (ROTR64((x), 19) ^ ROTR64((x), 61) ^ ((x) >> 6)) /* Recalculate element n-th of circular buffer W using formula * W[n] = sigma1(W[n - 2]) + W[n - 7] + sigma0(W[n - 15]) + W[n - 16]; */ #define RECALCULATE_W(W,n) (W[n] += \ (sigma1(W[(n - 2) & 15]) + W[(n - 7) & 15] + sigma0(W[(n - 15) & 15]))) #define ROUND(a,b,c,d,e,f,g,h,k,data) { \ uint64_t T1 = h + Sigma1(e) + Ch(e,f,g) + k + (data); \ d += T1, h = T1 + Sigma0(a) + Maj(a,b,c); } #define ROUND_1_16(a,b,c,d,e,f,g,h,n) \ ROUND(a,b,c,d,e,f,g,h, rhash_k512[n], W[n] = be2me_64(block[n])) #define ROUND_17_80(a,b,c,d,e,f,g,h,n) \ ROUND(a,b,c,d,e,f,g,h, k[n], RECALCULATE_W(W, n)) /** * Initialize context before calculating hash. * * @param ctx context to initialize */ __device__ void rhash_sha512_init(sha512_ctx *ctx) { /* Initial values. 
These words were obtained by taking the first 32 * bits of the fractional parts of the square roots of the first * eight prime numbers. */ static const uint64_t SHA512_H0[8] = { I64(0x6a09e667f3bcc908), I64(0xbb67ae8584caa73b), I64(0x3c6ef372fe94f82b), I64(0xa54ff53a5f1d36f1), I64(0x510e527fade682d1), I64(0x9b05688c2b3e6c1f), I64(0x1f83d9abfb41bd6b), I64(0x5be0cd19137e2179) }; ctx->length = 0; ctx->digest_length = sha512_hash_size; /* initialize algorithm state */ memcpy(ctx->hash, SHA512_H0, sizeof(ctx->hash)); } /** * The core transformation. Process a 512-bit block. * * @param hash algorithm state * @param block the message block to process */ __device__ static void rhash_sha512_process_block(uint64_t hash[8], uint64_t block[16]) { uint64_t A, B, C, D, E, F, G, H; uint64_t W[16]; const uint64_t *k; int i; A = hash[0], B = hash[1], C = hash[2], D = hash[3]; E = hash[4], F = hash[5], G = hash[6], H = hash[7]; /* Compute SHA using alternate Method: FIPS 180-3 6.1.3 */ ROUND_1_16(A, B, C, D, E, F, G, H, 0); ROUND_1_16(H, A, B, C, D, E, F, G, 1); ROUND_1_16(G, H, A, B, C, D, E, F, 2); ROUND_1_16(F, G, H, A, B, C, D, E, 3); ROUND_1_16(E, F, G, H, A, B, C, D, 4); ROUND_1_16(D, E, F, G, H, A, B, C, 5); ROUND_1_16(C, D, E, F, G, H, A, B, 6); ROUND_1_16(B, C, D, E, F, G, H, A, 7); ROUND_1_16(A, B, C, D, E, F, G, H, 8); ROUND_1_16(H, A, B, C, D, E, F, G, 9); ROUND_1_16(G, H, A, B, C, D, E, F, 10); ROUND_1_16(F, G, H, A, B, C, D, E, 11); ROUND_1_16(E, F, G, H, A, B, C, D, 12); ROUND_1_16(D, E, F, G, H, A, B, C, 13); ROUND_1_16(C, D, E, F, G, H, A, B, 14); ROUND_1_16(B, C, D, E, F, G, H, A, 15); for (i = 16, k = &rhash_k512[16]; i < 80; i += 16, k += 16) { ROUND_17_80(A, B, C, D, E, F, G, H, 0); ROUND_17_80(H, A, B, C, D, E, F, G, 1); ROUND_17_80(G, H, A, B, C, D, E, F, 2); ROUND_17_80(F, G, H, A, B, C, D, E, 3); ROUND_17_80(E, F, G, H, A, B, C, D, 4); ROUND_17_80(D, E, F, G, H, A, B, C, 5); ROUND_17_80(C, D, E, F, G, H, A, B, 6); ROUND_17_80(B, C, D, E, F, G, H, A, 7); ROUND_17_80(A, B, C, D, E, F, G, H, 8); ROUND_17_80(H, A, B, C, D, E, F, G, 9); ROUND_17_80(G, H, A, B, C, D, E, F, 10); ROUND_17_80(F, G, H, A, B, C, D, E, 11); ROUND_17_80(E, F, G, H, A, B, C, D, 12); ROUND_17_80(D, E, F, G, H, A, B, C, 13); ROUND_17_80(C, D, E, F, G, H, A, B, 14); ROUND_17_80(B, C, D, E, F, G, H, A, 15); } hash[0] += A, hash[1] += B, hash[2] += C, hash[3] += D; hash[4] += E, hash[5] += F, hash[6] += G, hash[7] += H; } /** * Calculate message hash. * Can be called repeatedly with chunks of the message to be hashed. * * @param ctx the algorithm context containing current hashing state * @param msg message chunk * @param size length of the message chunk */ __device__ void rhash_sha512_update(sha512_ctx *ctx, const unsigned char *msg, size_t size) { size_t index = (size_t)ctx->length & 127; ctx->length += size; /* fill partial block */ if (index) { size_t left = sha512_block_size - index; memcpy((char*)ctx->message + index, msg, (size < left ? 
size : left)); if (size < left) return; /* process partial block */ rhash_sha512_process_block(ctx->hash, ctx->message); msg += left; size -= left; } while (size >= sha512_block_size) { uint64_t* aligned_message_block; if (IS_ALIGNED_64(msg)) { /* the most common case is processing of an already aligned message without copying it */ aligned_message_block = (uint64_t*)msg; } else { memcpy(ctx->message, msg, sha512_block_size); aligned_message_block = ctx->message; } rhash_sha512_process_block(ctx->hash, aligned_message_block); msg += sha512_block_size; size -= sha512_block_size; } if (size) { memcpy(ctx->message, msg, size); /* save leftovers */ } } /** * Store calculated hash into the given array. * * @param ctx the algorithm context containing current hashing state * @param result calculated hash in binary form */ __device__ void rhash_sha512_final(sha512_ctx *ctx, unsigned char* result) { size_t index = ((unsigned)ctx->length & 127) >> 3; unsigned shift = ((unsigned)ctx->length & 7) * 8; /* pad message and process the last block */ /* append the byte 0x80 to the message */ ctx->message[index] &= le2me_64( ~(I64(0xFFFFFFFFFFFFFFFF) << shift) ); ctx->message[index++] ^= le2me_64( I64(0x80) << shift ); /* if no room left in the message to store 128-bit message length */ if (index >= 15) { if (index == 15) ctx->message[index] = 0; rhash_sha512_process_block(ctx->hash, ctx->message); index = 0; } while (index < 15) { ctx->message[index++] = 0; } ctx->message[15] = be2me_64(ctx->length << 3); rhash_sha512_process_block(ctx->hash, ctx->message); if (result) be64_copy(result, 0, ctx->hash, ctx->digest_length); } __device__ void rhash_swap_copy_str_to_u64(void* to, int index, const void* from, size_t length) { /* if all pointers and length are 64-bits aligned */ if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 7) ) { /* copy aligned memory block as 64-bit integers */ const uint64_t* src = (const uint64_t*)from; const uint64_t* end = (const uint64_t*)((const char*)src + length); uint64_t* dst = (uint64_t*)((char*)to + index); while (src < end) *(dst++) = bswap_64( *(src++) ); } else { const char* src = (const char*)from; for (length += index; (size_t)index < length; index++) ((char*)to)[index ^ 7] = *(src++); } } __device__ void SHA512(void *digest,const void *data,unsigned int len) { sha512_ctx ctx; rhash_sha512_init (&ctx); rhash_sha512_update (&ctx, (unsigned char *)data, len); rhash_sha512_final (&ctx, (unsigned char *)digest); }
#include <hip/hip_runtime.h> #include "sha5.cuh" #include <string.h> __device__ static const uint64_t rhash_k512[80] = { I64(0x428a2f98d728ae22), I64(0x7137449123ef65cd), I64(0xb5c0fbcfec4d3b2f), I64(0xe9b5dba58189dbbc), I64(0x3956c25bf348b538), I64(0x59f111f1b605d019), I64(0x923f82a4af194f9b), I64(0xab1c5ed5da6d8118), I64(0xd807aa98a3030242), I64(0x12835b0145706fbe), I64(0x243185be4ee4b28c), I64(0x550c7dc3d5ffb4e2), I64(0x72be5d74f27b896f), I64(0x80deb1fe3b1696b1), I64(0x9bdc06a725c71235), I64(0xc19bf174cf692694), I64(0xe49b69c19ef14ad2), I64(0xefbe4786384f25e3), I64(0x0fc19dc68b8cd5b5), I64(0x240ca1cc77ac9c65), I64(0x2de92c6f592b0275), I64(0x4a7484aa6ea6e483), I64(0x5cb0a9dcbd41fbd4), I64(0x76f988da831153b5), I64(0x983e5152ee66dfab), I64(0xa831c66d2db43210), I64(0xb00327c898fb213f), I64(0xbf597fc7beef0ee4), I64(0xc6e00bf33da88fc2), I64(0xd5a79147930aa725), I64(0x06ca6351e003826f), I64(0x142929670a0e6e70), I64(0x27b70a8546d22ffc), I64(0x2e1b21385c26c926), I64(0x4d2c6dfc5ac42aed), I64(0x53380d139d95b3df), I64(0x650a73548baf63de), I64(0x766a0abb3c77b2a8), I64(0x81c2c92e47edaee6), I64(0x92722c851482353b), I64(0xa2bfe8a14cf10364), I64(0xa81a664bbc423001), I64(0xc24b8b70d0f89791), I64(0xc76c51a30654be30), I64(0xd192e819d6ef5218), I64(0xd69906245565a910), I64(0xf40e35855771202a), I64(0x106aa07032bbd1b8), I64(0x19a4c116b8d2d0c8), I64(0x1e376c085141ab53), I64(0x2748774cdf8eeb99), I64(0x34b0bcb5e19b48a8), I64(0x391c0cb3c5c95a63), I64(0x4ed8aa4ae3418acb), I64(0x5b9cca4f7763e373), I64(0x682e6ff3d6b2b8a3), I64(0x748f82ee5defb2fc), I64(0x78a5636f43172f60), I64(0x84c87814a1f0ab72), I64(0x8cc702081a6439ec), I64(0x90befffa23631e28), I64(0xa4506cebde82bde9), I64(0xbef9a3f7b2c67915), I64(0xc67178f2e372532b), I64(0xca273eceea26619c), I64(0xd186b8c721c0c207), I64(0xeada7dd6cde0eb1e), I64(0xf57d4f7fee6ed178), I64(0x06f067aa72176fba), I64(0x0a637dc5a2c898a6), I64(0x113f9804bef90dae), I64(0x1b710b35131c471b), I64(0x28db77f523047d84), I64(0x32caab7b40c72493), I64(0x3c9ebe0a15c9bebc), I64(0x431d67c49c100d4c), I64(0x4cc5d4becb3e42b6), I64(0x597f299cfc657e2a), I64(0x5fcb6fab3ad6faec), I64(0x6c44198c4a475817) }; /* The SHA512/384 functions defined by FIPS 180-3, 4.1.3 */ /* Optimized version of Ch(x,y,z)=((x & y) | (~x & z)) */ #define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) /* Optimized version of Maj(x,y,z)=((x & y) ^ (x & z) ^ (y & z)) */ #define Maj(x,y,z) (((x) & (y)) ^ ((z) & ((x) ^ (y)))) #define Sigma0(x) (ROTR64((x), 28) ^ ROTR64((x), 34) ^ ROTR64((x), 39)) #define Sigma1(x) (ROTR64((x), 14) ^ ROTR64((x), 18) ^ ROTR64((x), 41)) #define sigma0(x) (ROTR64((x), 1) ^ ROTR64((x), 8) ^ ((x) >> 7)) #define sigma1(x) (ROTR64((x), 19) ^ ROTR64((x), 61) ^ ((x) >> 6)) /* Recalculate element n-th of circular buffer W using formula * W[n] = sigma1(W[n - 2]) + W[n - 7] + sigma0(W[n - 15]) + W[n - 16]; */ #define RECALCULATE_W(W,n) (W[n] += \ (sigma1(W[(n - 2) & 15]) + W[(n - 7) & 15] + sigma0(W[(n - 15) & 15]))) #define ROUND(a,b,c,d,e,f,g,h,k,data) { \ uint64_t T1 = h + Sigma1(e) + Ch(e,f,g) + k + (data); \ d += T1, h = T1 + Sigma0(a) + Maj(a,b,c); } #define ROUND_1_16(a,b,c,d,e,f,g,h,n) \ ROUND(a,b,c,d,e,f,g,h, rhash_k512[n], W[n] = be2me_64(block[n])) #define ROUND_17_80(a,b,c,d,e,f,g,h,n) \ ROUND(a,b,c,d,e,f,g,h, k[n], RECALCULATE_W(W, n)) /** * Initialize context before calculating hash. * * @param ctx context to initialize */ __device__ void rhash_sha512_init(sha512_ctx *ctx) { /* Initial values. 
These words were obtained by taking the first 32 * bits of the fractional parts of the square roots of the first * eight prime numbers. */ static const uint64_t SHA512_H0[8] = { I64(0x6a09e667f3bcc908), I64(0xbb67ae8584caa73b), I64(0x3c6ef372fe94f82b), I64(0xa54ff53a5f1d36f1), I64(0x510e527fade682d1), I64(0x9b05688c2b3e6c1f), I64(0x1f83d9abfb41bd6b), I64(0x5be0cd19137e2179) }; ctx->length = 0; ctx->digest_length = sha512_hash_size; /* initialize algorithm state */ memcpy(ctx->hash, SHA512_H0, sizeof(ctx->hash)); } /** * The core transformation. Process a 512-bit block. * * @param hash algorithm state * @param block the message block to process */ __device__ static void rhash_sha512_process_block(uint64_t hash[8], uint64_t block[16]) { uint64_t A, B, C, D, E, F, G, H; uint64_t W[16]; const uint64_t *k; int i; A = hash[0], B = hash[1], C = hash[2], D = hash[3]; E = hash[4], F = hash[5], G = hash[6], H = hash[7]; /* Compute SHA using alternate Method: FIPS 180-3 6.1.3 */ ROUND_1_16(A, B, C, D, E, F, G, H, 0); ROUND_1_16(H, A, B, C, D, E, F, G, 1); ROUND_1_16(G, H, A, B, C, D, E, F, 2); ROUND_1_16(F, G, H, A, B, C, D, E, 3); ROUND_1_16(E, F, G, H, A, B, C, D, 4); ROUND_1_16(D, E, F, G, H, A, B, C, 5); ROUND_1_16(C, D, E, F, G, H, A, B, 6); ROUND_1_16(B, C, D, E, F, G, H, A, 7); ROUND_1_16(A, B, C, D, E, F, G, H, 8); ROUND_1_16(H, A, B, C, D, E, F, G, 9); ROUND_1_16(G, H, A, B, C, D, E, F, 10); ROUND_1_16(F, G, H, A, B, C, D, E, 11); ROUND_1_16(E, F, G, H, A, B, C, D, 12); ROUND_1_16(D, E, F, G, H, A, B, C, 13); ROUND_1_16(C, D, E, F, G, H, A, B, 14); ROUND_1_16(B, C, D, E, F, G, H, A, 15); for (i = 16, k = &rhash_k512[16]; i < 80; i += 16, k += 16) { ROUND_17_80(A, B, C, D, E, F, G, H, 0); ROUND_17_80(H, A, B, C, D, E, F, G, 1); ROUND_17_80(G, H, A, B, C, D, E, F, 2); ROUND_17_80(F, G, H, A, B, C, D, E, 3); ROUND_17_80(E, F, G, H, A, B, C, D, 4); ROUND_17_80(D, E, F, G, H, A, B, C, 5); ROUND_17_80(C, D, E, F, G, H, A, B, 6); ROUND_17_80(B, C, D, E, F, G, H, A, 7); ROUND_17_80(A, B, C, D, E, F, G, H, 8); ROUND_17_80(H, A, B, C, D, E, F, G, 9); ROUND_17_80(G, H, A, B, C, D, E, F, 10); ROUND_17_80(F, G, H, A, B, C, D, E, 11); ROUND_17_80(E, F, G, H, A, B, C, D, 12); ROUND_17_80(D, E, F, G, H, A, B, C, 13); ROUND_17_80(C, D, E, F, G, H, A, B, 14); ROUND_17_80(B, C, D, E, F, G, H, A, 15); } hash[0] += A, hash[1] += B, hash[2] += C, hash[3] += D; hash[4] += E, hash[5] += F, hash[6] += G, hash[7] += H; } /** * Calculate message hash. * Can be called repeatedly with chunks of the message to be hashed. * * @param ctx the algorithm context containing current hashing state * @param msg message chunk * @param size length of the message chunk */ __device__ void rhash_sha512_update(sha512_ctx *ctx, const unsigned char *msg, size_t size) { size_t index = (size_t)ctx->length & 127; ctx->length += size; /* fill partial block */ if (index) { size_t left = sha512_block_size - index; memcpy((char*)ctx->message + index, msg, (size < left ? 
size : left)); if (size < left) return; /* process partial block */ rhash_sha512_process_block(ctx->hash, ctx->message); msg += left; size -= left; } while (size >= sha512_block_size) { uint64_t* aligned_message_block; if (IS_ALIGNED_64(msg)) { /* the most common case is processing of an already aligned message without copying it */ aligned_message_block = (uint64_t*)msg; } else { memcpy(ctx->message, msg, sha512_block_size); aligned_message_block = ctx->message; } rhash_sha512_process_block(ctx->hash, aligned_message_block); msg += sha512_block_size; size -= sha512_block_size; } if (size) { memcpy(ctx->message, msg, size); /* save leftovers */ } } /** * Store calculated hash into the given array. * * @param ctx the algorithm context containing current hashing state * @param result calculated hash in binary form */ __device__ void rhash_sha512_final(sha512_ctx *ctx, unsigned char* result) { size_t index = ((unsigned)ctx->length & 127) >> 3; unsigned shift = ((unsigned)ctx->length & 7) * 8; /* pad message and process the last block */ /* append the byte 0x80 to the message */ ctx->message[index] &= le2me_64( ~(I64(0xFFFFFFFFFFFFFFFF) << shift) ); ctx->message[index++] ^= le2me_64( I64(0x80) << shift ); /* if no room left in the message to store 128-bit message length */ if (index >= 15) { if (index == 15) ctx->message[index] = 0; rhash_sha512_process_block(ctx->hash, ctx->message); index = 0; } while (index < 15) { ctx->message[index++] = 0; } ctx->message[15] = be2me_64(ctx->length << 3); rhash_sha512_process_block(ctx->hash, ctx->message); if (result) be64_copy(result, 0, ctx->hash, ctx->digest_length); } __device__ void rhash_swap_copy_str_to_u64(void* to, int index, const void* from, size_t length) { /* if all pointers and length are 64-bits aligned */ if ( 0 == (( (int)((char*)to - (char*)0) | ((char*)from - (char*)0) | index | length ) & 7) ) { /* copy aligned memory block as 64-bit integers */ const uint64_t* src = (const uint64_t*)from; const uint64_t* end = (const uint64_t*)((const char*)src + length); uint64_t* dst = (uint64_t*)((char*)to + index); while (src < end) *(dst++) = bswap_64( *(src++) ); } else { const char* src = (const char*)from; for (length += index; (size_t)index < length; index++) ((char*)to)[index ^ 7] = *(src++); } } __device__ void SHA512(void *digest,const void *data,unsigned int len) { sha512_ctx ctx; rhash_sha512_init (&ctx); rhash_sha512_update (&ctx, (unsigned char *)data, len); rhash_sha512_final (&ctx, (unsigned char *)digest); }
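A hedged device-side usage sketch: the file exposes SHA512 as a __device__ helper, so it is typically driven from a kernel such as the one below (the kernel itself is an assumption, not part of the original):

__global__ void sha512_kernel(unsigned char* digest, const unsigned char* msg, unsigned int len)
{
    // single-threaded for illustration; each thread could hash its own message instead
    if (blockIdx.x == 0 && threadIdx.x == 0)
        SHA512(digest, msg, len); // writes a 64-byte SHA-512 digest
}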
Convert the following CUDA code to AMD GPU code: cuda /* * gapped_extender_gpu_ref.cu * * Created on: 2014/08/23 * Author: shu */ #ifndef GAPPED_EXTENDER_GPU_REF_CU_ #define GAPPED_EXTENDER_GPU_REF_CU_ /* #include "gapped_extender_gpu.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/system/cuda/experimental/pinned_allocator.h> #include <thrust/copy.h> #include <vector> #include <assert.h> #include "score_matrix.h" #include "cuda_common.h" using namespace std; __device__ int InitCachedSequence(const AlphabetCoder::Code* sequence, const AlphabetCoder::Code sequence_delimiter, int increment, AlphabetCoder::Code* cached_sequence) { AlphabetCoder::Code c = 0; int p = 0; for (c = 0, p = 0; c != sequence_delimiter; p += increment) { c = sequence[p]; cached_sequence[p] = c; } return 0; } __device__ int InitScoreArray(const AlphabetCoder::Code* sequence, const AlphabetCoder::Code sequence_delimiter, int gap_init, int gap_extention, int default_cutoff, int increment, GappedExtenderGpu::DpCell* score_array) { int score = -gap_init; score_array[0].best = 0; score_array[0].best_gap = -gap_init; int sequence_position = 0; int array_index = 0; int cutoff = default_cutoff; if (cutoff < gap_init) { cutoff = gap_init; } for (array_index = 1; sequence[sequence_position] != sequence_delimiter; ++array_index, sequence_position += increment) { if (score < -cutoff) { break; } score_array[array_index].best = score; score_array[array_index].best_gap = score - gap_extention; score -= gap_extention; } return array_index; } __device__ int UpdateDpCells(const AlphabetCoder::Code* sequence0, const AlphabetCoder::Code s1_c, const int sequence1_position, const AlphabetCoder::Code sequence_delimiter, int increment, int cutoff, const int *score_matrix_row, const int gap_init, const int gap_extention, GappedExtenderGpu::DpCell* score_array, int *array_start_ptr, int *array_end_ptr, int* max_score_sequence0_position, int* max_score_sequence1_position, int* max_score_ptr) { int score = GappedExtenderGpu::kInitScore; int score_gap_row = GappedExtenderGpu::kInitScore; int max_score = *max_score_ptr; int array_start = *array_start_ptr; int array_end = *array_end_ptr; int sequence0_position = array_start * increment; int prev_score = score_array[array_start].best; score_array[array_start].best = score_array[array_start].best_gap; score_array[array_start].best_gap -= gap_extention; int array_last_index = array_start; for (int array_index = array_start + 1; array_index < array_end; ++array_index, sequence0_position += increment) { score = prev_score + score_matrix_row[sequence0[sequence0_position]]; prev_score = score_array[array_index].best; int score_gap_column = score_array[array_index].best_gap; score = score < score_gap_column ? score_gap_column : score; score = score < score_gap_row ? score_gap_row : score; if (max_score - score > cutoff) { array_start += array_start + 1 == array_index ? 
1 : 0; score_array[array_index].best = GappedExtenderGpu::kInitScore; score_array[array_index].best_gap = GappedExtenderGpu::kInitScore; score_gap_row = GappedExtenderGpu::kInitScore; } else { array_last_index = array_index; if (score > max_score) { max_score = score; *max_score_sequence0_position = sequence0_position; *max_score_sequence1_position = sequence1_position; } score_array[array_index].best_gap = max(score - gap_init, score_gap_column - gap_extention); score_gap_row = max(score - gap_init, score_gap_row - gap_extention); score_array[array_index].best = score; } } if (array_start + 1 != array_end) { if (array_last_index < array_end - 1) { array_end = array_last_index + 1; } else { while (score_gap_row >= (max_score - cutoff) && sequence0[sequence0_position] != sequence_delimiter) { score_array[array_end].best = score_gap_row; score_array[array_end].best_gap = score_gap_row - gap_init; score_gap_row -= gap_extention; ++array_end; sequence0_position += increment; } if (sequence0[sequence0_position] != sequence_delimiter) { score_array[array_end].best = GappedExtenderGpu::kInitScore; score_array[array_end].best_gap = GappedExtenderGpu::kInitScore; ++array_end; } } } *array_start_ptr = array_start; *array_end_ptr = array_end; *max_score_ptr = max_score; return 0; } __device__ int ExtendOneSideScoreOnlyDevice( const AlphabetCoder::Code* concatenated_sequence0, const uint32_t sequence_0_offset, const AlphabetCoder::Code* concatenated_sequence1, const uint32_t sequence_1_offset, const bool reverse, const AlphabetCoder::Code sequence_delimiter, const int* score_matrix, const uint32_t number_letters, const int gap_open, const int gap_extention, const int cutoff, GappedExtenderGpu::DpCell* dp_cells, uint32_t* best_sequence0_position, uint32_t* best_sequence1_position, int* best_score) { int increment = reverse ? -1 : 1; int max_score = 0; int max_score_sequence0_position = -increment; int max_score_sequence1_position = -increment; const AlphabetCoder::Code* sequence0 = concatenated_sequence0 + sequence_0_offset; const AlphabetCoder::Code* sequence1 = concatenated_sequence1 + sequence_1_offset; AlphabetCoder::Code cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length]; AlphabetCoder::Code *cached_sequence0 = reverse ? 
&cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length - 1] : &cached_sequence0_mem[0]; InitCachedSequence(sequence0, sequence_delimiter, increment, cached_sequence0); int sequence1_position = 0; GappedExtenderGpu::DpCell *score_array = dp_cells; int array_start = 0; int array_end = 0; int gap_init = gap_open + gap_extention; array_end = InitScoreArray(cached_sequence0, sequence_delimiter, gap_init, gap_extention, cutoff, increment, score_array); #if 0 printf("\n"); printf(" "); for (int x = 0; sequence0[x] != sequence_delimiter; x += increment) { printf("%3d", sequence0[x]); } printf("\n"); #endif bool stop_flag = false; while (!stop_flag) { #if 0 printf("%3d", sequence1[sequence1_position - increment]); for (int x = 0; x < array_start; ++x) { printf(" "); } for (int x = array_start; x < array_end; ++x) { printf("%3d", score_array[x].best); //fprintf(stderr, "%3d", insertion_sequence1_row[x]); } printf("\n"); #endif AlphabetCoder::Code s1_c = sequence1[sequence1_position]; if (s1_c == sequence_delimiter) { stop_flag = true; } else { UpdateDpCells(cached_sequence0, s1_c, sequence1_position, sequence_delimiter, increment, cutoff, score_matrix + s1_c * number_letters, gap_init, gap_extention, score_array, &array_start, &array_end, &max_score_sequence0_position, &max_score_sequence1_position, &max_score); sequence1_position += increment; } #if 0 // debug ////////////////// if (sequence1_position >= 10) { stop_flag = true; break; } ////////////////////////// #endif if (array_start + 1 == array_end) { stop_flag = true; break; } } *best_score = max_score; *best_sequence0_position = max_score_sequence0_position; *best_sequence1_position = max_score_sequence1_position; return 0; } __global__ void __launch_bounds__(128, 1) ExtendOneSideScoreOnlyKernel( const AlphabetCoder::Code* concatenated_sequence0, const AlphabetCoder::Code* concatenated_sequence1, const uint32_t number_extensions, const bool reverse, const AlphabetCoder::Code sequence_delimiter, const int* score_matrix, const uint32_t number_letters, const int gap_open, const int gap_extention, const int cutoff, uint32_t* sequence0_positions, uint32_t* sequence1_positions, int* best_scores) { const uint32_t thread_id = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thread_id_skip = gridDim.x * blockDim.x; GappedExtenderGpu::DpCell dp_cells[GappedExtenderGpu::kMaxSequence0Length]; for (uint32_t i = thread_id; i < number_extensions; i += thread_id_skip) { uint32_t sequence_0_offset = sequence0_positions[i] + cuda_common::kOneSideMarginSize; uint32_t sequence_1_offset = sequence1_positions[i] + cuda_common::kOneSideMarginSize; uint32_t best_sequence_0_p = 0; uint32_t best_sequence_1_p = 0; int best_score = 0; ExtendOneSideScoreOnlyDevice(concatenated_sequence0, sequence_0_offset, concatenated_sequence1, sequence_1_offset, reverse, sequence_delimiter, score_matrix, number_letters, gap_open, gap_extention, cutoff, &dp_cells[0], &best_sequence_0_p, &best_sequence_1_p, &best_score); sequence0_positions[i] = sequence_0_offset + best_sequence_0_p - cuda_common::kOneSideMarginSize; sequence1_positions[i] = sequence_1_offset + best_sequence_1_p - cuda_common::kOneSideMarginSize; best_scores[i] = best_score; } return; } // call /* ExtendOneSideScoreOnlyKernel<<<1024, 128, 0, stream>>>(d_concatenated_query_sequence_, d_database_sequence_, size, reverse, sequence_delimiter_, d_score_matrix_, number_letters_ + 1, gap_open_, gap_extention_, cutoff_, d_query_concatenated_positions, d_database_positions, d_scores); * */ #endif /* 
GAPPED_EXTENDER_GPU_REF_CU_ */
/* * gapped_extender_gpu_ref.cu * * Created on: 2014/08/23 * Author: shu */ #ifndef GAPPED_EXTENDER_GPU_REF_CU_ #define GAPPED_EXTENDER_GPU_REF_CU_ /* #include "gapped_extender_gpu.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/system/cuda/experimental/pinned_allocator.h> #include <thrust/copy.h> #include <vector> #include <assert.h> #include "score_matrix.h" #include "cuda_common.h" using namespace std; __device__ int InitCachedSequence(const AlphabetCoder::Code* sequence, const AlphabetCoder::Code sequence_delimiter, int increment, AlphabetCoder::Code* cached_sequence) { AlphabetCoder::Code c = 0; int p = 0; for (c = 0, p = 0; c != sequence_delimiter; p += increment) { c = sequence[p]; cached_sequence[p] = c; } return 0; } __device__ int InitScoreArray(const AlphabetCoder::Code* sequence, const AlphabetCoder::Code sequence_delimiter, int gap_init, int gap_extention, int default_cutoff, int increment, GappedExtenderGpu::DpCell* score_array) { int score = -gap_init; score_array[0].best = 0; score_array[0].best_gap = -gap_init; int sequence_position = 0; int array_index = 0; int cutoff = default_cutoff; if (cutoff < gap_init) { cutoff = gap_init; } for (array_index = 1; sequence[sequence_position] != sequence_delimiter; ++array_index, sequence_position += increment) { if (score < -cutoff) { break; } score_array[array_index].best = score; score_array[array_index].best_gap = score - gap_extention; score -= gap_extention; } return array_index; } __device__ int UpdateDpCells(const AlphabetCoder::Code* sequence0, const AlphabetCoder::Code s1_c, const int sequence1_position, const AlphabetCoder::Code sequence_delimiter, int increment, int cutoff, const int *score_matrix_row, const int gap_init, const int gap_extention, GappedExtenderGpu::DpCell* score_array, int *array_start_ptr, int *array_end_ptr, int* max_score_sequence0_position, int* max_score_sequence1_position, int* max_score_ptr) { int score = GappedExtenderGpu::kInitScore; int score_gap_row = GappedExtenderGpu::kInitScore; int max_score = *max_score_ptr; int array_start = *array_start_ptr; int array_end = *array_end_ptr; int sequence0_position = array_start * increment; int prev_score = score_array[array_start].best; score_array[array_start].best = score_array[array_start].best_gap; score_array[array_start].best_gap -= gap_extention; int array_last_index = array_start; for (int array_index = array_start + 1; array_index < array_end; ++array_index, sequence0_position += increment) { score = prev_score + score_matrix_row[sequence0[sequence0_position]]; prev_score = score_array[array_index].best; int score_gap_column = score_array[array_index].best_gap; score = score < score_gap_column ? score_gap_column : score; score = score < score_gap_row ? score_gap_row : score; if (max_score - score > cutoff) { array_start += array_start + 1 == array_index ? 
1 : 0; score_array[array_index].best = GappedExtenderGpu::kInitScore; score_array[array_index].best_gap = GappedExtenderGpu::kInitScore; score_gap_row = GappedExtenderGpu::kInitScore; } else { array_last_index = array_index; if (score > max_score) { max_score = score; *max_score_sequence0_position = sequence0_position; *max_score_sequence1_position = sequence1_position; } score_array[array_index].best_gap = max(score - gap_init, score_gap_column - gap_extention); score_gap_row = max(score - gap_init, score_gap_row - gap_extention); score_array[array_index].best = score; } } if (array_start + 1 != array_end) { if (array_last_index < array_end - 1) { array_end = array_last_index + 1; } else { while (score_gap_row >= (max_score - cutoff) && sequence0[sequence0_position] != sequence_delimiter) { score_array[array_end].best = score_gap_row; score_array[array_end].best_gap = score_gap_row - gap_init; score_gap_row -= gap_extention; ++array_end; sequence0_position += increment; } if (sequence0[sequence0_position] != sequence_delimiter) { score_array[array_end].best = GappedExtenderGpu::kInitScore; score_array[array_end].best_gap = GappedExtenderGpu::kInitScore; ++array_end; } } } *array_start_ptr = array_start; *array_end_ptr = array_end; *max_score_ptr = max_score; return 0; } __device__ int ExtendOneSideScoreOnlyDevice( const AlphabetCoder::Code* concatenated_sequence0, const uint32_t sequence_0_offset, const AlphabetCoder::Code* concatenated_sequence1, const uint32_t sequence_1_offset, const bool reverse, const AlphabetCoder::Code sequence_delimiter, const int* score_matrix, const uint32_t number_letters, const int gap_open, const int gap_extention, const int cutoff, GappedExtenderGpu::DpCell* dp_cells, uint32_t* best_sequence0_position, uint32_t* best_sequence1_position, int* best_score) { int increment = reverse ? -1 : 1; int max_score = 0; int max_score_sequence0_position = -increment; int max_score_sequence1_position = -increment; const AlphabetCoder::Code* sequence0 = concatenated_sequence0 + sequence_0_offset; const AlphabetCoder::Code* sequence1 = concatenated_sequence1 + sequence_1_offset; AlphabetCoder::Code cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length]; AlphabetCoder::Code *cached_sequence0 = reverse ? 
&cached_sequence0_mem[GappedExtenderGpu::kMaxSequence0Length - 1] : &cached_sequence0_mem[0]; InitCachedSequence(sequence0, sequence_delimiter, increment, cached_sequence0); int sequence1_position = 0; GappedExtenderGpu::DpCell *score_array = dp_cells; int array_start = 0; int array_end = 0; int gap_init = gap_open + gap_extention; array_end = InitScoreArray(cached_sequence0, sequence_delimiter, gap_init, gap_extention, cutoff, increment, score_array); #if 0 printf("\n"); printf(" "); for (int x = 0; sequence0[x] != sequence_delimiter; x += increment) { printf("%3d", sequence0[x]); } printf("\n"); #endif bool stop_flag = false; while (!stop_flag) { #if 0 printf("%3d", sequence1[sequence1_position - increment]); for (int x = 0; x < array_start; ++x) { printf(" "); } for (int x = array_start; x < array_end; ++x) { printf("%3d", score_array[x].best); //fprintf(stderr, "%3d", insertion_sequence1_row[x]); } printf("\n"); #endif AlphabetCoder::Code s1_c = sequence1[sequence1_position]; if (s1_c == sequence_delimiter) { stop_flag = true; } else { UpdateDpCells(cached_sequence0, s1_c, sequence1_position, sequence_delimiter, increment, cutoff, score_matrix + s1_c * number_letters, gap_init, gap_extention, score_array, &array_start, &array_end, &max_score_sequence0_position, &max_score_sequence1_position, &max_score); sequence1_position += increment; } #if 0 // debug ////////////////// if (sequence1_position >= 10) { stop_flag = true; break; } ////////////////////////// #endif if (array_start + 1 == array_end) { stop_flag = true; break; } } *best_score = max_score; *best_sequence0_position = max_score_sequence0_position; *best_sequence1_position = max_score_sequence1_position; return 0; } __global__ void __launch_bounds__(128, 1) ExtendOneSideScoreOnlyKernel( const AlphabetCoder::Code* concatenated_sequence0, const AlphabetCoder::Code* concatenated_sequence1, const uint32_t number_extensions, const bool reverse, const AlphabetCoder::Code sequence_delimiter, const int* score_matrix, const uint32_t number_letters, const int gap_open, const int gap_extention, const int cutoff, uint32_t* sequence0_positions, uint32_t* sequence1_positions, int* best_scores) { const uint32_t thread_id = blockDim.x * blockIdx.x + threadIdx.x; const uint32_t thread_id_skip = gridDim.x * blockDim.x; GappedExtenderGpu::DpCell dp_cells[GappedExtenderGpu::kMaxSequence0Length]; for (uint32_t i = thread_id; i < number_extensions; i += thread_id_skip) { uint32_t sequence_0_offset = sequence0_positions[i] + cuda_common::kOneSideMarginSize; uint32_t sequence_1_offset = sequence1_positions[i] + cuda_common::kOneSideMarginSize; uint32_t best_sequence_0_p = 0; uint32_t best_sequence_1_p = 0; int best_score = 0; ExtendOneSideScoreOnlyDevice(concatenated_sequence0, sequence_0_offset, concatenated_sequence1, sequence_1_offset, reverse, sequence_delimiter, score_matrix, number_letters, gap_open, gap_extention, cutoff, &dp_cells[0], &best_sequence_0_p, &best_sequence_1_p, &best_score); sequence0_positions[i] = sequence_0_offset + best_sequence_0_p - cuda_common::kOneSideMarginSize; sequence1_positions[i] = sequence_1_offset + best_sequence_1_p - cuda_common::kOneSideMarginSize; best_scores[i] = best_score; } return; } // call /* ExtendOneSideScoreOnlyKernel<<<1024, 128, 0, stream>>>(d_concatenated_query_sequence_, d_database_sequence_, size, reverse, sequence_delimiter_, d_score_matrix_, number_letters_ + 1, gap_open_, gap_extention_, cutoff_, d_query_concatenated_positions, d_database_positions, d_scores); * */ #endif /* 
GAPPED_EXTENDER_GPU_REF_CU_ */
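The reference implementation above is kept wrapped in a comment block, including its example launch. For orientation only, here is how that commented-out launch would read using HIP's portable launch macro; every argument name is copied from the comment and is assumed to exist in the surrounding host code:

// Hedged sketch only: arguments are taken verbatim from the commented-out call.
hipLaunchKernelGGL(ExtendOneSideScoreOnlyKernel, dim3(1024), dim3(128), 0, stream,
    d_concatenated_query_sequence_, d_database_sequence_, size, reverse,
    sequence_delimiter_, d_score_matrix_, number_letters_ + 1, gap_open_,
    gap_extention_, cutoff_, d_query_concatenated_positions,
    d_database_positions, d_scores);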
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void finishCentroids(int* centroidMass, unsigned int* centroidCount, float* centroids) { int centroidNumber = blockIdx.y * blockDim.y + threadIdx.y; int dimensionNumber = blockIdx.x * blockDim.x + threadIdx.x; if ((centroidNumber < 32) && (dimensionNumber < 34)) { float totalCount = (float)centroidCount[centroidNumber]; float mass = (float)centroidMass[dimensionNumber * 32 + centroidNumber]; centroids[dimensionNumber * 32 + centroidNumber] = mass / ((float)INTCONFACTOR * totalCount); } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void finishCentroids(int* centroidMass, unsigned int* centroidCount, float* centroids) { int centroidNumber = blockIdx.y * blockDim.y + threadIdx.y; int dimensionNumber = blockIdx.x * blockDim.x + threadIdx.x; if ((centroidNumber < 32) && (dimensionNumber < 34)) { float totalCount = (float)centroidCount[centroidNumber]; float mass = (float)centroidMass[dimensionNumber * 32 + centroidNumber]; centroids[dimensionNumber * 32 + centroidNumber] = mass / ((float)INTCONFACTOR * totalCount); } }
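A hedged launch sketch for the converted kernel; the buffer names are assumptions, and the geometry simply covers the dimensionNumber < 34 and centroidNumber < 32 guards (INTCONFACTOR is expected to come from includes.h):

int *d_mass; unsigned int *d_count; float *d_centroids;   // assumed device buffers
hipMalloc((void**)&d_mass, 34 * 32 * sizeof(int));
hipMalloc((void**)&d_count, 32 * sizeof(unsigned int));
hipMalloc((void**)&d_centroids, 34 * 32 * sizeof(float));
dim3 block(32, 8);
dim3 grid((34 + block.x - 1) / block.x, (32 + block.y - 1) / block.y);
finishCentroids<<<grid, block>>>(d_mass, d_count, d_centroids);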
Convert the following CUDA code to AMD GPU code: cuda #include <cuda_runtime.h> #include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> //Size of the (square) matrices #define N 1024 //Kernel __global__ void mul(int * A, int * B, int * C){ int i = blockIdx.x; int j = threadIdx.x; //TODO -> Compute element C(i,j) } int main(){ struct timeval t1, t2; int *hA, *hB, *hC, *hC2; //Host Matrix int *dA, *dB, *dC; //Device Matrix //Host memory allocation hA = (int*)malloc(N*N*sizeof(int)); hB = (int*)malloc(N*N*sizeof(int)); hC = (int*)malloc(N*N*sizeof(int)); hC2 = (int*)malloc(N*N*sizeof(int)); //Matrix initialization srand(time(NULL)); for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ hA[i*N+j] = rand(); hB[i*N+j] = rand(); } } //GPU memory allocation //TODO -> Allocate memory (dA, dB, dC) //Host -> GPU copy //TODO -> Copy hA to dA and hB to dB gettimeofday(&t1, 0); //Kernel execution mul<<<N, N>>>(dA, dB, dC); cudaDeviceSynchronize(); gettimeofday(&t2, 0); //Device -> Host copy //TODO -> Copy dC to hC //Multiplication on the host for(int i = 0; i < N; i++){ for(int j = 0; j < N; j++){ hC2[i*N + j] = 0; for(int k = 0; k < N; k++){ hC2[i*N + j] += hA[i*N + k] * hB[k*N + j]; } } } //Error check bool error = false; for(int i = 0; i < N*N; i++){ if(hC[i] != hC2[i]){ error = true; break; } } if(error) printf("Matrix multiplication failed.\n"); else printf("Matrix multiplication correct.\n"); double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; printf("Time: %f ms\n", time); //Free memory free(hA); free(hB); free(hC); //TODO -> Free dA, dB and dC }
#include <hip/hip_runtime.h> #include <stdio.h> #include <time.h> #include <stdlib.h> #include <sys/time.h> //Size of the (square) matrices #define N 1024 //Kernel __global__ void mul(int * A, int * B, int * C){ int i = blockIdx.x; int j = threadIdx.x; //TODO -> Compute element C(i,j) } int main(){ struct timeval t1, t2; int *hA, *hB, *hC, *hC2; //Host Matrix int *dA, *dB, *dC; //Device Matrix //Host memory allocation hA = (int*)malloc(N*N*sizeof(int)); hB = (int*)malloc(N*N*sizeof(int)); hC = (int*)malloc(N*N*sizeof(int)); hC2 = (int*)malloc(N*N*sizeof(int)); //Matrix initialization srand(time(NULL)); for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ hA[i*N+j] = rand(); hB[i*N+j] = rand(); } } //GPU memory allocation //TODO -> Allocate memory (dA, dB, dC) //Host -> GPU copy //TODO -> Copy hA to dA and hB to dB gettimeofday(&t1, 0); //Kernel execution mul<<<N, N>>>(dA, dB, dC); hipDeviceSynchronize(); gettimeofday(&t2, 0); //Device -> Host copy //TODO -> Copy dC to hC //Multiplication on the host for(int i = 0; i < N; i++){ for(int j = 0; j < N; j++){ hC2[i*N + j] = 0; for(int k = 0; k < N; k++){ hC2[i*N + j] += hA[i*N + k] * hB[k*N + j]; } } } //Error check bool error = false; for(int i = 0; i < N*N; i++){ if(hC[i] != hC2[i]){ error = true; break; } } if(error) printf("Matrix multiplication failed.\n"); else printf("Matrix multiplication correct.\n"); double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; printf("Time: %f ms\n", time); //Free memory free(hA); free(hB); free(hC); //TODO -> Free dA, dB and dC }
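The exercise leaves its TODOs unfilled, so the converted file still does not run end to end. A minimal sketch of those sections in HIP, reusing the names declared above; mul_filled is an added name, and none of this is part of the original exercise solution:

// One way to fill in the kernel TODO (one thread computes one C(i,j)):
__global__ void mul_filled(int * A, int * B, int * C){
    int i = blockIdx.x;
    int j = threadIdx.x;
    int acc = 0;
    for (int k = 0; k < N; k++) acc += A[i*N + k] * B[k*N + j];
    C[i*N + j] = acc;
}
// Host-side TODOs (allocation, copies, cleanup):
hipMalloc((void**)&dA, N*N*sizeof(int));
hipMalloc((void**)&dB, N*N*sizeof(int));
hipMalloc((void**)&dC, N*N*sizeof(int));
hipMemcpy(dA, hA, N*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dB, hB, N*N*sizeof(int), hipMemcpyHostToDevice);
// ...kernel launch and timing as above, then:
hipMemcpy(hC, dC, N*N*sizeof(int), hipMemcpyDeviceToHost);
hipFree(dA); hipFree(dB); hipFree(dC);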
Convert the following CUDA code to AMD GPU code: cuda #include <cuda.h> #include <stdio.h> #include <sys/time.h> #define BLOCK_SIZE 16 __global__ void lud_diagonal(float *m, int matrix_dim, int offset) { int i,j; __shared__ float shadow[BLOCK_SIZE][BLOCK_SIZE]; /* Each thread block, i.e. 1D 16 threads, loads a * 2D block, i.e. 16x16, of data from the diagonal * of the matrix into shared memory 'shadow' */ int array_offset = offset*matrix_dim+offset; for(i = 0; i < BLOCK_SIZE; i++){ shadow[i][threadIdx.x] = m[array_offset+threadIdx.x]; array_offset += matrix_dim; } __syncthreads(); for(i = 0; i < BLOCK_SIZE-1; i++) { if ( threadIdx.x > i) { /* starts at 15 threads and then decrease one thread each time */ for(j = 0; j < i; j++) { /* This for loop computes cols */ shadow[threadIdx.x][i] -= shadow[threadIdx.x][j]*shadow[j][i]; } shadow[threadIdx.x][i] /= shadow[i][i]; } __syncthreads(); if ( threadIdx.x > i) { for( j = 0; j < i+1; j++) { /* This for loop computes rows */ shadow[i+1][threadIdx.x] -= shadow[i+1][j]*shadow[j][threadIdx.x]; } } __syncthreads(); } /* The first row is not modified, it * is no need to write it back to the * global memory */ array_offset = (offset+1)*matrix_dim+offset; for(i = 1; i < BLOCK_SIZE; i++) { m[array_offset+threadIdx.x] = shadow[i][threadIdx.x]; array_offset += matrix_dim; } } __global__ void lud_diagonal_noshr(float *m, int matrix_dim, int offset) { int i,j; int array_offset = offset*matrix_dim+offset; for(i = 0; i < BLOCK_SIZE-1; i++) { if ( threadIdx.x > i) { /* starts at 15 threads and then decrease one thread each time */ for(j = 0; j < i; j++) { /* This for loop computes cols */ m[(array_offset+threadIdx.x*matrix_dim) + i] -= m[(array_offset+threadIdx.x*matrix_dim) + j]* m[(array_offset+j*matrix_dim) + i]; } m[(array_offset+threadIdx.x*matrix_dim) + i] /= m[(array_offset+i*matrix_dim) + i]; } __syncthreads(); if ( threadIdx.x > i) { for( j = 0; j < i+1; j++) { /* This for loop computes rows */ m[(array_offset+(i+1)*matrix_dim) + threadIdx.x] -= m[(array_offset+(i+1)*matrix_dim) + j]* m[(array_offset+j*matrix_dim) + threadIdx.x]; } } __syncthreads(); } } __global__ void lud_perimeter(float *m, int matrix_dim, int offset) { __shared__ float dia[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i,j, array_offset; int idx; /* For this kernel, each block contains 32 threads */ if ( threadIdx.x < BLOCK_SIZE) { /* threads 0 ... 15 */ idx = threadIdx.x; array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE/2; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_row[i][idx]=m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx]; array_offset += matrix_dim; } } else { /* threads 16 ... 
31 */ idx = threadIdx.x-BLOCK_SIZE; array_offset = (offset+BLOCK_SIZE/2)*matrix_dim+offset; for (i=BLOCK_SIZE/2; i < BLOCK_SIZE; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_col[i][idx] = m[array_offset+idx]; array_offset += matrix_dim; } } __syncthreads(); /* this version works ok on hardware, but not gpgpusim ************************************************************** if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) peri_row[i][idx]-=dia[i][j]*peri_row[j][idx]; } array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx] = peri_row[i][idx]; array_offset += matrix_dim; } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) peri_col[idx][i]-=peri_col[idx][j]*dia[j][i]; peri_col[idx][i] /= dia[i][i]; } __syncthreads(); array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ m[array_offset+idx] = peri_col[i][idx]; array_offset += matrix_dim; } } ***************************************************************/ if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) { peri_row[i][idx]-=dia[i][j]*peri_row[j][idx]; } } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) { peri_col[idx][i]-=peri_col[idx][j]*dia[j][i]; } peri_col[idx][i] /= dia[i][i]; } } __syncthreads(); /* write data back to global memory */ if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx] = peri_row[i][idx]; array_offset += matrix_dim; } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ m[array_offset+idx] = peri_col[i][idx]; array_offset += matrix_dim; } } } __global__ void lud_perimeter_noshr(float *m, int matrix_dim, int offset) { int i,j, array_offset; int idx; if (threadIdx.x < BLOCK_SIZE) { //peri-row array_offset = offset*matrix_dim+offset; idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) { m[array_offset+i*matrix_dim+(blockIdx.x+1)*BLOCK_SIZE+idx] -= m[array_offset+i*matrix_dim+j] * m[array_offset+j*matrix_dim+(blockIdx.x+1)*BLOCK_SIZE+idx]; } } } else { //peri-col array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) { m[array_offset+idx*matrix_dim+i] -= m[array_offset+idx*matrix_dim+j] * m[offset*matrix_dim+offset+j*matrix_dim+i]; } m[array_offset+idx*matrix_dim+i] /= m[offset*matrix_dim+offset+i*matrix_dim+i]; } } } __global__ void lud_internal(float *m, int matrix_dim, int offset) { __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i; float sum; int global_row_id = offset + (blockIdx.y+1)*BLOCK_SIZE; int global_col_id = offset + (blockIdx.x+1)*BLOCK_SIZE; peri_row[threadIdx.y][threadIdx.x] = m[(offset+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x]; peri_col[threadIdx.y][threadIdx.x] = m[(global_row_id+threadIdx.y)*matrix_dim+offset+threadIdx.x]; __syncthreads(); sum = 0; for (i=0; i < BLOCK_SIZE; i++) { sum += peri_col[threadIdx.y][i] * 
peri_row[i][threadIdx.x]; } m[(global_row_id+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x] -= sum; } __global__ void lud_internal_noshr(float *m, int matrix_dim, int offset) { int i; float sum; int global_row_id = offset + (blockIdx.y+1)*BLOCK_SIZE; int global_col_id = offset + (blockIdx.x+1)*BLOCK_SIZE; sum = 0; for (i=0; i < BLOCK_SIZE; i++) { sum += m[(global_row_id+threadIdx.y)*matrix_dim+offset+i] * m[(offset+i)*matrix_dim+global_col_id+threadIdx.x]; } m[(global_row_id+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x] -= sum; } void lud_cuda(float *m, int matrix_dim, int do_shared) { int i=0; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); /* beginning of timing point */ struct timeval tv1, tv2; gettimeofday(&tv1, NULL); if( do_shared) { //printf("Executing kernels with shared memory!\n"); for (i=0; i < matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) { lud_diagonal<<<1, BLOCK_SIZE>>>(m, matrix_dim, i); lud_perimeter<<<(matrix_dim-i)/BLOCK_SIZE-1, BLOCK_SIZE*2>>>(m, matrix_dim, i); dim3 dimGrid((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1); lud_internal<<<dimGrid, dimBlock>>>(m, matrix_dim, i); } lud_diagonal<<<1,BLOCK_SIZE>>>(m, matrix_dim, i); } else { //printf("Executing kernels without shared memory!\n"); cudaFuncSetCacheConfig("lud_diagonal_noshr", cudaFuncCachePreferL1); cudaFuncSetCacheConfig("lud_perimeter_noshr", cudaFuncCachePreferL1); cudaFuncSetCacheConfig("lud_internal_noshr", cudaFuncCachePreferL1); for (i=0; i < matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) { lud_diagonal_noshr<<<1, BLOCK_SIZE>>>(m, matrix_dim, i); lud_perimeter_noshr<<<(matrix_dim-i)/BLOCK_SIZE-1, BLOCK_SIZE*2>>>(m, matrix_dim, i); dim3 dimGrid((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1); lud_internal_noshr<<<dimGrid, dimBlock>>>(m, matrix_dim, i); } lud_diagonal<<<1,BLOCK_SIZE>>>(m, matrix_dim, i); } /* end of timing point */ gettimeofday(&tv2, NULL); double runtime = ((tv2.tv_sec*1000.0 + tv2.tv_usec/1000.0)-(tv1.tv_sec*1000.0 + tv1.tv_usec/1000.0)); printf("Runtime(milliseconds): %f\n", runtime); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> #define BLOCK_SIZE 16 __global__ void lud_diagonal(float *m, int matrix_dim, int offset) { int i,j; __shared__ float shadow[BLOCK_SIZE][BLOCK_SIZE]; /* Each thread block, i.e. 1D 16 threads, loads a * 2D block, i.e. 16x16, of data from the diagonal * of the matrix into shared memory 'shadow' */ int array_offset = offset*matrix_dim+offset; for(i = 0; i < BLOCK_SIZE; i++){ shadow[i][threadIdx.x] = m[array_offset+threadIdx.x]; array_offset += matrix_dim; } __syncthreads(); for(i = 0; i < BLOCK_SIZE-1; i++) { if ( threadIdx.x > i) { /* starts at 15 threads and then decrease one thread each time */ for(j = 0; j < i; j++) { /* This for loop computes cols */ shadow[threadIdx.x][i] -= shadow[threadIdx.x][j]*shadow[j][i]; } shadow[threadIdx.x][i] /= shadow[i][i]; } __syncthreads(); if ( threadIdx.x > i) { for( j = 0; j < i+1; j++) { /* This for loop computes rows */ shadow[i+1][threadIdx.x] -= shadow[i+1][j]*shadow[j][threadIdx.x]; } } __syncthreads(); } /* The first row is not modified, it * is no need to write it back to the * global memory */ array_offset = (offset+1)*matrix_dim+offset; for(i = 1; i < BLOCK_SIZE; i++) { m[array_offset+threadIdx.x] = shadow[i][threadIdx.x]; array_offset += matrix_dim; } } __global__ void lud_diagonal_noshr(float *m, int matrix_dim, int offset) { int i,j; int array_offset = offset*matrix_dim+offset; for(i = 0; i < BLOCK_SIZE-1; i++) { if ( threadIdx.x > i) { /* starts at 15 threads and then decrease one thread each time */ for(j = 0; j < i; j++) { /* This for loop computes cols */ m[(array_offset+threadIdx.x*matrix_dim) + i] -= m[(array_offset+threadIdx.x*matrix_dim) + j]* m[(array_offset+j*matrix_dim) + i]; } m[(array_offset+threadIdx.x*matrix_dim) + i] /= m[(array_offset+i*matrix_dim) + i]; } __syncthreads(); if ( threadIdx.x > i) { for( j = 0; j < i+1; j++) { /* This for loop computes rows */ m[(array_offset+(i+1)*matrix_dim) + threadIdx.x] -= m[(array_offset+(i+1)*matrix_dim) + j]* m[(array_offset+j*matrix_dim) + threadIdx.x]; } } __syncthreads(); } } __global__ void lud_perimeter(float *m, int matrix_dim, int offset) { __shared__ float dia[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i,j, array_offset; int idx; /* For this kernel, each block contains 32 threads */ if ( threadIdx.x < BLOCK_SIZE) { /* threads 0 ... 15 */ idx = threadIdx.x; array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE/2; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = offset*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_row[i][idx]=m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx]; array_offset += matrix_dim; } } else { /* threads 16 ... 
31 */ idx = threadIdx.x-BLOCK_SIZE; array_offset = (offset+BLOCK_SIZE/2)*matrix_dim+offset; for (i=BLOCK_SIZE/2; i < BLOCK_SIZE; i++){ dia[i][idx]=m[array_offset+idx]; array_offset += matrix_dim; } array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for (i=0; i < BLOCK_SIZE; i++) { peri_col[i][idx] = m[array_offset+idx]; array_offset += matrix_dim; } } __syncthreads(); /* this version works ok on hardware, but not gpgpusim ************************************************************** if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) peri_row[i][idx]-=dia[i][j]*peri_row[j][idx]; } array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx] = peri_row[i][idx]; array_offset += matrix_dim; } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) peri_col[idx][i]-=peri_col[idx][j]*dia[j][i]; peri_col[idx][i] /= dia[i][i]; } __syncthreads(); array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ m[array_offset+idx] = peri_col[i][idx]; array_offset += matrix_dim; } } ***************************************************************/ if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) { peri_row[i][idx]-=dia[i][j]*peri_row[j][idx]; } } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) { peri_col[idx][i]-=peri_col[idx][j]*dia[j][i]; } peri_col[idx][i] /= dia[i][i]; } } __syncthreads(); /* write data back to global memory */ if (threadIdx.x < BLOCK_SIZE) { //peri-row idx=threadIdx.x; array_offset = (offset+1)*matrix_dim+offset; for(i=1; i < BLOCK_SIZE; i++){ m[array_offset+(blockIdx.x+1)*BLOCK_SIZE+idx] = peri_row[i][idx]; array_offset += matrix_dim; } } else { //peri-col idx=threadIdx.x - BLOCK_SIZE; array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; for(i=0; i < BLOCK_SIZE; i++){ m[array_offset+idx] = peri_col[i][idx]; array_offset += matrix_dim; } } } __global__ void lud_perimeter_noshr(float *m, int matrix_dim, int offset) { int i,j, array_offset; int idx; if (threadIdx.x < BLOCK_SIZE) { //peri-row array_offset = offset*matrix_dim+offset; idx=threadIdx.x; for(i=1; i < BLOCK_SIZE; i++){ for (j=0; j < i; j++) { m[array_offset+i*matrix_dim+(blockIdx.x+1)*BLOCK_SIZE+idx] -= m[array_offset+i*matrix_dim+j] * m[array_offset+j*matrix_dim+(blockIdx.x+1)*BLOCK_SIZE+idx]; } } } else { //peri-col array_offset = (offset+(blockIdx.x+1)*BLOCK_SIZE)*matrix_dim+offset; idx=threadIdx.x - BLOCK_SIZE; for(i=0; i < BLOCK_SIZE; i++){ for(j=0; j < i; j++) { m[array_offset+idx*matrix_dim+i] -= m[array_offset+idx*matrix_dim+j] * m[offset*matrix_dim+offset+j*matrix_dim+i]; } m[array_offset+idx*matrix_dim+i] /= m[offset*matrix_dim+offset+i*matrix_dim+i]; } } } __global__ void lud_internal(float *m, int matrix_dim, int offset) { __shared__ float peri_row[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float peri_col[BLOCK_SIZE][BLOCK_SIZE]; int i; float sum; int global_row_id = offset + (blockIdx.y+1)*BLOCK_SIZE; int global_col_id = offset + (blockIdx.x+1)*BLOCK_SIZE; peri_row[threadIdx.y][threadIdx.x] = m[(offset+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x]; peri_col[threadIdx.y][threadIdx.x] = m[(global_row_id+threadIdx.y)*matrix_dim+offset+threadIdx.x]; __syncthreads(); sum = 0; for (i=0; i < BLOCK_SIZE; i++) { sum += peri_col[threadIdx.y][i] * 
peri_row[i][threadIdx.x]; } m[(global_row_id+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x] -= sum; } __global__ void lud_internal_noshr(float *m, int matrix_dim, int offset) { int i; float sum; int global_row_id = offset + (blockIdx.y+1)*BLOCK_SIZE; int global_col_id = offset + (blockIdx.x+1)*BLOCK_SIZE; sum = 0; for (i=0; i < BLOCK_SIZE; i++) { sum += m[(global_row_id+threadIdx.y)*matrix_dim+offset+i] * m[(offset+i)*matrix_dim+global_col_id+threadIdx.x]; } m[(global_row_id+threadIdx.y)*matrix_dim+global_col_id+threadIdx.x] -= sum; } void lud_cuda(float *m, int matrix_dim, int do_shared) { int i=0; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); /* beginning of timing point */ struct timeval tv1, tv2; gettimeofday(&tv1, NULL); if( do_shared) { //printf("Executing kernels with shared memory!\n"); for (i=0; i < matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) { lud_diagonal<<<1, BLOCK_SIZE>>>(m, matrix_dim, i); lud_perimeter<<<(matrix_dim-i)/BLOCK_SIZE-1, BLOCK_SIZE*2>>>(m, matrix_dim, i); dim3 dimGrid((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1); lud_internal<<<dimGrid, dimBlock>>>(m, matrix_dim, i); } lud_diagonal<<<1,BLOCK_SIZE>>>(m, matrix_dim, i); } else { //printf("Executing kernels without shared memory!\n"); hipFuncSetCacheConfig(reinterpret_cast<const void*>(lud_diagonal_noshr), hipFuncCachePreferL1); hipFuncSetCacheConfig(reinterpret_cast<const void*>(lud_perimeter_noshr), hipFuncCachePreferL1); hipFuncSetCacheConfig(reinterpret_cast<const void*>(lud_internal_noshr), hipFuncCachePreferL1); for (i=0; i < matrix_dim-BLOCK_SIZE; i += BLOCK_SIZE) { lud_diagonal_noshr<<<1, BLOCK_SIZE>>>(m, matrix_dim, i); lud_perimeter_noshr<<<(matrix_dim-i)/BLOCK_SIZE-1, BLOCK_SIZE*2>>>(m, matrix_dim, i); dim3 dimGrid((matrix_dim-i)/BLOCK_SIZE-1, (matrix_dim-i)/BLOCK_SIZE-1); lud_internal_noshr<<<dimGrid, dimBlock>>>(m, matrix_dim, i); } lud_diagonal<<<1,BLOCK_SIZE>>>(m, matrix_dim, i); } /* end of timing point */ gettimeofday(&tv2, NULL); double runtime = ((tv2.tv_sec*1000.0 + tv2.tv_usec/1000.0)-(tv1.tv_sec*1000.0 + tv1.tv_usec/1000.0)); printf("Runtime(milliseconds): %f\n", runtime); }
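A note on the three cache-configuration calls above: the CUDA original used the legacy string-name form, but hipFuncSetCacheConfig identifies a kernel by its device function address, so the conversion passes the kernel symbols themselves. On most AMD GPUs the L1/LDS split is fixed in hardware, so the call is best treated as a hint that may have no effect:

// Pattern used in the conversion above (a hint; may be a no-op on AMD GPUs):
hipFuncSetCacheConfig(reinterpret_cast<const void*>(lud_internal_noshr),
                      hipFuncCachePreferL1);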
Convert the following CUDA code to AMD GPU code: cuda #include<stdio.h> #include<stdlib.h> #include<time.h> # define FILL_PERCENT 10 # define SIZE 250 # define BLOCK_SIZE 32 __global__ void spmvNormal( int *M, int *V, int *res){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int temp,i; if (idx < SIZE) { temp = 0; //dot product for one row for (i = 0; i < SIZE; i++){ temp += M[idx * SIZE + i] * V[i]; } res[idx] = temp; } } __global__ void spmvCSR(int *ro, int *ci, int *val, int *V, int *res_csr){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i; int start, end; int dot; if(idx < SIZE){ dot = 0; start = ro[idx]; end = ro[idx + 1]; for(i = start; i < end; i++){ dot+= val[i] * V[ci[i]]; } } res_csr[idx] = dot; } __global__ void spmvECSR(int *ro, int *dd,int *val, int *V, int* res_ecsr){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int start, end; int dot; if(idx < SIZE){ start = ro[idx]; end = ro[idx + 1]; j=0; for(i = 0;i<=start;i++) j += dd[i]; dot = val[start] * V[j]; for(i = start+1; i < end; i++){ dot += val[i] * V[j+dd[i]]; } } res_ecsr[idx] = dot; } __global__ void spmvECSR_mod(int *ro, int *dd, int *val, int *V, int *res_ecsr){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int start, end; int dot; if(idx < SIZE){ start = ro[idx]; end = ro[idx + 1]; j = dd[start]; dot = val[start] * V[j]; for(i = start+1; i < end; i++){ dot+= val[i] * V[j+dd[i]]; } } res_ecsr[idx] = dot; } int **M,*V; int *ro,*ci,*val,*dd; int *ro_gpu,*ci_gpu,*val_gpu,*dd_gpu,*V_gpu,*M_gpu; int *res_csr,*res_ecsr,*res,*res_ecsr_mod; int *res_csr_gpu, *res_ecsr_gpu,*res_gpu,*res_ecsr_mod_gpu; int main(){ //cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); //variable declarations int i,j; cudaEvent_t start,stop,start_csr,stop_csr,start_ecsr,stop_ecsr,start_ecsr_mod,stop_ecsr_mod; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start_csr); cudaEventCreate(&stop_csr); cudaEventCreate(&start_ecsr); cudaEventCreate(&stop_ecsr); cudaEventCreate(&start_ecsr_mod); cudaEventCreate(&stop_ecsr_mod); float time_csr=0,time_ecsr=0,time_normal=0,time_ecsr_mod=0; // Define CudaError cudaError_t err; //initiallizing result vectors res = (int *)malloc(SIZE*sizeof(int)); res_csr = (int * )malloc(SIZE * sizeof(int)); res_ecsr = (int *)malloc(SIZE * sizeof(int)); res_ecsr_mod = (int *)malloc(SIZE * sizeof(int)); //allocating SpMV printf("Generating Sparse Matrix ..."); M = (int **)malloc(SIZE * sizeof(int *)); for(i=0;i<SIZE;i++) M[i] = (int *)malloc(SIZE * sizeof(int)); for(i=0;i<SIZE;i++) for(j=0;j<SIZE;j++) M[i][j] = 0; int non_zero_ct = (int)(FILL_PERCENT * SIZE/100); // printf("%d\n",non_zero_ct); // int non_zero_ct = 2; for(i=0;i<non_zero_ct;i++){ long long n = (long long)((rand() % 100) * SIZE* SIZE)/100; long c = n % SIZE; long r = (int)(n / SIZE); M[r][c] = (rand() % 100) + 1; } printf("Done\n"); printf("Generating Dense Vector..."); V = (int *)malloc(SIZE * sizeof(int)); for(i=0;i<SIZE;i++) V[i] = (rand() % 100) + 1; printf("Done\n"); //Building CSR and ECSR rep of SpM printf("Building CSR vectors and Distance Difference vector..."); int cct = 0; int prev = 0; ro = (int *)malloc((SIZE + 1)*sizeof(int)); ci = (int *)malloc(non_zero_ct *2* sizeof(int)); val = (int *)malloc(non_zero_ct *2* sizeof(int)); ro[0] = 0; dd = (int *)malloc(non_zero_ct * 2 * sizeof(int)/2); /*for(i=0;i<SIZE;i++) for(j=0;j<SIZE;j++) printf("%d ",M[i][j]); printf("\n");*/ for(i=0;i<SIZE;i++){ int flag = 0; for(j=0;j<SIZE;j++){ //printf("%d ",M[i][j]); if(M[i][j]!=0){ while(j-prev>255){ printf("abc "); ci[cct] = 
prev + 255; val[cct] = 0; dd[cct] = 255; prev = prev + 255; cct++; } ci[cct] = j; val[cct] = M[i][j]; if(flag==0){ dd[cct] = j; flag++; } else dd[cct] = j - prev; prev = j; cct++; } } //printf("\n"); ro[i+1] = cct; } printf("Done\n"); // for(i=0;i<ro[SIZE];i++){ // printf("%d %d\n",ci[i],val[i]); // } //Setup memory on GPU cudaMalloc((void **)&M_gpu,(SIZE * sizeof(int))*(SIZE)); cudaMalloc((void **)&ro_gpu, (SIZE + 1)*sizeof(int)); cudaMalloc((void **)&ci_gpu, (non_zero_ct * 2 * sizeof(int))); cudaMalloc((void **)&val_gpu, (non_zero_ct * 2 * sizeof(int))); cudaMalloc((void **)&dd_gpu, (non_zero_ct * 2 * sizeof(int))/2); cudaMalloc((void **)&V_gpu, (SIZE * sizeof(int))); cudaMalloc((void **)&res_gpu, (SIZE * sizeof(int))); cudaMalloc((void **)&res_csr_gpu, (SIZE * sizeof(int))); cudaMalloc((void **)&res_ecsr_gpu, (SIZE * sizeof(int))); cudaMalloc((void **)&res_ecsr_mod_gpu, (SIZE * sizeof(int))); //printf("Done cuda malloc\n"); //transfer to device cudaMemcpy(M_gpu, M, (SIZE * SIZE * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(ro_gpu, ro, (SIZE +1)*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(ci_gpu, ci , (non_zero_ct * 2 * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(val_gpu, val, (non_zero_ct * 2 * sizeof(int)),cudaMemcpyHostToDevice); cudaMemcpy(dd_gpu, dd, (non_zero_ct * 2 * sizeof(int)/2), cudaMemcpyHostToDevice); cudaMemcpy(V_gpu, V, (SIZE * sizeof(int)), cudaMemcpyHostToDevice); //printf("Done transferring to device\n"); //setting CUDA parameters int nb = ceil(SIZE/BLOCK_SIZE); int nt = BLOCK_SIZE; // dim3 GridDim,BlockDim; // BlockDim.x = nb; // BlockDim.y=1; // GridDim.x = BLOCK_SIZE; // GridDim.y = BLOCK_SIZE; //Starting Normal Multiplication printf("\n\nStarting Normal Multiplication..."); //clock_t start,end; //start = clock(); cudaEventRecord(start); spmvNormal<<< nb,nt >>>(M_gpu,V_gpu,res_gpu); cudaEventRecord(stop); cudaEventSynchronize(stop); //end = clock(); //time_normal += end - start; cudaEventElapsedTime(&time_normal, start, stop); //Checking for CUDA errors err = cudaGetLastError(); if(err!=cudaSuccess){ printf("ERROR: %s\n",cudaGetErrorString(err)); } printf("Done\n"); //Transfer result back to memory cudaMemcpy(res, res_gpu, (SIZE * sizeof(int)), cudaMemcpyDeviceToHost); //Starting CSR Multiplication printf("\n\nStarting CSR Multiplication..."); // clock_t start_csr,end_csr; // start_csr = clock(); cudaEventRecord(start_csr); spmvCSR<<< nb,nt>>>(ro_gpu,dd_gpu,val_gpu,V_gpu,res_csr_gpu); cudaEventRecord(stop_csr); cudaEventSynchronize(stop_csr); cudaEventElapsedTime(&time_csr,start_csr,stop_csr); //end_csr = clock(); // time_csr += end_csr - start_csr; //Checking for CUDA errors err = cudaGetLastError(); if(err!=cudaSuccess){ printf("ERROR: %s\n",cudaGetErrorString(err)); exit(0); } printf("Done\n"); //Transfer result back to memory cudaMemcpy(res_csr, res_csr_gpu, (SIZE * sizeof(int)), cudaMemcpyDeviceToHost); //Starting ECSR Multiplication printf("\n\nStarting ECSR Multiplication..."); // clock_t start_ecsr,end_ecsr; // start_ecsr = clock(); cudaEventRecord(start_ecsr); spmvECSR<<< nb,nt>>>(ro_gpu,dd_gpu,val_gpu,V_gpu,res_ecsr_gpu); cudaEventRecord(stop_ecsr); cudaEventSynchronize(stop_ecsr); cudaEventElapsedTime(&time_ecsr, start_ecsr,stop_ecsr); // end_ecsr = clock(); // time_ecsr += end_ecsr - start_ecsr; //Checking for CUDA errors err = cudaGetLastError(); if(err!=cudaSuccess){ printf("ERROR: %s\n",cudaGetErrorString(err)); exit(0); } printf("Done\n"); //Transfer result back to memory cudaMemcpy(res_ecsr, res_ecsr_gpu, (SIZE * sizeof(int)), 
cudaMemcpyDeviceToHost); printf("\n\nStarting ECSR(modified) Multiplication..."); // clock_t start_ecsr,end_ecsr; // start_ecsr = clock(); cudaEventRecord(start_ecsr_mod); spmvECSR_mod<<< nb,nt>>>(ro_gpu,dd_gpu,val_gpu,V_gpu,res_ecsr_mod_gpu); cudaEventRecord(stop_ecsr_mod); cudaEventSynchronize(stop_ecsr_mod); cudaEventElapsedTime(&time_ecsr_mod, start_ecsr_mod,stop_ecsr_mod); // end_ecsr = clock(); // time_ecsr += end_ecsr - start_ecsr; //Checking for CUDA errors err = cudaGetLastError(); if(err!=cudaSuccess){ printf("ERROR: %s\n",cudaGetErrorString(err)); exit(0); } printf("Done\n\n"); //Transfer result back to memory cudaMemcpy(res_ecsr_mod, res_ecsr_mod_gpu, (SIZE * sizeof(int)), cudaMemcpyDeviceToHost); //free memory for(i=0;i<SIZE;i++) free(M[i]); free(M); free(V); free(ro); free(ci); free(val); free(dd); free(res); free(res_csr); free(res_ecsr); cudaFree(ro_gpu); cudaFree(ci_gpu); cudaFree(val_gpu); cudaFree(dd_gpu); cudaFree(M_gpu); cudaFree(V_gpu); cudaFree(res_gpu); cudaFree(res_csr_gpu); cudaFree(res_ecsr_gpu); printf("===============================================================================\n"); printf("Average time taken for normal multiplication:%lf\n",time_normal); printf("---------------------------------------------------------------\n"); printf("Average time taken for CSR multiplication:%lf\n",time_csr); printf("CSR multiplication runs %lf times faster than normal multiplication\n",time_normal/time_csr); printf("---------------------------------------------------------------\n"); printf("Average time taken for ECSR multiplication:%lf\n",time_ecsr); printf("ECSR multiplication runs %lf times faster than normal multiplication\n",time_normal/time_ecsr); printf("---------------------------------------------------------------\n"); printf("Average time taken for ECSR(modified) multiplication:%lf\n",time_ecsr_mod); printf("ECSR(modfied) multiplication runs %lf times faster than normal multiplication\n",time_normal/time_ecsr_mod); printf("---------------------------------------------------------------\n"); printf("It is seen that time taken for ECSR multiplication is %lf times that for ECSR(modified) multiplication\n",time_ecsr/time_ecsr_mod); return 0; }
#include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> #include<time.h> # define FILL_PERCENT 10 # define SIZE 250 # define BLOCK_SIZE 32 __global__ void spmvNormal( int *M, int *V, int *res){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int temp,i; if (idx < SIZE) { temp = 0; //dot product for one row for (i = 0; i < SIZE; i++){ temp += M[idx * SIZE + i] * V[i]; } res[idx] = temp; } } __global__ void spmvCSR(int *ro, int *ci, int *val, int *V, int *res_csr){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i; int start, end; int dot; if(idx < SIZE){ dot = 0; start = ro[idx]; end = ro[idx + 1]; for(i = start; i < end; i++){ dot+= val[i] * V[ci[i]]; } } res_csr[idx] = dot; } __global__ void spmvECSR(int *ro, int *dd,int *val, int *V, int* res_ecsr){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int start, end; int dot; if(idx < SIZE){ start = ro[idx]; end = ro[idx + 1]; j=0; for(i = 0;i<=start;i++) j += dd[i]; dot = val[start] * V[j]; for(i = start+1; i < end; i++){ dot += val[i] * V[j+dd[i]]; } } res_ecsr[idx] = dot; } __global__ void spmvECSR_mod(int *ro, int *dd, int *val, int *V, int *res_ecsr){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; int start, end; int dot; if(idx < SIZE){ start = ro[idx]; end = ro[idx + 1]; j = dd[start]; dot = val[start] * V[j]; for(i = start+1; i < end; i++){ dot+= val[i] * V[j+dd[i]]; } } res_ecsr[idx] = dot; } int **M,*V; int *ro,*ci,*val,*dd; int *ro_gpu,*ci_gpu,*val_gpu,*dd_gpu,*V_gpu,*M_gpu; int *res_csr,*res_ecsr,*res,*res_ecsr_mod; int *res_csr_gpu, *res_ecsr_gpu,*res_gpu,*res_ecsr_mod_gpu; int main(){ //cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); //variable declarations int i,j; hipEvent_t start,stop,start_csr,stop_csr,start_ecsr,stop_ecsr,start_ecsr_mod,stop_ecsr_mod; hipEventCreate(&start); hipEventCreate(&stop); hipEventCreate(&start_csr); hipEventCreate(&stop_csr); hipEventCreate(&start_ecsr); hipEventCreate(&stop_ecsr); hipEventCreate(&start_ecsr_mod); hipEventCreate(&stop_ecsr_mod); float time_csr=0,time_ecsr=0,time_normal=0,time_ecsr_mod=0; // Define CudaError hipError_t err; //initiallizing result vectors res = (int *)malloc(SIZE*sizeof(int)); res_csr = (int * )malloc(SIZE * sizeof(int)); res_ecsr = (int *)malloc(SIZE * sizeof(int)); res_ecsr_mod = (int *)malloc(SIZE * sizeof(int)); //allocating SpMV printf("Generating Sparse Matrix ..."); M = (int **)malloc(SIZE * sizeof(int *)); for(i=0;i<SIZE;i++) M[i] = (int *)malloc(SIZE * sizeof(int)); for(i=0;i<SIZE;i++) for(j=0;j<SIZE;j++) M[i][j] = 0; int non_zero_ct = (int)(FILL_PERCENT * SIZE/100); // printf("%d\n",non_zero_ct); // int non_zero_ct = 2; for(i=0;i<non_zero_ct;i++){ long long n = (long long)((rand() % 100) * SIZE* SIZE)/100; long c = n % SIZE; long r = (int)(n / SIZE); M[r][c] = (rand() % 100) + 1; } printf("Done\n"); printf("Generating Dense Vector..."); V = (int *)malloc(SIZE * sizeof(int)); for(i=0;i<SIZE;i++) V[i] = (rand() % 100) + 1; printf("Done\n"); //Building CSR and ECSR rep of SpM printf("Building CSR vectors and Distance Difference vector..."); int cct = 0; int prev = 0; ro = (int *)malloc((SIZE + 1)*sizeof(int)); ci = (int *)malloc(non_zero_ct *2* sizeof(int)); val = (int *)malloc(non_zero_ct *2* sizeof(int)); ro[0] = 0; dd = (int *)malloc(non_zero_ct * 2 * sizeof(int)/2); /*for(i=0;i<SIZE;i++) for(j=0;j<SIZE;j++) printf("%d ",M[i][j]); printf("\n");*/ for(i=0;i<SIZE;i++){ int flag = 0; for(j=0;j<SIZE;j++){ //printf("%d ",M[i][j]); if(M[i][j]!=0){ while(j-prev>255){ printf("abc "); ci[cct] = prev + 255; val[cct] = 0; dd[cct] 
= 255; prev = prev + 255; cct++; } ci[cct] = j; val[cct] = M[i][j]; if(flag==0){ dd[cct] = j; flag++; } else dd[cct] = j - prev; prev = j; cct++; } } //printf("\n"); ro[i+1] = cct; } printf("Done\n"); // for(i=0;i<ro[SIZE];i++){ // printf("%d %d\n",ci[i],val[i]); // } //Setup memory on GPU hipMalloc((void **)&M_gpu,(SIZE * sizeof(int))*(SIZE)); hipMalloc((void **)&ro_gpu, (SIZE + 1)*sizeof(int)); hipMalloc((void **)&ci_gpu, (non_zero_ct * 2 * sizeof(int))); hipMalloc((void **)&val_gpu, (non_zero_ct * 2 * sizeof(int))); hipMalloc((void **)&dd_gpu, (non_zero_ct * 2 * sizeof(int))/2); hipMalloc((void **)&V_gpu, (SIZE * sizeof(int))); hipMalloc((void **)&res_gpu, (SIZE * sizeof(int))); hipMalloc((void **)&res_csr_gpu, (SIZE * sizeof(int))); hipMalloc((void **)&res_ecsr_gpu, (SIZE * sizeof(int))); hipMalloc((void **)&res_ecsr_mod_gpu, (SIZE * sizeof(int))); //printf("Done cuda malloc\n"); //transfer to device hipMemcpy(M_gpu, M, (SIZE * SIZE * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(ro_gpu, ro, (SIZE +1)*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(ci_gpu, ci , (non_zero_ct * 2 * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(val_gpu, val, (non_zero_ct * 2 * sizeof(int)),hipMemcpyHostToDevice); hipMemcpy(dd_gpu, dd, (non_zero_ct * 2 * sizeof(int)/2), hipMemcpyHostToDevice); hipMemcpy(V_gpu, V, (SIZE * sizeof(int)), hipMemcpyHostToDevice); //printf("Done transferring to device\n"); //setting CUDA parameters int nb = ceil(SIZE/BLOCK_SIZE); int nt = BLOCK_SIZE; // dim3 GridDim,BlockDim; // BlockDim.x = nb; // BlockDim.y=1; // GridDim.x = BLOCK_SIZE; // GridDim.y = BLOCK_SIZE; //Starting Normal Multiplication printf("\n\nStarting Normal Multiplication..."); //clock_t start,end; //start = clock(); hipEventRecord(start); spmvNormal<<< nb,nt >>>(M_gpu,V_gpu,res_gpu); hipEventRecord(stop); hipEventSynchronize(stop); //end = clock(); //time_normal += end - start; hipEventElapsedTime(&time_normal, start, stop); //Checking for CUDA errors err = hipGetLastError(); if(err!=hipSuccess){ printf("ERROR: %s\n",hipGetErrorString(err)); } printf("Done\n"); //Transfer result back to memory hipMemcpy(res, res_gpu, (SIZE * sizeof(int)), hipMemcpyDeviceToHost); //Starting CSR Multiplication printf("\n\nStarting CSR Multiplication..."); // clock_t start_csr,end_csr; // start_csr = clock(); hipEventRecord(start_csr); spmvCSR<<< nb,nt>>>(ro_gpu,dd_gpu,val_gpu,V_gpu,res_csr_gpu); hipEventRecord(stop_csr); hipEventSynchronize(stop_csr); hipEventElapsedTime(&time_csr,start_csr,stop_csr); //end_csr = clock(); // time_csr += end_csr - start_csr; //Checking for CUDA errors err = hipGetLastError(); if(err!=hipSuccess){ printf("ERROR: %s\n",hipGetErrorString(err)); exit(0); } printf("Done\n"); //Transfer result back to memory hipMemcpy(res_csr, res_csr_gpu, (SIZE * sizeof(int)), hipMemcpyDeviceToHost); //Starting ECSR Multiplication printf("\n\nStarting ECSR Multiplication..."); // clock_t start_ecsr,end_ecsr; // start_ecsr = clock(); hipEventRecord(start_ecsr); spmvECSR<<< nb,nt>>>(ro_gpu,dd_gpu,val_gpu,V_gpu,res_ecsr_gpu); hipEventRecord(stop_ecsr); hipEventSynchronize(stop_ecsr); hipEventElapsedTime(&time_ecsr, start_ecsr,stop_ecsr); // end_ecsr = clock(); // time_ecsr += end_ecsr - start_ecsr; //Checking for CUDA errors err = hipGetLastError(); if(err!=hipSuccess){ printf("ERROR: %s\n",hipGetErrorString(err)); exit(0); } printf("Done\n"); //Transfer result back to memory hipMemcpy(res_ecsr, res_ecsr_gpu, (SIZE * sizeof(int)), hipMemcpyDeviceToHost); printf("\n\nStarting ECSR(modified) Multiplication..."); // 
clock_t start_ecsr,end_ecsr; // start_ecsr = clock(); hipEventRecord(start_ecsr_mod); spmvECSR_mod<<< nb,nt>>>(ro_gpu,dd_gpu,val_gpu,V_gpu,res_ecsr_mod_gpu); hipEventRecord(stop_ecsr_mod); hipEventSynchronize(stop_ecsr_mod); hipEventElapsedTime(&time_ecsr_mod, start_ecsr_mod,stop_ecsr_mod); // end_ecsr = clock(); // time_ecsr += end_ecsr - start_ecsr; //Checking for CUDA errors err = hipGetLastError(); if(err!=hipSuccess){ printf("ERROR: %s\n",hipGetErrorString(err)); exit(0); } printf("Done\n\n"); //Transfer result back to memory hipMemcpy(res_ecsr_mod, res_ecsr_mod_gpu, (SIZE * sizeof(int)), hipMemcpyDeviceToHost); //free memory for(i=0;i<SIZE;i++) free(M[i]); free(M); free(V); free(ro); free(ci); free(val); free(dd); free(res); free(res_csr); free(res_ecsr); hipFree(ro_gpu); hipFree(ci_gpu); hipFree(val_gpu); hipFree(dd_gpu); hipFree(M_gpu); hipFree(V_gpu); hipFree(res_gpu); hipFree(res_csr_gpu); hipFree(res_ecsr_gpu); printf("===============================================================================\n"); printf("Average time taken for normal multiplication:%lf\n",time_normal); printf("---------------------------------------------------------------\n"); printf("Average time taken for CSR multiplication:%lf\n",time_csr); printf("CSR multiplication runs %lf times faster than normal multiplication\n",time_normal/time_csr); printf("---------------------------------------------------------------\n"); printf("Average time taken for ECSR multiplication:%lf\n",time_ecsr); printf("ECSR multiplication runs %lf times faster than normal multiplication\n",time_normal/time_ecsr); printf("---------------------------------------------------------------\n"); printf("Average time taken for ECSR(modified) multiplication:%lf\n",time_ecsr_mod); printf("ECSR(modfied) multiplication runs %lf times faster than normal multiplication\n",time_normal/time_ecsr_mod); printf("---------------------------------------------------------------\n"); printf("It is seen that time taken for ECSR multiplication is %lf times that for ECSR(modified) multiplication\n",time_ecsr/time_ecsr_mod); return 0; }
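Two caveats inherited from the CUDA original rather than introduced by the conversion: the spmvCSR launch passes dd_gpu where the kernel's ci parameter is expected, and M is built as an array of row pointers (int**), so the single hipMemcpy into M_gpu copies pointer values rather than matrix elements. A hedged sketch of a contiguous staging buffer that would make that transfer valid; M_flat is an added name:

int *M_flat = (int*)malloc(SIZE * SIZE * sizeof(int));
for (int r = 0; r < SIZE; r++)
    for (int c = 0; c < SIZE; c++)
        M_flat[r*SIZE + c] = M[r][c];          // flatten the row-pointer matrix
hipMemcpy(M_gpu, M_flat, SIZE * SIZE * sizeof(int), hipMemcpyHostToDevice);
free(M_flat);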
Convert the following CUDA code to AMD GPU code: cuda //pass //--blockDim=10 --gridDim=64 --no-inline #include "cuda.h" __global__ void foo() { __shared__ int A[10][10]; A[threadIdx.y][threadIdx.x] = 2; }
//pass //--blockDim=10 --gridDim=64 --no-inline #include "hip/hip_runtime.h" __global__ void foo() { __shared__ int A[10][10]; A[threadIdx.y][threadIdx.x] = 2; }
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void applyLinearFunction(int size, const short *x, short *y, short a, short b) { const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x; if (ix < size) { y[ix] = a + b * x[ix]; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void applyLinearFunction(int size, const short *x, short *y, short a, short b) { const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x; if (ix < size) { y[ix] = a + b * x[ix]; } }
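A hedged usage sketch for the kernel above; the size, launch shape, and device buffers are illustrative assumptions:

int size = 1 << 20;
short *d_x = nullptr, *d_y = nullptr;
hipMalloc((void**)&d_x, size * sizeof(short));
hipMalloc((void**)&d_y, size * sizeof(short));
int threads = 256;
int blocks = (size + threads - 1) / threads;   // enough threads to cover size
applyLinearFunction<<<blocks, threads>>>(size, d_x, d_y, (short)3, (short)2);
hipDeviceSynchronize();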
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void generateGaussian_kernel(float* og, float delta, int radius) { int x = threadIdx.x - radius; og[threadIdx.x] = __expf(-(x * x) / (2 * delta * delta)); }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void generateGaussian_kernel(float* og, float delta, int radius) { int x = threadIdx.x - radius; og[threadIdx.x] = __expf(-(x * x) / (2 * delta * delta)); }
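The kernel writes og[threadIdx.x] with x = threadIdx.x - radius, so it is evidently meant to run as a single block of 2*radius+1 threads. A hedged sketch under that assumption; the values and buffer name are illustrative:

int radius = 8;                                // illustrative
float delta = 2.5f;                            // illustrative
float *d_og = nullptr;
hipMalloc((void**)&d_og, (2 * radius + 1) * sizeof(float));
generateGaussian_kernel<<<1, 2 * radius + 1>>>(d_og, delta, radius);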
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <cstdlib> #include <cassert> #include <cmath> #include <fstream> #include <sstream> #include "training_reader.cuh" using namespace std; //-----------------------Training Class to load training data------------------- // class TrainingData // { // public: // __host__ TrainingData(const string filename); // __host__ ~TrainingData(void); // __host__ bool isEof(void) { return m_trainingDataFile.eof(); } // // // Returns the number of input values read from the file: // __host__ void getNextInputs(double *inputVals); // __host__ void getTargetOutputs(double *targetOutputVals); // // private: // ifstream m_trainingDataFile; // }; __host__ TrainingData::TrainingData(const string filename) { m_trainingDataFile.open(filename.c_str()); } __host__ TrainingData::~TrainingData() { m_trainingDataFile.close(); } __host__ void TrainingData::getNextInputs(double *inputVals) { int index = 0; string line; getline(m_trainingDataFile, line); stringstream ss(line); string label; ss>> label; if (label.compare("in:") == 0) { double oneValue; while (ss >> oneValue) { inputVals[index] = oneValue; ++index; } } } __host__ void TrainingData::getTargetOutputs(double *targetOutputVals) { int index = 0; string line; getline(m_trainingDataFile, line); stringstream ss(line); string label; ss>> label; if (label.compare("out:") == 0) { double oneValue; while (ss >> oneValue) { targetOutputVals[index] = oneValue; ++index; } } }
#include <hip/hip_runtime.h> #include <iostream> #include <cstdlib> #include <cassert> #include <cmath> #include <fstream> #include <sstream> using namespace std; //-----------------------Training Class to load training data------------------- class TrainingData { public: __host__ TrainingData(const string filename); __host__ ~TrainingData(void); __host__ bool isEof(void) { return m_trainingDataFile.eof(); } // Returns the number of input values read from the file: __host__ void getNextInputs(double *inputVals); __host__ void getTargetOutputs(double *targetOutputVals); private: ifstream m_trainingDataFile; }; __host__ TrainingData::TrainingData(const string filename) { m_trainingDataFile.open(filename.c_str()); } __host__ TrainingData::~TrainingData() { m_trainingDataFile.close(); } __host__ void TrainingData::getNextInputs(double *inputVals) { int index = 0; string line; getline(m_trainingDataFile, line); stringstream ss(line); string label; ss>> label; if (label.compare("in:") == 0) { double oneValue; while (ss >> oneValue) { inputVals[index] = oneValue; ++index; } } } __host__ void TrainingData::getTargetOutputs(double *targetOutputVals) { int index = 0; string line; getline(m_trainingDataFile, line); stringstream ss(line); string label; ss>> label; if (label.compare("out:") == 0) { double oneValue; while (ss >> oneValue) { targetOutputVals[index] = oneValue; ++index; } } }
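A hedged usage sketch for the class above; the file name and buffer sizes are illustrative, while the "in:" / "out:" line labels follow the parsing code:

TrainingData trainData("trainingData.txt");    // assumed file name
double inputVals[2];                           // sizes are assumptions
double targetVals[1];
while (!trainData.isEof()) {
    trainData.getNextInputs(inputVals);        // consumes a line labeled "in:"
    trainData.getTargetOutputs(targetVals);    // consumes a line labeled "out:"
}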
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __device__ float sigmoid(float x) { return 1.0f / (1 + __expf(-x)); } __global__ void matrixMultiplyUpdateWeights_sigmoid(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns, float learning_rate) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = C[Row*numCColumns+Col] - learning_rate * (Pvalue / numAColumns); }
#include <hip/hip_runtime.h> #include "includes.h" __device__ float sigmoid(float x) { return 1.0f / (1 + __expf(-x)); } __global__ void matrixMultiplyUpdateWeights_sigmoid(float * A, float * B, float * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns, float learning_rate) { //@@ Insert code to implement matrix multiplication here __shared__ float ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ float ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = C[Row*numCColumns+Col] - learning_rate * (Pvalue / numAColumns); }
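A hedged launch sketch for the tiled kernel above; TILE_WIDTH is expected to come from includes.h (16 is a common choice), and the device pointers are assumptions:

dim3 block(TILE_WIDTH, TILE_WIDTH);
dim3 grid((numCColumns + TILE_WIDTH - 1) / TILE_WIDTH,
          (numCRows + TILE_WIDTH - 1) / TILE_WIDTH);
matrixMultiplyUpdateWeights_sigmoid<<<grid, block>>>(dA, dB, dC,
    numARows, numAColumns, numBRows, numBColumns,
    numCRows, numCColumns, learning_rate);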
Convert the following CUDA code to AMD GPU code: cuda extern "C" __global__ void calc_entropy_atomic(float *float_image_in, float *entropy_out, int blk_size) { //calculate entropy of a block through a single thread __shared__ float sum; if (threadIdx.x == 0 && threadIdx.y == 0) { sum = 0.0; } __syncthreads(); int blocksize = blk_size*blk_size; //vertical offset to get to beginning of own block int v_offset_to_blkrow = gridDim.x*blockDim.x*blockDim.y*blockIdx.y; int v_offset_to_pixrow = blockDim.x*gridDim.x*threadIdx.y; int h_offset = blockDim.x*blockIdx.x + threadIdx.x; int idx = v_offset_to_blkrow + v_offset_to_pixrow + h_offset; //idx of top left corner of the block int out_idx = blockIdx.y*gridDim.x + blockIdx.x; //normalize image float_image_in[idx] = float_image_in[idx] * float_image_in[idx] / (blocksize); atomicAdd(&sum, float_image_in[idx]); __syncthreads(); __shared__ float entropy; if (threadIdx.x == 0 && threadIdx.y == 0) { entropy = 0.0; } __syncthreads(); float_image_in[idx] = float_image_in[idx] / sum; //shannon entropy atomicAdd(&entropy, -float_image_in[idx] * log2(float_image_in[idx])); __syncthreads(); //printf("%f\n", sum2); if (threadIdx.x == 0 && threadIdx.y == 0) { entropy_out[out_idx] = entropy; } } extern "C" __global__ void thread_dct_h(float *float_image_in, float *coefficients, float *float_image_out, int blk_size) { //dct on rows //summation using Kahan algorithm, very important! if (blk_size>32){ blk_size = 32; } float sum = 0.0; float c = 0.0; for (int i = 0; i<blk_size; i++) { //printf("executing %d th task",i); //printf("param1 = %d,param2 = %d,param3 = %d \n",gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x,threadIdx.y*blk_size+i,gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x+i); float temp = coefficients[threadIdx.y*blk_size + i] * float_image_in[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + threadIdx.y*gridDim.x*blockDim.x + threadIdx.x + blockIdx.x*blockDim.x + i] - c; float t = sum + temp; c = (t - sum) - temp; sum = t; } float_image_out[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + threadIdx.y*gridDim.x*blockDim.x + threadIdx.x + blockIdx.x*blockDim.x] = sum; } extern "C" __global__ void thread_dct_v(float *float_image_in, float *coefficients, float *float_image_out, int blk_size) { // dct on columns //summation using Kahan algorithm, very important! if (blk_size>32){ blk_size = 32; } float sum = 0.0; float c = 0.0; for (int i = 0; i<blk_size; i++) { //printf("executing %d th task",i); //printf("param1 = %d,param2 = %d,param3 = %d \n",gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x,threadIdx.y*blk_size+i,gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x+i); float temp = coefficients[threadIdx.y*blk_size + i] * float_image_in[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + i*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x] - c; float t = sum + temp; c = (t - sum) - temp; sum = t; } float_image_out[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + threadIdx.y*gridDim.x*blockDim.x + threadIdx.x + blockIdx.x*blockDim.x] = sum; }
#include <hip/hip_runtime.h> extern "C" __global__ void calc_entropy_atomic(float *float_image_in, float *entropy_out, int blk_size) { //calculate entropy of a block through a single thread __shared__ float sum; if (threadIdx.x == 0 && threadIdx.y == 0) { sum = 0.0; } __syncthreads(); int blocksize = blk_size*blk_size; //vertical offset to get to beginning of own block int v_offset_to_blkrow = gridDim.x*blockDim.x*blockDim.y*blockIdx.y; int v_offset_to_pixrow = blockDim.x*gridDim.x*threadIdx.y; int h_offset = blockDim.x*blockIdx.x + threadIdx.x; int idx = v_offset_to_blkrow + v_offset_to_pixrow + h_offset; //idx of top left corner of the block int out_idx = blockIdx.y*gridDim.x + blockIdx.x; //normalize image float_image_in[idx] = float_image_in[idx] * float_image_in[idx] / (blocksize); atomicAdd(&sum, float_image_in[idx]); __syncthreads(); __shared__ float entropy; if (threadIdx.x == 0 && threadIdx.y == 0) { entropy = 0.0; } __syncthreads(); float_image_in[idx] = float_image_in[idx] / sum; //shannon entropy atomicAdd(&entropy, -float_image_in[idx] * log2(float_image_in[idx])); __syncthreads(); //printf("%f\n", sum2); if (threadIdx.x == 0 && threadIdx.y == 0) { entropy_out[out_idx] = entropy; } } extern "C" __global__ void thread_dct_h(float *float_image_in, float *coefficients, float *float_image_out, int blk_size) { //dct on rows //summation using Kahan algorithm, very important! if (blk_size>32){ blk_size = 32; } float sum = 0.0; float c = 0.0; for (int i = 0; i<blk_size; i++) { //printf("executing %d th task",i); //printf("param1 = %d,param2 = %d,param3 = %d \n",gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x,threadIdx.y*blk_size+i,gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x+i); float temp = coefficients[threadIdx.y*blk_size + i] * float_image_in[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + threadIdx.y*gridDim.x*blockDim.x + threadIdx.x + blockIdx.x*blockDim.x + i] - c; float t = sum + temp; c = (t - sum) - temp; sum = t; } float_image_out[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + threadIdx.y*gridDim.x*blockDim.x + threadIdx.x + blockIdx.x*blockDim.x] = sum; } extern "C" __global__ void thread_dct_v(float *float_image_in, float *coefficients, float *float_image_out, int blk_size) { // dct on columns //summation using Kahan algorithm, very important! if (blk_size>32){ blk_size = 32; } float sum = 0.0; float c = 0.0; for (int i = 0; i<blk_size; i++) { //printf("executing %d th task",i); //printf("param1 = %d,param2 = %d,param3 = %d \n",gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x,threadIdx.y*blk_size+i,gridDim.x*blockIdx.y*blockDim.x*blockDim.y+threadIdx.y*gridDim.x*blockDim.x+threadIdx.x+blockIdx.x*blockDim.x+i); float temp = coefficients[threadIdx.y*blk_size + i] * float_image_in[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + i*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x] - c; float t = sum + temp; c = (t - sum) - temp; sum = t; } float_image_out[gridDim.x*blockIdx.y*blockDim.x*blockDim.y + threadIdx.y*gridDim.x*blockDim.x + threadIdx.x + blockIdx.x*blockDim.x] = sum; }
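The indexing in calc_entropy_atomic assumes one thread per pixel and one blk_size x blk_size thread block per image tile; the atomicAdd on the __shared__ floats used here is supported by HIP on AMD GPUs. A hedged launch sketch under those assumptions, with illustrative sizes and buffer names:

int blk = 16, W = 512, H = 512;                // illustrative tile and image sizes
dim3 block(blk, blk);
dim3 grid(W / blk, H / blk);                   // assumes W and H divide evenly
calc_entropy_atomic<<<grid, block>>>(d_img, d_entropy, blk);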
Convert the following CUDA code to AMD GPU code: cuda __global__ void dummyKernel(){ unsigned int count = 0; for(unsigned int i = 0; i < 1000; i++){ count += i; } } void dummyKernelWrapper(){ dummyKernel<<<1,1>>>(); }
#include <hip/hip_runtime.h> __global__ void dummyKernel(){ unsigned int count = 0; for(unsigned int i = 0; i < 1000; i++){ count += i; } } void dummyKernelWrapper(){ dummyKernel<<<1,1>>>(); }
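As in CUDA, HIP kernel launches are asynchronous, so the wrapper returns before the kernel finishes; a caller that times or immediately exits should synchronize first (a usage note, not part of the original):

dummyKernelWrapper();
hipDeviceSynchronize();                        // wait for the launched kernel to finish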
Convert the following CUDA code to AMD GPU code: cuda #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cmath> #include <time.h> #include <iostream> #define DTYPE float __global__ void kernel(float *a, float *x, float* buff,int Xblocks,int size,bool comp,int toreduce) { int i=threadIdx.x+blockIdx.x*blockDim.x; int j=threadIdx.y+blockIdx.y*blockDim.y; __shared__ float xSM[1024]; if(comp==true){ xSM[threadIdx.x+threadIdx.y*blockDim.x]=a[i+j*size]*x[i]; __syncthreads(); } else{ xSM[threadIdx.x+threadIdx.y*blockDim.x]=buff[i+j*Xblocks]; } if(i<toreduce){ for(int k=blockDim.x/2;k>0;k/=2){ if(threadIdx.x<k&&i+k<toreduce){ xSM[threadIdx.x+threadIdx.y*blockDim.x]+=xSM[threadIdx.x+k+threadIdx.y*blockDim.x]; __syncthreads(); } } } if(threadIdx.x==0){ } buff[blockIdx.x+j*Xblocks]=xSM[threadIdx.y*blockDim.x]; } void fillA(DTYPE *a, int size) { for (int i=0;i<size*size;i++) a[i]=1.0; } //Fill X with values void fillX(DTYPE *x, int size) { for (int i=0;i<size;i++){ x[i]=1; // x[i]= (DTYPE)(i+1); } } int main(int argc, char**argv) { int sx=32; int sy=32; int i=1; bool standard=true; if (argc>1) { standard=false; sx=atoi(argv[1]); if (argc>2){ sy=atoi(argv[2]); if(argc>3){ i=atoi(argv[3]); } } } if(standard){ std::cout<<"Do experiment with standard settings"<<std::endl; std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl; }else { std::cout<<"Do experiment with individual settings"<<std::endl; std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl; } /*if(sx*sy!=t){ std::cout<<"Sx*Sy has to be equal to threads per block"<<std::endl; return -1; }*/ int size=1024*i; int xblocks=size/sx; //Allocate data arrays for the host DTYPE *a_host, *buff_host,*x_host; //and for the device DTYPE *a_dev, *buff_dev,*x_dev; //Events for the time measurement cudaEvent_t start,end; //Times: //htd: host->device memcpy of A and x //dth: device->host memcpy of y //kernelA, kernelAT float kernelA_time=0.0; //TODO: allocate host memory and fill A and x a_host = (DTYPE*)malloc(size*size*sizeof(DTYPE)); x_host = (DTYPE*)malloc(size*sizeof(DTYPE)); buff_host=(DTYPE*)malloc(xblocks*size*sizeof(DTYPE)); fillA(a_host,size); fillX(x_host,size); //TODO: create CUDA events //TODO: allocate CUDA memory for all arrays (a_dev,x_dev,y_dev) cudaMalloc((void**)&a_dev,size*size*sizeof(DTYPE)); cudaMalloc((void**)&x_dev,size*sizeof(DTYPE)); cudaMalloc((void**)&buff_dev,xblocks*size*sizeof(DTYPE)); cudaMemcpy(a_dev,a_host,size*size*sizeof(DTYPE),cudaMemcpyHostToDevice); cudaMemcpy(x_dev,x_host,size*sizeof(DTYPE),cudaMemcpyHostToDevice); dim3 block(sx,sy); dim3 grid(size/block.x,size/block.y); int toreduce=size;//number of values still to be reduced; for the first kernel this equals size, since nothing has been reduced yet and the first computation step therefore yields size scalars bool doComputation=true; //cache configuration if(argc>4){ if(atoi(argv[4])==1){//L1 preferred std::cout<<"16 kB shared, 48kB L1"<<std::endl; cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferL1); } else if(atoi(argv[4])==2){ std::cout<<"48kB shared, 16kb L1"<<std::endl; cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferShared); }else{ std::cout<<"32kB shared, 32kB L1"<<std::endl; cudaFuncSetCacheConfig(kernel, cudaFuncCachePreferNone); } } //Start of the time measurement; since this is only the first call and the kernel is launched again afterwards with a different doComputation argument, the measurement only ends after the while loop, in which the kernel is invoked once more. 
cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start,0); kernel<<<grid,block>>>(a_dev,x_dev,buff_dev,xblocks,size,doComputation,toreduce);//,y_dev,size); //Check of the intermediate result /*cudaMemcpy(buff_host,buff_dev,xblocks*size*sizeof(DTYPE),cudaMemcpyDeviceToHost); for(int lj=0;lj<10;lj++){ for(int li=0;li<10;li++) std::cout<<buff_host[li+xblocks*lj]<<" ; "; std::cout<<std::endl; }*/ //Preparation for the remaining passes doComputation=false; //the matrix-vector multiplication only has to be performed once if(toreduce%sx==0) toreduce/=sx; else toreduce=toreduce/sx+1; while(toreduce>1){//toreduce=1 means everything has been reduced to a single value, so no further reduction is necessary int xgrid; int ygrid=size/sy; if(toreduce%sx==0) xgrid=toreduce/sx; else xgrid=toreduce/sx+1; dim3 itgrid(xgrid,ygrid); kernel<<<itgrid,block>>>(a_dev,x_dev,buff_dev,xblocks,size,doComputation,toreduce);//,y_dev,size); if(toreduce%sx==0) toreduce/=sx; else toreduce=toreduce/sx+1; } cudaEventRecord(end,0); cudaEventSynchronize(end); cudaEventElapsedTime(&kernelA_time,start,end); if(argc>4){ cudaMemcpy(buff_host,buff_dev,xblocks*size*sizeof(DTYPE),cudaMemcpyDeviceToHost); std::cout<<"The final result is"<<std::endl; for(int lj=0;lj<10;lj++){ //for(int li=0;li<10;li++) // std::cout<<buff_host[li+xblocks*lj]<<" ; "; // std::cout<<std::endl; std::cout<<buff_host[xblocks*lj]<<std::endl; } } std::cout<<"Computation time: "<<kernelA_time<<std::endl; //Determine the performance //float time_s=kernelA_time/1000; //float gflops=pow(10,-9)*size*size*2/time_s; float gflops=pow(10,-6)*size*size*2/kernelA_time; std::cout<<"Computation Performance in GFLOPs: "<<gflops<<std::endl; return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cmath> #include <time.h> #include <iostream> #define DTYPE float __global__ void kernel(float *a, float *x, float* buff,int Xblocks,int size,bool comp,int toreduce) { int i=threadIdx.x+blockIdx.x*blockDim.x; int j=threadIdx.y+blockIdx.y*blockDim.y; __shared__ float xSM[1024]; if(comp==true){ xSM[threadIdx.x+threadIdx.y*blockDim.x]=a[i+j*size]*x[i]; __syncthreads(); } else{ xSM[threadIdx.x+threadIdx.y*blockDim.x]=buff[i+j*Xblocks]; } if(i<toreduce){ for(int k=blockDim.x/2;k>0;k/=2){ if(threadIdx.x<k&&i+k<toreduce){ xSM[threadIdx.x+threadIdx.y*blockDim.x]+=xSM[threadIdx.x+k+threadIdx.y*blockDim.x]; __syncthreads(); } } } if(threadIdx.x==0){ } buff[blockIdx.x+j*Xblocks]=xSM[threadIdx.y*blockDim.x]; } void fillA(DTYPE *a, int size) { for (int i=0;i<size*size;i++) a[i]=1.0; } //X mit Werten füllen void fillX(DTYPE *x, int size) { for (int i=0;i<size;i++){ x[i]=1; // x[i]= (DTYPE)(i+1); } } int main(int argc, char**argv) { int sx=32; int sy=32; int i=1; bool standard=true; if (argc>1) { standard=false; sx=atoi(argv[1]); if (argc>2){ sy=atoi(argv[2]); if(argc>3){ i=atoi(argv[3]); } } } if(standard){ std::cout<<"Do experiment with standard settings"<<std::endl; std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl; }else { std::cout<<"Do experiment with individual settings"<<std::endl; std::cout<<"Sx="<<sx<<"\nSy="<<sy<<"\nSize=1024*"<<i<<std::endl; } /*if(sx*sy!=t){ std::cout<<"Sx*Sy has to be equal to threads per block"<<std::endl; return -1; }*/ int size=1024*i; int xblocks=size/sx; //Datenfelder anlegen für Host DTYPE *a_host, *buff_host,*x_host; //und Device DTYPE *a_dev, *buff_dev,*x_dev; //Events für die Zeitmessung hipEvent_t start,end; //Zeiten: //htd: Host->Device Memcpy von A und x //dth: Device->Host Memcpy von y //kernelA, kernelAT float kernelA_time=0.0; //TODO: Host Speicher anlegen und A und x füllen a_host = (DTYPE*)malloc(size*size*sizeof(DTYPE)); x_host = (DTYPE*)malloc(size*sizeof(DTYPE)); buff_host=(DTYPE*)malloc(xblocks*size*sizeof(DTYPE)); fillA(a_host,size); fillX(x_host,size); //TODO: CUDA Events erstellen //TODO: CUDA Speicher anlegen für alle Arrays (a_dev,x_dev,y_dev) hipMalloc((void**)&a_dev,size*size*sizeof(DTYPE)); hipMalloc((void**)&x_dev,size*sizeof(DTYPE)); hipMalloc((void**)&buff_dev,xblocks*size*sizeof(DTYPE)); hipMemcpy(a_dev,a_host,size*size*sizeof(DTYPE),hipMemcpyHostToDevice); hipMemcpy(x_dev,x_host,size*sizeof(DTYPE),hipMemcpyHostToDevice); dim3 block(sx,sy); dim3 grid(size/block.x,size/block.y); int toreduce=size;//Anzahl der noch zu reduzierenden Wert, im ersten Kernel=size, da noch nichts reduziert wurde und man somit im ersten Berechnungsschritt size viel Skalare erhällt bool doComputation=true; //cache Konfiguration if(argc>4){ if(atoi(argv[4])==1){//L1 Prefered std::cout<<"16 kB shared, 48kB L1"<<std::endl; hipFuncSetCacheConfig(reinterpret_cast<const void*>(kernel), hipFuncCachePreferL1); } else if(atoi(argv[4])==2){ std::cout<<"48kB shared, 16kb L1"<<std::endl; hipFuncSetCacheConfig(reinterpret_cast<const void*>(kernel), hipFuncCachePreferShared); }else{ std::cout<<"32kB shared, 32kB L1"<<std::endl; hipFuncSetCacheConfig(reinterpret_cast<const void*>(kernel), hipFuncCachePreferNone); } } //Start Zeitmessung, da dies nur der erste Aufruf ist und danach der Kernel nochmal aufgerufen wird, mit einem anderen Argument als du Computation, endet die Zeitmessung erst nach der while Schleife, inder der Kernel noch einmal aufgerufen wird. 
hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start,0); kernel<<<grid,block>>>(a_dev,x_dev,buff_dev,xblocks,size,doComputation,toreduce);//,y_dev,size); //Überprüfung des Zwischenergebnisses /*cudaMemcpy(buff_host,buff_dev,xblocks*size*sizeof(DTYPE),cudaMemcpyDeviceToHost); for(int lj=0;lj<10;lj++){ for(int li=0;li<10;li++) std::cout<<buff_host[li+xblocks*lj]<<" ; "; std::cout<<std::endl; }*/ //Vorbereitung für weitere Durchführung doComputation=false; //Matrix Vektor Multiplikation musst nur einmal durchgeführt werden if(toreduce%sx==0) toreduce/=sx; else toreduce=toreduce/sx+1; while(toreduce>1){//toreduce=1 bedeut, dass auf einen Wert reduziert wurde und somit keine weitere Reduzierung notwendig ist int xgrid; int ygrid=size/sy; if(toreduce%sx==0) xgrid=toreduce/sx; else xgrid=toreduce/sx+1; dim3 itgrid(xgrid,ygrid); kernel<<<itgrid,block>>>(a_dev,x_dev,buff_dev,xblocks,size,doComputation,toreduce);//,y_dev,size); if(toreduce%sx==0) toreduce/=sx; else toreduce=toreduce/sx+1; } hipEventRecord(end,0); hipEventSynchronize(end); hipEventElapsedTime(&kernelA_time,start,end); if(argc>4){ hipMemcpy(buff_host,buff_dev,xblocks*size*sizeof(DTYPE),hipMemcpyDeviceToHost); std::cout<<"Das Endergebnis ist"<<std::endl; for(int lj=0;lj<10;lj++){ //for(int li=0;li<10;li++) // std::cout<<buff_host[li+xblocks*lj]<<" ; "; // std::cout<<std::endl; std::cout<<buff_host[xblocks*lj]<<std::endl; } } std::cout<<"Computation time: "<<kernelA_time<<std::endl; //Bestimmung der Performance //float time_s=kernelA_time/1000; //float gflops=pow(10,-9)*size*size*2/time_s; float gflops=pow(10,-6)*size*size*2/kernelA_time; std::cout<<"Computation Performance in GFLOPs: "<<gflops<<std::endl; return 0; }
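The cudaEvent_* timing calls map one-to-one onto hipEvent_* here; note that neither version ever releases the events. A minimal sketch of the full create/record/measure/destroy cycle, with a placeholder kernel standing in for the reduction:

#include <hip/hip_runtime.h>
#include <iostream>

__global__ void work() {}  // placeholder for the reduction kernel

int main() {
    hipEvent_t start, end;
    hipEventCreate(&start);
    hipEventCreate(&end);

    hipEventRecord(start, 0);    // record on the default stream
    work<<<1, 64>>>();
    hipEventRecord(end, 0);
    hipEventSynchronize(end);    // block until 'end' has occurred

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, end);
    std::cout << "elapsed: " << ms << " ms" << std::endl;

    hipEventDestroy(start);      // cleanup missing in the program above
    hipEventDestroy(end);
    return 0;
}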
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <cstdlib> #include <ctime> #include "cuda_runtime.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> using namespace std; __global__ void action(int *array1, int* array2, int* array_res) { int i = blockIdx.x * blockDim.x + threadIdx.x; switch(i % 3) { case 0: array_res[i] = array1[i] + 1; break; case 1: array_res[i] = array2[i] - 1; break; case 2: array_res[i] = array1[i] * array2[i]; break; } } ////////////////// struct is_first { int p = -1; __device__ __host__ bool operator() (int x) { return x ; } }; template<typename T> struct first_func { __device__ __host__ T operator ()( const T& x ) const { return x + 1; } }; template<typename T> struct second_func { __device__ __host__ T operator ()( const T& x ) const { return x - 1; } }; template<typename T> struct third_func { __device__ __host__ T operator ()( const T& x ) const { return x + 1; } }; ////////////////// void array_print(int* array, int array_len, const char* message) { cout << " " << message << ":\n [ "; for (int i = 0; i < array_len; i++) { cout << array[i] << " "; } cout << "]" << endl; } int main(int argc, char** argv) { int array_len, start, stop; //Obtaining command line arguments switch (argc) { case 1: array_len = 15; cout << " #Warning# Default array size: " << array_len << endl; start = 0; cout << " #Warning# Default random start: " << start << endl; stop = 100; cout << " #Warning# Default random stop: " << stop << endl; cout << endl; break; case 2: array_len = atoi(argv[1]); start = 0; cout << " #Warning# Default random start: " << start << endl; stop = 100; cout << " #Warning# Default random stop: " << stop << endl; cout << endl; break; case 4: array_len = atoi(argv[1]); start = atoi(argv[2]); stop = atoi(argv[3]); cout << endl; break; default: cout << "Wrong input!" << endl; } //Prepairing variables int *array1 = new int[array_len]; int *array2 = new int[array_len]; int *array_res = new int[array_len]; int * d_array1, *d_array2, *d_array_res; // thrust::device_vector<int> t_array1(array_len); // thrust::device_vector<int> t_array2(array_len); // thrust::device_vector<int> t_array_res(array_len); cudaMalloc((void**)&d_array1, sizeof(int)*array_len); cudaMalloc((void**)&d_array2, sizeof(int)*array_len); cudaMalloc((void**)&d_array_res, sizeof(int)*array_len); //Randomizing array srand(time(NULL)); for (int i = 0; i < array_len; i++) { array1[i] = start + rand() % stop; //t_array1[i] = array1[i]; array2[i] = start + rand() % stop; //t_array2[i] = array2[i]; } array_print(array1, array_len, "Array1"); array_print(array2, array_len, "Array2"); //Some copies cudaMemcpy(d_array1, array1, sizeof(int) * array_len, cudaMemcpyHostToDevice); cudaMemcpy(d_array2, array2, sizeof(int) * array_len, cudaMemcpyHostToDevice); action<<<array_len, 1>>>(d_array1, d_array2, d_array_res); cudaDeviceSynchronize(); //thrust::transform_if(thrust::device, t_array1.begin(), t_array1.end(), // t_array2.begin(), first_func<int>(), is_first()); // for(int i = 0; i < array_len; i++) // { // cout << t_array2[i] << " "; // } // cout << endl; //Some copies cudaMemcpy(array_res, d_array_res, sizeof(int) * array_len, cudaMemcpyDeviceToHost); array_print(array_res, array_len, "Res array"); delete[] array1, array2, array_res; cudaFree(d_array1); cudaFree(d_array2); cudaFree(d_array_res); cudaDeviceReset(); return 0; }
#include <iostream> #include <cstdlib> #include <ctime> #include "hip/hip_runtime.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> using namespace std; __global__ void action(int *array1, int* array2, int* array_res) { int i = blockIdx.x * blockDim.x + threadIdx.x; switch(i % 3) { case 0: array_res[i] = array1[i] + 1; break; case 1: array_res[i] = array2[i] - 1; break; case 2: array_res[i] = array1[i] * array2[i]; break; } } ////////////////// struct is_first { int p = -1; __device__ __host__ bool operator() (int x) { return x ; } }; template<typename T> struct first_func { __device__ __host__ T operator ()( const T& x ) const { return x + 1; } }; template<typename T> struct second_func { __device__ __host__ T operator ()( const T& x ) const { return x - 1; } }; template<typename T> struct third_func { __device__ __host__ T operator ()( const T& x ) const { return x + 1; } }; ////////////////// void array_print(int* array, int array_len, const char* message) { cout << " " << message << ":\n [ "; for (int i = 0; i < array_len; i++) { cout << array[i] << " "; } cout << "]" << endl; } int main(int argc, char** argv) { int array_len, start, stop; //Obtaining command line arguments switch (argc) { case 1: array_len = 15; cout << " #Warning# Default array size: " << array_len << endl; start = 0; cout << " #Warning# Default random start: " << start << endl; stop = 100; cout << " #Warning# Default random stop: " << stop << endl; cout << endl; break; case 2: array_len = atoi(argv[1]); start = 0; cout << " #Warning# Default random start: " << start << endl; stop = 100; cout << " #Warning# Default random stop: " << stop << endl; cout << endl; break; case 4: array_len = atoi(argv[1]); start = atoi(argv[2]); stop = atoi(argv[3]); cout << endl; break; default: cout << "Wrong input!" << endl; } //Prepairing variables int *array1 = new int[array_len]; int *array2 = new int[array_len]; int *array_res = new int[array_len]; int * d_array1, *d_array2, *d_array_res; // thrust::device_vector<int> t_array1(array_len); // thrust::device_vector<int> t_array2(array_len); // thrust::device_vector<int> t_array_res(array_len); hipMalloc((void**)&d_array1, sizeof(int)*array_len); hipMalloc((void**)&d_array2, sizeof(int)*array_len); hipMalloc((void**)&d_array_res, sizeof(int)*array_len); //Randomizing array srand(time(NULL)); for (int i = 0; i < array_len; i++) { array1[i] = start + rand() % stop; //t_array1[i] = array1[i]; array2[i] = start + rand() % stop; //t_array2[i] = array2[i]; } array_print(array1, array_len, "Array1"); array_print(array2, array_len, "Array2"); //Some copies hipMemcpy(d_array1, array1, sizeof(int) * array_len, hipMemcpyHostToDevice); hipMemcpy(d_array2, array2, sizeof(int) * array_len, hipMemcpyHostToDevice); action<<<array_len, 1>>>(d_array1, d_array2, d_array_res); hipDeviceSynchronize(); //thrust::transform_if(thrust::device, t_array1.begin(), t_array1.end(), // t_array2.begin(), first_func<int>(), is_first()); // for(int i = 0; i < array_len; i++) // { // cout << t_array2[i] << " "; // } // cout << endl; //Some copies hipMemcpy(array_res, d_array_res, sizeof(int) * array_len, hipMemcpyDeviceToHost); array_print(array_res, array_len, "Res array"); delete[] array1, array2, array_res; hipFree(d_array1); hipFree(d_array2); hipFree(d_array_res); hipDeviceReset(); return 0; }
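The thrust headers need no renaming: under ROCm they are provided by rocThrust, which keeps the thrust:: namespace and interface. A minimal sketch of the transform that the commented-out thrust::transform_if experiment hints at (functor and sizes are illustrative, not taken from the original):

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <iostream>

struct plus_one {
    __host__ __device__ int operator()(int x) const { return x + 1; }
};

int main() {
    thrust::device_vector<int> in(15, 7);   // 15 elements, all set to 7
    thrust::device_vector<int> out(15);
    thrust::transform(in.begin(), in.end(), out.begin(), plus_one());
    std::cout << out[0] << std::endl;       // prints 8
    return 0;
}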
Convert the following CUDA code to AMD GPU code: cuda //#include "shallow.h" #include <iostream> #include <time.h> #include <stdlib.h> #define RELU(a) ((a > 0) ? a : 0) #define KERNEL_SIZE(n_C) ((n_C * 2 > 8) ? 8 : n_C * 2) struct layer_param { int M; //Ilosc tablic podawanych na wejscie int pad; //Grubosc warstwy zer na krawedziach (zero-padding) int F; //Rozmiar 2D filtra (F x F) int F_MP; //Rozmiar 2D filtra do max poolingu (F_MP x F_MP) int stride; //Ilosc przeskakiwanych pikseli przy konwolucji na inkrementacje int stride_MP; //To samo, tylko przy max poolingu int n_Hprev; //Wysokosc tablicy wejsciowej podawanej na wejscie sieci int n_Wprev; //Szerokosc tablicy wejsciowej podawanej na wejscie sieci int n_Cprev; //Glebokosc tablicy wejsciowej, jednoczesnie musi to byc glebokosc filtra (F x F x C) int n_H; //Wysokosc tablicy uzyskanej po konwolucji kernela z wejsciem int n_W; int n_C; //Ilosc filtrow, jednoczesnie glebokosc wyjscia warstwy int n_Hout; //Wysokosc tablicy wyjsciowej warstwy int n_Wout; double alpha; //Predkosc uczenia }; struct cache_data { double** IN; //Tablica wejsciowa double** Z; //Wynik splotu double** A; //Wynik Aktywacji double** OUT; //Poprzedni wynik po max poolingu, jednoczescie wyjscie warstwy sieci double** kernel; //Filtr double** dW; //Gradient kosztu wzgledem kerneli double** dA; //Gradient kosztu wzgledem warstwy double** dAprev; //Gradient kosztu wzgledem wyjscia warstwy n_l - 1 double** dZ; //Gradient kosztu wzgledem wyniku konwolucji }; void set_random_IN(layer_param, double**&); //Ustawia losowe wejscie (do testowania) void set_new_IN(double**&, double**&, layer_param l); void show_results(layer_param, cache_data&); //Wyswietla zawartosc koncowych i posrednich wynikow w warstwie void brief_inf(layer_param, double**); //Krotka informacja o wyjsciu sieci void forw_prop(layer_param, cache_data&); //Najwazniejsza funkcja (konwolucja, aktywacja, maxpooling) void simple_del(double**&, int); //Usuwanie pamieci void update_param(layer_param&, layer_param&); //Ustawianie nowych parametrow warstwy void prep_new_arrays(layer_param, cache_data&); //Tworzenie nowych tablic wynikowych void prep_gradients(layer_param, cache_data&); //Tworzenie gradientow (narazie losowo, bez funkcji kosztu) void show_gradients(layer_param, cache_data&); void back_prop(layer_param, cache_data&); int main() { srand(time(NULL)); int number_of_layers = 2; layer_param* l = new layer_param[number_of_layers]; cache_data* cache = new cache_data[number_of_layers]; int n_l = 0; layer_param l_prev; l[n_l].M = 1; l[n_l].pad = 0; l[n_l].F = 3; l[n_l].F_MP = 2; l[n_l].stride = 1; l[n_l].stride_MP = 2; l[n_l].alpha = 0.1; int IN_size = 16; //Rzeczywisty rozmiar wejscia int IN_depth = 1; //Rzeczywista glebokosc wejscia l[n_l].n_Hprev = IN_size + 2 * l[n_l].pad; l[n_l].n_Wprev = IN_size + 2 * l[n_l].pad; l[n_l].n_Cprev = IN_depth; l[n_l].n_H = int((l[n_l].n_Hprev - l[n_l].F) / l[n_l].stride) + 1; l[n_l].n_W = int((l[n_l].n_Wprev - l[n_l].F) / l[n_l].stride) + 1; l[n_l].n_C = 1; l[n_l].n_Hout = int((l[n_l].n_H - l[n_l].F_MP) / l[n_l].stride_MP) + 1; l[n_l].n_Wout = int((l[n_l].n_W - l[n_l].F_MP) / l[n_l].stride_MP) + 1; for (n_l = 0; n_l < number_of_layers; n_l++) { std::cout << "\n\n#### WARSTWA: " << n_l + 1 << "#### \n"; if (n_l == 0) set_random_IN(l[n_l], cache[n_l].IN); else { l_prev = l[n_l - 1]; update_param(l_prev, l[n_l]); set_new_IN(cache[n_l].IN, cache[n_l - 1].OUT, l[n_l]); } prep_new_arrays(l[n_l], cache[n_l]); forw_prop(l[n_l], cache[n_l]); prep_gradients(l[n_l], cache[n_l]); if 
(l[n_l].n_H < 25) show_results(l[n_l], cache[n_l]); else brief_inf(l[n_l], cache[n_l].OUT); //back_prop(l[n_l], cache[n_l]); //show_gradients(l[n_l], cache[n_l]); //Funkcja wyswietla gradient } return 0; } void set_random_IN(layer_param l, double**& IN) { IN = new double* [l.M]; for (int i = 0; i < l.M; i++) { IN[i] = new double[l.n_Cprev * l.n_Hprev * l.n_Wprev]; } for (int m = 0; m < l.M; m++) { //Dla kazdego badanego przypadku (np. pojedynczej mapy bajtowej- spektogram) for (int h = 0; h < l.n_Hprev; h++) { //Przejdz po kazdym wierszu for (int w = 0; w < l.n_Wprev; w++) { //Przejdz po kazdej kolumnie for (int c = 0; c < l.n_Cprev; c++) { //Przejdz po kazdym kanale (np. dla wejscia w postaci zdjecia rgb - 3 kanaly) if (h < l.pad || h > l.n_Hprev - l.pad - 1) IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = 0; //Ustawianie zer dla zero paddingu else if (w < l.pad || w > l.n_Wprev - l.pad - 1) IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = 0; else IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = (rand() % 10 + 1)/10.; //W tablicy wejsciowej beda same wartosci int } } } } } void set_new_IN(double**& IN, double**& OUT, layer_param l) { IN = new double* [l.M]; for (int i = 0; i < l.M; i++) { IN[i] = new double[l.n_Cprev * l.n_Hprev * l.n_Wprev]; } for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_Cprev; c++) { for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = OUT[m][w + l.n_Wprev * (h + l.n_Hprev * c)]; } } } } } void update_param(layer_param& l_prev, layer_param& l) { l.M = l_prev.M; l.pad = l_prev.pad; l.F = l_prev.F; l.F_MP = l_prev.F_MP; l.stride = l_prev.stride; l.stride_MP = l_prev.stride_MP; l.alpha = l_prev.alpha; l.n_Hprev = l_prev.n_Hout; l.n_Wprev = l_prev.n_Wout; l.n_Cprev = l_prev.n_C; l.n_H = int((l.n_Hprev - l.F) / l.stride) + 1; l.n_W = int((l.n_Wprev - l.F) / l.stride) + 1; l.n_C = KERNEL_SIZE(l_prev.n_C); l.n_Hout = int((l.n_H - l.F_MP) / l.stride_MP) + 1; l.n_Wout = int((l.n_W - l.F_MP) / l.stride_MP) + 1; } void prep_new_arrays(layer_param l, cache_data& cache) { cache.Z = new double* [l.M]; cache.A = new double* [l.M]; cache.OUT = new double* [l.M]; cache.kernel = new double* [l.n_C]; for (int i = 0; i < l.M; i++) { cache.Z[i] = new double[l.n_C * l.n_H * l.n_W]; cache.A[i] = new double[l.n_C * l.n_H * l.n_W]; cache.OUT[i] = new double[l.n_C * l.n_Hout * l.n_Wout]; } for (int i = 0; i < l.n_C; i++) { cache.kernel[i] = new double[l.n_Cprev * l.F * l.F]; } for (int c = 0; c < l.n_C; c++) { for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { for (int d = 0; d < l.n_Cprev; d++) { cache.kernel[c][w + l.F * (h + l.F * d)] = (rand()%10000 - 5000) * 0.0001; //Ustawianie losowych wag filtra } } } } } void prep_gradients(layer_param l, cache_data& cache) { cache.dZ = new double* [l.M]; cache.dA = new double* [l.M]; cache.dAprev = new double* [l.M]; cache.dW = new double* [l.n_C]; for (int i = 0; i < l.M; i++) { cache.dZ[i] = new double[l.n_C * l.n_H * l.n_W]; } for (int i = 0; i < l.M; i++) { cache.dA[i] = new double[l.n_C * l.n_Hout * l.n_Wout]; } for (int i = 0; i < l.M; i++) { cache.dAprev[i] = new double[l.n_Cprev * l.n_Hprev * l.n_Wprev]; } for (int i = 0; i < l.n_C; i++) { cache.dW[i] = new double[l.n_Cprev * l.F * l.F]; } for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { for (int h = 0; h < l.n_Hout; h++) { for (int w = 0; w < l.n_Wout; w++) { cache.dA[m][w + l.n_Wout * (h + l.n_Hout * c)] = (rand()%10000 - 5000) * 0.0001; } } } } for (int m = 0; m < l.M; m++) { for (int c = 0; c < 
l.n_Cprev; c++) { for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { cache.dAprev[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = 0; } } } } for (int c = 0; c < l.n_C; c++) { for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { for (int d = 0; d < l.n_Cprev; d++) { cache.dW[c][w + l.F * (h + l.F * d)] = 0; } } } } double maximum; int vert_start, vert_end; int horiz_start, horiz_end; for (int m = 0; m < l.M; m++) { //Dla kazdego przypadku for (int h = 0; h < l.n_Hout; h++) { //Dla kazdego wiersza wyjscia (wyniku max poolingu) for (int w = 0; w < l.n_Wout; w++) { // Dla kazdej kolumny wyjscia for (int c = 0; c < l.n_C; c++) { //Dla kazdego kanalu wyjscia vert_start = h * l.stride_MP; vert_end = vert_start + l.F_MP; horiz_start = w * l.stride_MP; horiz_end = horiz_start + l.F_MP; maximum = 0; for (int j = vert_start; j < vert_end; j++) { //Dla kazdego wiersza wycinka wyniku aktywacji for (int k = horiz_start; k < horiz_end; k++) { //Dla kazdej kolumny wycinka wyniku aktywacji if (cache.A[m][k + l.n_W * (j + l.n_H * c)] > maximum) maximum = cache.A[m][k + l.n_W * (j + l.n_H * c)]; } } for (int j = vert_start; j < vert_end; j++) { for (int k = horiz_start; k < horiz_end; k++) { if (cache.A[m][k + l.n_W * (j + l.n_H * c)] != maximum || maximum == 0) cache.dZ[m][k + l.n_W * (j + l.n_H * c)] = 0; else cache.dZ[m][k + l.n_W * (j + l.n_H * c)] = cache.dA[m][w + l.n_Wout * (h + l.n_Hout * c)]; } } } } } } for (int m = 0; m < l.M; m++) { for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { for (int c = 0; c < l.n_C; c++) { vert_start = h; vert_end = vert_start + l.F; horiz_start = w; horiz_end = horiz_start + l.F; for (int d = 0; d < l.n_Cprev; d++) { for (int j = vert_start; j < vert_end; j++) { for (int k = horiz_start; k < horiz_end; k++) { if (cache.dZ[m][w + l.n_W * (h + l.n_H * c)] < 0) cache.dZ[m][w + l.n_W * (h + l.n_H * c)] = 0; cache.dAprev[m][j + l.n_Wprev * (k + l.n_Hprev * d)] += cache.kernel[c][(k - horiz_start) + l.F * ((j - vert_start) + l.F * d)] * cache.dZ[m][w + l.n_W * (h + l.n_H * c)]; cache.dW[c][(k - horiz_start) + l.F * ((j - vert_start) + l.F * d)] += cache.IN[m][j + l.n_Wprev * (k + l.n_Hprev * d)] * cache.dZ[m][w + l.n_W * (h + l.n_H * c)]; } } } } } } } } void brief_inf(layer_param l, double** OUT) { for (int m = 0; m < l.M; m++) std::cout << "Wyjscie: " << m + 1 << " Kanaly: " << l.n_C << " (" << l.n_Hout << "x" << l.n_Wout << "x" << l.n_C << ")" << "\n" << std::fixed; } void forw_prop(layer_param l, cache_data& cache) { int M = l.M; int pad = l.pad; int F = l.F; int F_MP = l.F_MP; int stride = l.stride; int stride_MP = l.stride_MP; int n_Hprev = l.n_Hprev; int n_Wprev = l.n_Wprev; int n_Cprev = l.n_Cprev; int n_H = l.n_H; int n_W = l.n_W; int n_C = l.n_C; int n_Hout = l.n_Hout; int n_Wout = l.n_Wout; int vert_start = 0; int vert_end = 0; int horiz_start = 0; int horiz_end = 0; for (int m = 0; m < M; m++) { //Dla kazdego przypadku for (int h = 0; h < n_H; h++) { //Dla kazdego wiersza for (int w = 0; w < n_W; w++) { //Dla kazdej kolumny for (int c = 0; c < n_C; c++) { //Dla kazdego kanalu (kanalow bedzie tyle, ile chcemy kerneli) vert_start = h * stride; //Poczatek wycinka w pionie vert_end = vert_start + F; //Koniec wycinka w pionie horiz_start = w * stride; //Poczatek wycika w poziomie horiz_end = horiz_start + F; //Koniec wycinka w poziomie cache.Z[m][w + n_W * (h + n_H * c)] = 0; for (int d = 0; d < n_Cprev; d++) { //Dla kazdego kanalu w tablicy wejsciowej for (int j = vert_start; j < vert_end; j++) { //Dla wybranych wierszy 
for (int k = horiz_start; k < horiz_end; k++) { //Dla wybranych kolumn cache.Z[m][w + n_W * (h + n_H * c)] += cache.kernel[c][(k - horiz_start) + F * ((j - vert_start) + F * d)] * cache.IN[m][k + n_Wprev * (j + n_Hprev * d)]; //Pomnoz wartosc/piksel wycinka przez wage kernela i dodaj do wyniku konwolucji } } } cache.A[m][w + n_W * (h + n_H * c)] = RELU(cache.Z[m][w + n_W * (h + n_H * c)]); //Aktywowanie danej wartosci/neuronu } } } } double maximum = 0; for (int m = 0; m < M; m++) { //Dla kazdego przypadku for (int h = 0; h < n_Hout; h++) { //Dla kazdego wiersza wyjscia (wyniku max poolingu) for (int w = 0; w < n_Wout; w++) { // Dla kazdej kolumny wyjscia for (int c = 0; c < n_C; c++) { //Dla kazdego kanalu wyjscia if (n_Hout > 1) { vert_start = h * stride_MP; vert_end = vert_start + F_MP; horiz_start = w * stride_MP; horiz_end = horiz_start + F_MP; maximum = 0; for (int j = vert_start; j < vert_end; j++) { //Dla kazdego wiersza wycinka wyniku aktywacji for (int k = horiz_start; k < horiz_end; k++) { //Dla kazdej kolumny wycinka wyniku aktywacji if (cache.A[m][k + n_W * (j + n_H * c)] > maximum) maximum = cache.A[m][k + n_W * (j + n_H * c)]; //Wybierz maksimum z wycinka } } cache.OUT[m][w + n_Wout * (h + n_Hout * c)] = maximum; } else cache.OUT[m][w + n_Wout * (h + n_Hout * c)] = cache.A[m][0 + n_W * (0 + n_H * c)]; } } } } } void show_gradients(layer_param l, cache_data& cache) { for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_Cprev; c++) { std::cout << "dAprev: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hprev << "x" << l.n_Wprev << "x" << l.n_Cprev << ")" << "\n"; for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { std::cout << cache.dAprev[m][w + l.n_Wprev * (h + l.n_Hprev * c)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### dW #### \n\n"; for (int c = 0; c < l.n_C; c++) { for (int d = 0; d < l.n_Cprev; d++) { std::cout << "dW: " << c + 1 << " Kanal: " << d + 1 << " (" << l.F << "x" << l.F << "x" << l.n_Cprev << ")" << "\n" << std::fixed; for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { std::cout << cache.dW[c][w + l.F * (h + l.F * d)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### dZ #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "dZ: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_H << "x" << l.n_W << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { std::cout << cache.dZ[m][w + l.n_W * (h + l.n_H * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } std::cout << "#### dA #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "dA: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hout << "x" << l.n_Wout << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_Hout; h++) { for (int w = 0; w < l.n_Wout; w++) { std::cout << cache.dA[m][w + l.n_Wout * (h + l.n_Hout * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } } void simple_del(double**& arr, int len) { for (int i = 0; i < len; i++) { delete[] arr[i]; } delete[] arr; } void show_results(layer_param l, cache_data& cache) { std::cout.precision(4); for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_Cprev; c++) { std::cout << "Wejscie: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hprev << "x" << l.n_Wprev << "x" << l.n_Cprev << ")" << "\n"; for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { std::cout << cache.IN[m][w + l.n_Wprev * 
(h + l.n_Hprev * c)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### FILTRY #### \n\n"; for (int c = 0; c < l.n_C; c++) { for (int d = 0; d < l.n_Cprev; d++) { std::cout << "Kernel: " << c + 1 << " Kanal: " << d + 1 << " (" << l.F << "x" << l.F << "x" << l.n_Cprev << ")" << "\n" << std::fixed; for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { std::cout << cache.kernel[c][w + l.F * (h + l.F * d)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### WYNIKI KONWOLUCJI #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "Z: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_H << "x" << l.n_W << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { std::cout << cache.Z[m][w + l.n_W * (h + l.n_H * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } std::cout << "#### WYNIKI AKTYWACJI (RELU) #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "A: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_H << "x" << l.n_W << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { std::cout << cache.A[m][w + l.n_W * (h + l.n_H * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } std::cout << "#### WYNIKI MAX POOLINGU #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "Wyjscie: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hout << "x" << l.n_Wout << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_Hout; h++) { for (int w = 0; w < l.n_Wout; w++) { std::cout << cache.OUT[m][w + l.n_Wout * (h + l.n_Hout * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } } void back_prop(layer_param l, cache_data& cache) { std::cout << "#### FILTRY PO PO PROPAGACJI WSTECZNEJ #### \n\n"; for (int c = 0; c < l.n_C; c++) { for (int d = 0; d < l.n_Cprev; d++) { std::cout << "Wagi: " << c + 1 << " Kanal: " << d + 1 << " (" << l.F << "x" << l.F << "x" << l.n_Cprev << ")" << "\n" << std::fixed; for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { std::cout << cache.kernel[c][w + l.F * (h + l.F * d)] - l.alpha * cache.dW[c][w + l.F * (h + l.F * d)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } }
//#include "shallow.h" #include <hip/hip_runtime.h> #include <iostream> #include <time.h> #include <stdlib.h> #define RELU(a) ((a > 0) ? a : 0) #define KERNEL_SIZE(n_C) ((n_C * 2 > 8) ? 8 : n_C * 2) struct layer_param { int M; //Ilosc tablic podawanych na wejscie int pad; //Grubosc warstwy zer na krawedziach (zero-padding) int F; //Rozmiar 2D filtra (F x F) int F_MP; //Rozmiar 2D filtra do max poolingu (F_MP x F_MP) int stride; //Ilosc przeskakiwanych pikseli przy konwolucji na inkrementacje int stride_MP; //To samo, tylko przy max poolingu int n_Hprev; //Wysokosc tablicy wejsciowej podawanej na wejscie sieci int n_Wprev; //Szerokosc tablicy wejsciowej podawanej na wejscie sieci int n_Cprev; //Glebokosc tablicy wejsciowej, jednoczesnie musi to byc glebokosc filtra (F x F x C) int n_H; //Wysokosc tablicy uzyskanej po konwolucji kernela z wejsciem int n_W; int n_C; //Ilosc filtrow, jednoczesnie glebokosc wyjscia warstwy int n_Hout; //Wysokosc tablicy wyjsciowej warstwy int n_Wout; double alpha; //Predkosc uczenia }; struct cache_data { double** IN; //Tablica wejsciowa double** Z; //Wynik splotu double** A; //Wynik Aktywacji double** OUT; //Poprzedni wynik po max poolingu, jednoczescie wyjscie warstwy sieci double** kernel; //Filtr double** dW; //Gradient kosztu wzgledem kerneli double** dA; //Gradient kosztu wzgledem warstwy double** dAprev; //Gradient kosztu wzgledem wyjscia warstwy n_l - 1 double** dZ; //Gradient kosztu wzgledem wyniku konwolucji }; void set_random_IN(layer_param, double**&); //Ustawia losowe wejscie (do testowania) void set_new_IN(double**&, double**&, layer_param l); void show_results(layer_param, cache_data&); //Wyswietla zawartosc koncowych i posrednich wynikow w warstwie void brief_inf(layer_param, double**); //Krotka informacja o wyjsciu sieci void forw_prop(layer_param, cache_data&); //Najwazniejsza funkcja (konwolucja, aktywacja, maxpooling) void simple_del(double**&, int); //Usuwanie pamieci void update_param(layer_param&, layer_param&); //Ustawianie nowych parametrow warstwy void prep_new_arrays(layer_param, cache_data&); //Tworzenie nowych tablic wynikowych void prep_gradients(layer_param, cache_data&); //Tworzenie gradientow (narazie losowo, bez funkcji kosztu) void show_gradients(layer_param, cache_data&); void back_prop(layer_param, cache_data&); int main() { srand(time(NULL)); int number_of_layers = 2; layer_param* l = new layer_param[number_of_layers]; cache_data* cache = new cache_data[number_of_layers]; int n_l = 0; layer_param l_prev; l[n_l].M = 1; l[n_l].pad = 0; l[n_l].F = 3; l[n_l].F_MP = 2; l[n_l].stride = 1; l[n_l].stride_MP = 2; l[n_l].alpha = 0.1; int IN_size = 16; //Rzeczywisty rozmiar wejscia int IN_depth = 1; //Rzeczywista glebokosc wejscia l[n_l].n_Hprev = IN_size + 2 * l[n_l].pad; l[n_l].n_Wprev = IN_size + 2 * l[n_l].pad; l[n_l].n_Cprev = IN_depth; l[n_l].n_H = int((l[n_l].n_Hprev - l[n_l].F) / l[n_l].stride) + 1; l[n_l].n_W = int((l[n_l].n_Wprev - l[n_l].F) / l[n_l].stride) + 1; l[n_l].n_C = 1; l[n_l].n_Hout = int((l[n_l].n_H - l[n_l].F_MP) / l[n_l].stride_MP) + 1; l[n_l].n_Wout = int((l[n_l].n_W - l[n_l].F_MP) / l[n_l].stride_MP) + 1; for (n_l = 0; n_l < number_of_layers; n_l++) { std::cout << "\n\n#### WARSTWA: " << n_l + 1 << "#### \n"; if (n_l == 0) set_random_IN(l[n_l], cache[n_l].IN); else { l_prev = l[n_l - 1]; update_param(l_prev, l[n_l]); set_new_IN(cache[n_l].IN, cache[n_l - 1].OUT, l[n_l]); } prep_new_arrays(l[n_l], cache[n_l]); forw_prop(l[n_l], cache[n_l]); prep_gradients(l[n_l], cache[n_l]); if (l[n_l].n_H < 25) 
show_results(l[n_l], cache[n_l]); else brief_inf(l[n_l], cache[n_l].OUT); //back_prop(l[n_l], cache[n_l]); //show_gradients(l[n_l], cache[n_l]); //Funkcja wyswietla gradient } return 0; } void set_random_IN(layer_param l, double**& IN) { IN = new double* [l.M]; for (int i = 0; i < l.M; i++) { IN[i] = new double[l.n_Cprev * l.n_Hprev * l.n_Wprev]; } for (int m = 0; m < l.M; m++) { //Dla kazdego badanego przypadku (np. pojedynczej mapy bajtowej- spektogram) for (int h = 0; h < l.n_Hprev; h++) { //Przejdz po kazdym wierszu for (int w = 0; w < l.n_Wprev; w++) { //Przejdz po kazdej kolumnie for (int c = 0; c < l.n_Cprev; c++) { //Przejdz po kazdym kanale (np. dla wejscia w postaci zdjecia rgb - 3 kanaly) if (h < l.pad || h > l.n_Hprev - l.pad - 1) IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = 0; //Ustawianie zer dla zero paddingu else if (w < l.pad || w > l.n_Wprev - l.pad - 1) IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = 0; else IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = (rand() % 10 + 1)/10.; //W tablicy wejsciowej beda same wartosci int } } } } } void set_new_IN(double**& IN, double**& OUT, layer_param l) { IN = new double* [l.M]; for (int i = 0; i < l.M; i++) { IN[i] = new double[l.n_Cprev * l.n_Hprev * l.n_Wprev]; } for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_Cprev; c++) { for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { IN[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = OUT[m][w + l.n_Wprev * (h + l.n_Hprev * c)]; } } } } } void update_param(layer_param& l_prev, layer_param& l) { l.M = l_prev.M; l.pad = l_prev.pad; l.F = l_prev.F; l.F_MP = l_prev.F_MP; l.stride = l_prev.stride; l.stride_MP = l_prev.stride_MP; l.alpha = l_prev.alpha; l.n_Hprev = l_prev.n_Hout; l.n_Wprev = l_prev.n_Wout; l.n_Cprev = l_prev.n_C; l.n_H = int((l.n_Hprev - l.F) / l.stride) + 1; l.n_W = int((l.n_Wprev - l.F) / l.stride) + 1; l.n_C = KERNEL_SIZE(l_prev.n_C); l.n_Hout = int((l.n_H - l.F_MP) / l.stride_MP) + 1; l.n_Wout = int((l.n_W - l.F_MP) / l.stride_MP) + 1; } void prep_new_arrays(layer_param l, cache_data& cache) { cache.Z = new double* [l.M]; cache.A = new double* [l.M]; cache.OUT = new double* [l.M]; cache.kernel = new double* [l.n_C]; for (int i = 0; i < l.M; i++) { cache.Z[i] = new double[l.n_C * l.n_H * l.n_W]; cache.A[i] = new double[l.n_C * l.n_H * l.n_W]; cache.OUT[i] = new double[l.n_C * l.n_Hout * l.n_Wout]; } for (int i = 0; i < l.n_C; i++) { cache.kernel[i] = new double[l.n_Cprev * l.F * l.F]; } for (int c = 0; c < l.n_C; c++) { for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { for (int d = 0; d < l.n_Cprev; d++) { cache.kernel[c][w + l.F * (h + l.F * d)] = (rand()%10000 - 5000) * 0.0001; //Ustawianie losowych wag filtra } } } } } void prep_gradients(layer_param l, cache_data& cache) { cache.dZ = new double* [l.M]; cache.dA = new double* [l.M]; cache.dAprev = new double* [l.M]; cache.dW = new double* [l.n_C]; for (int i = 0; i < l.M; i++) { cache.dZ[i] = new double[l.n_C * l.n_H * l.n_W]; } for (int i = 0; i < l.M; i++) { cache.dA[i] = new double[l.n_C * l.n_Hout * l.n_Wout]; } for (int i = 0; i < l.M; i++) { cache.dAprev[i] = new double[l.n_Cprev * l.n_Hprev * l.n_Wprev]; } for (int i = 0; i < l.n_C; i++) { cache.dW[i] = new double[l.n_Cprev * l.F * l.F]; } for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { for (int h = 0; h < l.n_Hout; h++) { for (int w = 0; w < l.n_Wout; w++) { cache.dA[m][w + l.n_Wout * (h + l.n_Hout * c)] = (rand()%10000 - 5000) * 0.0001; } } } } for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_Cprev; c++) { 
for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { cache.dAprev[m][w + l.n_Wprev * (h + l.n_Hprev * c)] = 0; } } } } for (int c = 0; c < l.n_C; c++) { for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { for (int d = 0; d < l.n_Cprev; d++) { cache.dW[c][w + l.F * (h + l.F * d)] = 0; } } } } double maximum; int vert_start, vert_end; int horiz_start, horiz_end; for (int m = 0; m < l.M; m++) { //Dla kazdego przypadku for (int h = 0; h < l.n_Hout; h++) { //Dla kazdego wiersza wyjscia (wyniku max poolingu) for (int w = 0; w < l.n_Wout; w++) { // Dla kazdej kolumny wyjscia for (int c = 0; c < l.n_C; c++) { //Dla kazdego kanalu wyjscia vert_start = h * l.stride_MP; vert_end = vert_start + l.F_MP; horiz_start = w * l.stride_MP; horiz_end = horiz_start + l.F_MP; maximum = 0; for (int j = vert_start; j < vert_end; j++) { //Dla kazdego wiersza wycinka wyniku aktywacji for (int k = horiz_start; k < horiz_end; k++) { //Dla kazdej kolumny wycinka wyniku aktywacji if (cache.A[m][k + l.n_W * (j + l.n_H * c)] > maximum) maximum = cache.A[m][k + l.n_W * (j + l.n_H * c)]; } } for (int j = vert_start; j < vert_end; j++) { for (int k = horiz_start; k < horiz_end; k++) { if (cache.A[m][k + l.n_W * (j + l.n_H * c)] != maximum || maximum == 0) cache.dZ[m][k + l.n_W * (j + l.n_H * c)] = 0; else cache.dZ[m][k + l.n_W * (j + l.n_H * c)] = cache.dA[m][w + l.n_Wout * (h + l.n_Hout * c)]; } } } } } } for (int m = 0; m < l.M; m++) { for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { for (int c = 0; c < l.n_C; c++) { vert_start = h; vert_end = vert_start + l.F; horiz_start = w; horiz_end = horiz_start + l.F; for (int d = 0; d < l.n_Cprev; d++) { for (int j = vert_start; j < vert_end; j++) { for (int k = horiz_start; k < horiz_end; k++) { if (cache.dZ[m][w + l.n_W * (h + l.n_H * c)] < 0) cache.dZ[m][w + l.n_W * (h + l.n_H * c)] = 0; cache.dAprev[m][j + l.n_Wprev * (k + l.n_Hprev * d)] += cache.kernel[c][(k - horiz_start) + l.F * ((j - vert_start) + l.F * d)] * cache.dZ[m][w + l.n_W * (h + l.n_H * c)]; cache.dW[c][(k - horiz_start) + l.F * ((j - vert_start) + l.F * d)] += cache.IN[m][j + l.n_Wprev * (k + l.n_Hprev * d)] * cache.dZ[m][w + l.n_W * (h + l.n_H * c)]; } } } } } } } } void brief_inf(layer_param l, double** OUT) { for (int m = 0; m < l.M; m++) std::cout << "Wyjscie: " << m + 1 << " Kanaly: " << l.n_C << " (" << l.n_Hout << "x" << l.n_Wout << "x" << l.n_C << ")" << "\n" << std::fixed; } void forw_prop(layer_param l, cache_data& cache) { int M = l.M; int pad = l.pad; int F = l.F; int F_MP = l.F_MP; int stride = l.stride; int stride_MP = l.stride_MP; int n_Hprev = l.n_Hprev; int n_Wprev = l.n_Wprev; int n_Cprev = l.n_Cprev; int n_H = l.n_H; int n_W = l.n_W; int n_C = l.n_C; int n_Hout = l.n_Hout; int n_Wout = l.n_Wout; int vert_start = 0; int vert_end = 0; int horiz_start = 0; int horiz_end = 0; for (int m = 0; m < M; m++) { //Dla kazdego przypadku for (int h = 0; h < n_H; h++) { //Dla kazdego wiersza for (int w = 0; w < n_W; w++) { //Dla kazdej kolumny for (int c = 0; c < n_C; c++) { //Dla kazdego kanalu (kanalow bedzie tyle, ile chcemy kerneli) vert_start = h * stride; //Poczatek wycinka w pionie vert_end = vert_start + F; //Koniec wycinka w pionie horiz_start = w * stride; //Poczatek wycika w poziomie horiz_end = horiz_start + F; //Koniec wycinka w poziomie cache.Z[m][w + n_W * (h + n_H * c)] = 0; for (int d = 0; d < n_Cprev; d++) { //Dla kazdego kanalu w tablicy wejsciowej for (int j = vert_start; j < vert_end; j++) { //Dla wybranych wierszy for (int k = 
horiz_start; k < horiz_end; k++) { //Dla wybranych kolumn cache.Z[m][w + n_W * (h + n_H * c)] += cache.kernel[c][(k - horiz_start) + F * ((j - vert_start) + F * d)] * cache.IN[m][k + n_Wprev * (j + n_Hprev * d)]; //Pomnoz wartosc/piksel wycinka przez wage kernela i dodaj do wyniku konwolucji } } } cache.A[m][w + n_W * (h + n_H * c)] = RELU(cache.Z[m][w + n_W * (h + n_H * c)]); //Aktywowanie danej wartosci/neuronu } } } } double maximum = 0; for (int m = 0; m < M; m++) { //Dla kazdego przypadku for (int h = 0; h < n_Hout; h++) { //Dla kazdego wiersza wyjscia (wyniku max poolingu) for (int w = 0; w < n_Wout; w++) { // Dla kazdej kolumny wyjscia for (int c = 0; c < n_C; c++) { //Dla kazdego kanalu wyjscia if (n_Hout > 1) { vert_start = h * stride_MP; vert_end = vert_start + F_MP; horiz_start = w * stride_MP; horiz_end = horiz_start + F_MP; maximum = 0; for (int j = vert_start; j < vert_end; j++) { //Dla kazdego wiersza wycinka wyniku aktywacji for (int k = horiz_start; k < horiz_end; k++) { //Dla kazdej kolumny wycinka wyniku aktywacji if (cache.A[m][k + n_W * (j + n_H * c)] > maximum) maximum = cache.A[m][k + n_W * (j + n_H * c)]; //Wybierz maksimum z wycinka } } cache.OUT[m][w + n_Wout * (h + n_Hout * c)] = maximum; } else cache.OUT[m][w + n_Wout * (h + n_Hout * c)] = cache.A[m][0 + n_W * (0 + n_H * c)]; } } } } } void show_gradients(layer_param l, cache_data& cache) { for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_Cprev; c++) { std::cout << "dAprev: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hprev << "x" << l.n_Wprev << "x" << l.n_Cprev << ")" << "\n"; for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { std::cout << cache.dAprev[m][w + l.n_Wprev * (h + l.n_Hprev * c)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### dW #### \n\n"; for (int c = 0; c < l.n_C; c++) { for (int d = 0; d < l.n_Cprev; d++) { std::cout << "dW: " << c + 1 << " Kanal: " << d + 1 << " (" << l.F << "x" << l.F << "x" << l.n_Cprev << ")" << "\n" << std::fixed; for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { std::cout << cache.dW[c][w + l.F * (h + l.F * d)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### dZ #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "dZ: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_H << "x" << l.n_W << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { std::cout << cache.dZ[m][w + l.n_W * (h + l.n_H * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } std::cout << "#### dA #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "dA: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hout << "x" << l.n_Wout << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_Hout; h++) { for (int w = 0; w < l.n_Wout; w++) { std::cout << cache.dA[m][w + l.n_Wout * (h + l.n_Hout * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } } void simple_del(double**& arr, int len) { for (int i = 0; i < len; i++) { delete[] arr[i]; } delete[] arr; } void show_results(layer_param l, cache_data& cache) { std::cout.precision(4); for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_Cprev; c++) { std::cout << "Wejscie: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hprev << "x" << l.n_Wprev << "x" << l.n_Cprev << ")" << "\n"; for (int h = 0; h < l.n_Hprev; h++) { for (int w = 0; w < l.n_Wprev; w++) { std::cout << cache.IN[m][w + l.n_Wprev * (h + 
l.n_Hprev * c)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### FILTRY #### \n\n"; for (int c = 0; c < l.n_C; c++) { for (int d = 0; d < l.n_Cprev; d++) { std::cout << "Kernel: " << c + 1 << " Kanal: " << d + 1 << " (" << l.F << "x" << l.F << "x" << l.n_Cprev << ")" << "\n" << std::fixed; for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { std::cout << cache.kernel[c][w + l.F * (h + l.F * d)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } std::cout << "#### WYNIKI KONWOLUCJI #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "Z: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_H << "x" << l.n_W << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { std::cout << cache.Z[m][w + l.n_W * (h + l.n_H * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } std::cout << "#### WYNIKI AKTYWACJI (RELU) #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "A: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_H << "x" << l.n_W << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_H; h++) { for (int w = 0; w < l.n_W; w++) { std::cout << cache.A[m][w + l.n_W * (h + l.n_H * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } std::cout << "#### WYNIKI MAX POOLINGU #### \n\n"; for (int m = 0; m < l.M; m++) { for (int c = 0; c < l.n_C; c++) { std::cout << "Wyjscie: " << m + 1 << " Kanal: " << c + 1 << " (" << l.n_Hout << "x" << l.n_Wout << "x" << l.n_C << ")" << "\n" << std::fixed; for (int h = 0; h < l.n_Hout; h++) { for (int w = 0; w < l.n_Wout; w++) { std::cout << cache.OUT[m][w + l.n_Wout * (h + l.n_Hout * c)] << " "; } std::cout << "\n"; } std::cout << "\n"; } } } void back_prop(layer_param l, cache_data& cache) { std::cout << "#### FILTRY PO PO PROPAGACJI WSTECZNEJ #### \n\n"; for (int c = 0; c < l.n_C; c++) { for (int d = 0; d < l.n_Cprev; d++) { std::cout << "Wagi: " << c + 1 << " Kanal: " << d + 1 << " (" << l.F << "x" << l.F << "x" << l.n_Cprev << ")" << "\n" << std::fixed; for (int h = 0; h < l.F; h++) { for (int w = 0; w < l.F; w++) { std::cout << cache.kernel[c][w + l.F * (h + l.F * d)] - l.alpha * cache.dW[c][w + l.F * (h + l.F * d)] << " "; } std::cout << "\n"; } } std::cout << "\n\n"; } }
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //Defining number of elements in Array #define arraySize 5 //Defining number of thread per block #define threadPerBlock 5 // 秩排序算法,对于数组中每个元素,统计小于它的元素个数 __global__ void addKernel(int *d_a, int *d_b) { // 当前元素在排序后数组中的位置 int count = 0; // 块中当前线程索引 int tid = threadIdx.x; // 所有块中当前线程唯一索引 int ttid = blockIdx.x * threadPerBlock + tid; // threadPerBlock=blockDim.x // 每个线程的当前元素 int val = d_a[ttid]; // 共享内存,减少全局内存访问 __shared__ int cache[threadPerBlock]; // 循环直到整个数组计算完,每次计算一个块,步长是一个块线程大小 for (int i = tid; i < arraySize; i += threadPerBlock) { // threadPerBlock=blockDim.x // 填充共享内存 cache[tid] = d_a[i]; __syncthreads(); // 在一个块内,和当前val变量比较,统计block内比其小的数量 for (int j = 0; j < threadPerBlock; ++j) if (val > cache[j]) count++; __syncthreads(); } // 当前线程元素val在排序后数组的位置 d_b[count] = val; } int main() { //Defining host arrays int h_a[arraySize] = { 5, 9, 3, 4, 8 }; int h_b[arraySize]; //Defining device pointers int *d_a, *d_b; // allocate the memory cudaMalloc((void**)&d_b, arraySize * sizeof(int)); cudaMalloc((void**)&d_a, arraySize * sizeof(int)); // Copy input vector from host memory to GPU buffers. cudaMemcpy(d_a, h_a, arraySize * sizeof(int), cudaMemcpyHostToDevice); // Launch a kernel on the GPU with one thread for each element. addKernel<<<arraySize/threadPerBlock, threadPerBlock>>>(d_a, d_b); // 设备同步 cudaDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. cudaMemcpy(h_b, d_b, arraySize * sizeof(int), cudaMemcpyDeviceToHost); printf("The Enumeration sorted Array is: \n"); for (int i = 0; i < arraySize; i++) { printf("%d\n", h_b[i]); } cudaFree(d_a); cudaFree(d_b); return 0; }
#include "hip/hip_runtime.h" #include <stdio.h> //Defining number of elements in Array #define arraySize 5 //Defining number of thread per block #define threadPerBlock 5 // 秩排序算法,对于数组中每个元素,统计小于它的元素个数 __global__ void addKernel(int *d_a, int *d_b) { // 当前元素在排序后数组中的位置 int count = 0; // 块中当前线程索引 int tid = threadIdx.x; // 所有块中当前线程唯一索引 int ttid = blockIdx.x * threadPerBlock + tid; // threadPerBlock=blockDim.x // 每个线程的当前元素 int val = d_a[ttid]; // 共享内存,减少全局内存访问 __shared__ int cache[threadPerBlock]; // 循环直到整个数组计算完,每次计算一个块,步长是一个块线程大小 for (int i = tid; i < arraySize; i += threadPerBlock) { // threadPerBlock=blockDim.x // 填充共享内存 cache[tid] = d_a[i]; __syncthreads(); // 在一个块内,和当前val变量比较,统计block内比其小的数量 for (int j = 0; j < threadPerBlock; ++j) if (val > cache[j]) count++; __syncthreads(); } // 当前线程元素val在排序后数组的位置 d_b[count] = val; } int main() { //Defining host arrays int h_a[arraySize] = { 5, 9, 3, 4, 8 }; int h_b[arraySize]; //Defining device pointers int *d_a, *d_b; // allocate the memory hipMalloc((void**)&d_b, arraySize * sizeof(int)); hipMalloc((void**)&d_a, arraySize * sizeof(int)); // Copy input vector from host memory to GPU buffers. hipMemcpy(d_a, h_a, arraySize * sizeof(int), hipMemcpyHostToDevice); // Launch a kernel on the GPU with one thread for each element. addKernel<<<arraySize/threadPerBlock, threadPerBlock>>>(d_a, d_b); // 设备同步 hipDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. hipMemcpy(h_b, d_b, arraySize * sizeof(int), hipMemcpyDeviceToHost); printf("The Enumeration sorted Array is: \n"); for (int i = 0; i < arraySize; i++) { printf("%d\n", h_b[i]); } hipFree(d_a); hipFree(d_b); return 0; }
Convert the following CUDA code to AMD GPU code: cuda /* * * Programa de Introducción a los conceptos de CUDA * Mariana Hernández * Alan Córdova * * */ #include <stdio.h> #include <stdlib.h> /* Declaración de métodos/ /* Utilidad para checar errores de CUDA */ void checkCUDAError(const char*); /* Kernel para sumar dos vectores en un sólo bloque de hilos */ __global__ void vect_add(int *d_a, int *d_b, int *d_c) { /* Part 2B: Implementación del kernel para realizar la suma de los vectores en el GPU */ int i = blockDim.x * blockIdx.x + threadIdx.x; // if (i < N) d_c[i] = d_a[i] + d_b[i]; } /* Versión de múltiples bloques de la suma de vectores */ __global__ void vect_add_multiblock(int *d_a, int *d_b, int *d_c) { /* Part 2C: Implementación del kernel pero esta vez permitiendo múltiples bloques de hilos. */ int i = blockDim.x * blockIdx.x + threadIdx.x; // if (i < N) d_c[i] = d_a[i] + d_b[i]; } /* Numero de elementos en el vector */ #define ARRAY_SIZE 256 /* * Número de bloques e hilos * Su producto siempre debe ser el tamaño del vector (arreglo). */ #define NUM_BLOCKS 1 #define THREADS_PER_BLOCK 256 /* Main routine */ int main(int argc, char *argv[]) { int *a, *b, *c, *d; /* Arreglos del CPU */ int *d_a, *d_b, *d_c, *d_d;/* Arreglos del GPU */ int i; size_t sz = ARRAY_SIZE * sizeof(int); /* * Reservar memoria en el cpu */ a = (int *) malloc(sz); b = (int *) malloc(sz); c = (int *) malloc(sz); d = (int *) malloc(sz); /* * Parte 1A:Reservar memoria en el GPU */ cudaMalloc(&d_a, sz); cudaMalloc(&d_b, sz); cudaMalloc(&d_c, sz); cudaMalloc(&d_d, sz); /* inicialización */ for (i = 0; i < ARRAY_SIZE; i++) { a[i] = i; b[i] = ARRAY_SIZE - i; c[i] = 0; d[i] = 0; } /* Parte 1B: Copiar los vectores del CPU al GPU */ cudaMemcpy(d_a, a, sz, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, sz, cudaMemcpyHostToDevice); cudaMemcpy(d_c, c, sz, cudaMemcpyHostToDevice); cudaMemcpy(d_d, d, sz, cudaMemcpyHostToDevice); /* run the kernel on the GPU */ /* Parte 2A: Configurar y llamar los kernels */ /* dim3 dimGrid( ); */ /* dim3 dimBlock( ); */ /* vect_add<<< , >>>( ); */ //invocamos kernel int threadsPerBlock = 64; // ARRAY_SIZE/NUM_BLOCKS int blocksPerGrid = 4; // nuevo NUM_BLOCKS // Para obtener tiempos de ejecucion del kernel cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); vect_add<<< NUM_BLOCKS , THREADS_PER_BLOCK >>> (d_a, d_b, d_c); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Tiempo de vect_add: \t %f \n", milliseconds); cudaEventRecord(start); vect_add_multiblock<<< blocksPerGrid , threadsPerBlock >>> (d_a, d_b, d_d); cudaEventRecord(stop); cudaEventSynchronize(stop); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Tiempo de vect_add_multiblock (4 bloques): \t %f \n", milliseconds); /* Esperar a que todos los threads acaben y checar por errores */ cudaThreadSynchronize(); checkCUDAError("kernel invocation"); /* Part 1C: copiar el resultado de nuevo al CPU */ cudaMemcpy(a, d_a, sz, cudaMemcpyDeviceToHost); cudaMemcpy(b, d_b, sz, cudaMemcpyDeviceToHost); cudaMemcpy(c, d_c, sz, cudaMemcpyDeviceToHost); cudaMemcpy(d, d_d, sz, cudaMemcpyDeviceToHost); checkCUDAError("memcpy"); /* print out the result */ printf("Results: "); for (i = 0; i < ARRAY_SIZE; i++) { printf("%d, ", c[i]); } for (i = 0; i < ARRAY_SIZE; i++) { printf("%d, ", d[i] ); } printf("\n\n"); /* Parte 1D: Liberar los arreglos */ cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); cudaFree(d_d); 
free(a); free(b); free(c); free(d); return 0; } /* Utility function to check for and report CUDA errors */ void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
/* * * Programa de Introducción a los conceptos de CUDA * Mariana Hernández * Alan Córdova * * */ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> /* Declaración de métodos/ /* Utilidad para checar errores de CUDA */ void checkCUDAError(const char*); /* Kernel para sumar dos vectores en un sólo bloque de hilos */ __global__ void vect_add(int *d_a, int *d_b, int *d_c) { /* Part 2B: Implementación del kernel para realizar la suma de los vectores en el GPU */ int i = blockDim.x * blockIdx.x + threadIdx.x; // if (i < N) d_c[i] = d_a[i] + d_b[i]; } /* Versión de múltiples bloques de la suma de vectores */ __global__ void vect_add_multiblock(int *d_a, int *d_b, int *d_c) { /* Part 2C: Implementación del kernel pero esta vez permitiendo múltiples bloques de hilos. */ int i = blockDim.x * blockIdx.x + threadIdx.x; // if (i < N) d_c[i] = d_a[i] + d_b[i]; } /* Numero de elementos en el vector */ #define ARRAY_SIZE 256 /* * Número de bloques e hilos * Su producto siempre debe ser el tamaño del vector (arreglo). */ #define NUM_BLOCKS 1 #define THREADS_PER_BLOCK 256 /* Main routine */ int main(int argc, char *argv[]) { int *a, *b, *c, *d; /* Arreglos del CPU */ int *d_a, *d_b, *d_c, *d_d;/* Arreglos del GPU */ int i; size_t sz = ARRAY_SIZE * sizeof(int); /* * Reservar memoria en el cpu */ a = (int *) malloc(sz); b = (int *) malloc(sz); c = (int *) malloc(sz); d = (int *) malloc(sz); /* * Parte 1A:Reservar memoria en el GPU */ hipMalloc(&d_a, sz); hipMalloc(&d_b, sz); hipMalloc(&d_c, sz); hipMalloc(&d_d, sz); /* inicialización */ for (i = 0; i < ARRAY_SIZE; i++) { a[i] = i; b[i] = ARRAY_SIZE - i; c[i] = 0; d[i] = 0; } /* Parte 1B: Copiar los vectores del CPU al GPU */ hipMemcpy(d_a, a, sz, hipMemcpyHostToDevice); hipMemcpy(d_b, b, sz, hipMemcpyHostToDevice); hipMemcpy(d_c, c, sz, hipMemcpyHostToDevice); hipMemcpy(d_d, d, sz, hipMemcpyHostToDevice); /* run the kernel on the GPU */ /* Parte 2A: Configurar y llamar los kernels */ /* dim3 dimGrid( ); */ /* dim3 dimBlock( ); */ /* vect_add<<< , >>>( ); */ //invocamos kernel int threadsPerBlock = 64; // ARRAY_SIZE/NUM_BLOCKS int blocksPerGrid = 4; // nuevo NUM_BLOCKS // Para obtener tiempos de ejecucion del kernel hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); vect_add<<< NUM_BLOCKS , THREADS_PER_BLOCK >>> (d_a, d_b, d_c); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Tiempo de vect_add: \t %f \n", milliseconds); hipEventRecord(start); vect_add_multiblock<<< blocksPerGrid , threadsPerBlock >>> (d_a, d_b, d_d); hipEventRecord(stop); hipEventSynchronize(stop); milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Tiempo de vect_add_multiblock (4 bloques): \t %f \n", milliseconds); /* Esperar a que todos los threads acaben y checar por errores */ hipDeviceSynchronize(); checkCUDAError("kernel invocation"); /* Part 1C: copiar el resultado de nuevo al CPU */ hipMemcpy(a, d_a, sz, hipMemcpyDeviceToHost); hipMemcpy(b, d_b, sz, hipMemcpyDeviceToHost); hipMemcpy(c, d_c, sz, hipMemcpyDeviceToHost); hipMemcpy(d, d_d, sz, hipMemcpyDeviceToHost); checkCUDAError("memcpy"); /* print out the result */ printf("Results: "); for (i = 0; i < ARRAY_SIZE; i++) { printf("%d, ", c[i]); } for (i = 0; i < ARRAY_SIZE; i++) { printf("%d, ", d[i] ); } printf("\n\n"); /* Parte 1D: Liberar los arreglos */ hipFree(d_a); hipFree(d_b); hipFree(d_c); hipFree(d_d); free(a); free(b); free(c); free(d); return 0; } /* Utility 
function to check for and report CUDA errors */ void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
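A note on the pair above: hipcc accepts the CUDA-style triple-chevron launch syntax, so vect_add<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_b, d_c) carries over verbatim. HIP also provides the hipLaunchKernelGGL macro as an equivalent spelling that some build setups prefer; a minimal sketch of the same launch, reusing the names from the answer above:

hipLaunchKernelGGL(vect_add,                 // kernel to launch
                   dim3(NUM_BLOCKS),         // grid dimensions
                   dim3(THREADS_PER_BLOCK),  // block dimensions
                   0,                        // dynamic shared memory (bytes)
                   0,                        // stream (0 = default stream)
                   d_a, d_b, d_c);           // kernel arguments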
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #define WIDTH gridDim.x*blockDim.x #define HEIGHT gridDim.y*blockDim.y #define X ((blockIdx.x * blockDim.x) + threadIdx.x) #define Y ((blockIdx.y * blockDim.y) + threadIdx.y) extern "C" __global__ void computeFloat( int *iters, float4 area, int maxIterations, float sqrEscapeRadius ) { const float cr = area.x + X*area.z; const float ci = area.y + Y*area.w; float zr = 0; float zi = 0; float new_zr = 0.0f; int count = 0; for (; count<maxIterations; count++){ new_zr = (zr * zr - zi * zi) + cr; zi = ((2.0f * zr) * zi) + ci; zr = new_zr; if ( (zr * zr + zi * zi) >= sqrEscapeRadius ) { break; } } const int tIndex = X + Y * WIDTH; iters[tIndex] = count; } int main() { int tWidth = 5000; int tHeight = 5000; int tMaxIter = 1000; float4 tArea = {-1.5f,-1.0f, 2.0f/tWidth, 2.0f/tHeight}; int *tIters = (int*)malloc(tWidth*tHeight*sizeof(int)); int *cuIters; cudaMalloc((void **)&cuIters, sizeof(int)*tWidth*tHeight); cudaMemcpy(cuIters, tIters ,tHeight*tWidth*sizeof(int) ,cudaMemcpyHostToDevice); int tThreadsX = 8; int tThreadsY = 4; dim3 blockSize = dim3( tThreadsX,tThreadsY); dim3 gridSize = dim3( tWidth/tThreadsX, tHeight/tThreadsY ); computeFloat<<<gridSize, blockSize>>>( cuIters, tArea, tMaxIter, 4); cudaDeviceSynchronize(); cudaMemcpy(tIters, cuIters ,tHeight*tWidth*sizeof(int) ,cudaMemcpyDeviceToHost); // 160 chars width and 80 chars height for ( int y=0;y<tHeight; y+=tHeight/80) { for ( int x=0; x<tWidth; x+=tWidth/160) { if ( tIters[x+y*tWidth]==tMaxIter) { printf("X"); } else { printf("."); } } printf("\n"); } }
#include <hip/hip_runtime.h> #include <stdio.h> #define WIDTH gridDim.x*blockDim.x #define HEIGHT gridDim.y*blockDim.y #define X ((blockIdx.x * blockDim.x) + threadIdx.x) #define Y ((blockIdx.y * blockDim.y) + threadIdx.y) extern "C" __global__ void computeFloat( int *iters, float4 area, int maxIterations, float sqrEscapeRadius ) { const float cr = area.x + X*area.z; const float ci = area.y + Y*area.w; float zr = 0; float zi = 0; float new_zr = 0.0f; int count = 0; for (; count<maxIterations; count++){ new_zr = (zr * zr - zi * zi) + cr; zi = ((2.0f * zr) * zi) + ci; zr = new_zr; if ( (zr * zr + zi * zi) >= sqrEscapeRadius ) { break; } } const int tIndex = X + Y * WIDTH; iters[tIndex] = count; } int main() { int tWidth = 5000; int tHeight = 5000; int tMaxIter = 1000; float4 tArea = {-1.5f,-1.0f, 2.0f/tWidth, 2.0f/tHeight}; int *tIters = (int*)malloc(tWidth*tHeight*sizeof(int)); int *cuIters; hipMalloc((void **)&cuIters, sizeof(int)*tWidth*tHeight); hipMemcpy(cuIters, tIters ,tHeight*tWidth*sizeof(int) ,hipMemcpyHostToDevice); int tThreadsX = 8; int tThreadsY = 4; dim3 blockSize = dim3( tThreadsX,tThreadsY); dim3 gridSize = dim3( tWidth/tThreadsX, tHeight/tThreadsY ); computeFloat<<<gridSize, blockSize>>>( cuIters, tArea, tMaxIter, 4); hipDeviceSynchronize(); hipMemcpy(tIters, cuIters ,tHeight*tWidth*sizeof(int) ,hipMemcpyDeviceToHost); // 160 chars width and 80 chars height for ( int y=0;y<tHeight; y+=tHeight/80) { for ( int x=0; x<tWidth; x+=tWidth/160) { if ( tIters[x+y*tWidth]==tMaxIter) { printf("X"); } else { printf("."); } } printf("\n"); } }
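None of the HIP runtime calls in this conversion check their return codes (the CUDA original is equally silent). A minimal error-checking macro, offered as a sketch rather than as part of the dataset answer:

// requires <stdlib.h> for exit()
#define HIP_CHECK(call)                                           \
    do {                                                          \
        hipError_t e_ = (call);                                   \
        if (e_ != hipSuccess) {                                   \
            fprintf(stderr, "HIP error: %s at %s:%d\n",           \
                    hipGetErrorString(e_), __FILE__, __LINE__);   \
            exit(EXIT_FAILURE);                                   \
        }                                                         \
    } while (0)

// usage: HIP_CHECK(hipMalloc((void **)&cuIters, sizeof(int)*tWidth*tHeight));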
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void pnpoly_cnGPU1(const float *px, const float *py, const float *vx, const float *vy, char* cs, int npoint, int nvert) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < npoint) { int j, k, c = 0; for (j = 0, k = nvert-1; j < nvert; k = j++) { if ( ((vy[j]>py[i]) != (vy[k]>py[i])) && (px[i] < (vx[k]-vx[j]) * (py[i]-vy[j]) / (vy[k]-vy[j]) + vx[j]) ) c = !c; } cs[i] = c & 1; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void pnpoly_cnGPU1(const float *px, const float *py, const float *vx, const float *vy, char* cs, int npoint, int nvert) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < npoint) { int j, k, c = 0; for (j = 0, k = nvert-1; j < nvert; k = j++) { if ( ((vy[j]>py[i]) != (vy[k]>py[i])) && (px[i] < (vx[k]-vx[j]) * (py[i]-vy[j]) / (vy[k]-vy[j]) + vx[j]) ) c = !c; } cs[i] = c & 1; } }
Convert the following CUDA code to AMD GPU code: cuda #include <cuda.h> int main(int argc, char ** argv) { int deviceCount; cudaGetDeviceCount(&deviceCount); for (int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { // Ne detecte pas CUDA return -1; } else { // Afficher le nombre de device } } // Afficher le nom de la device // Donner le numero de version majeur et mineur // Donner la taille de la memoire globale // Donner la taille de la memoire constante // Donner la taille de la memoire partagee par bloc // Donner le nombre de thread max dans chacune des directions // Donner le taille maximum de la grille pour chaque direction // Donner la taille du warp } return 0; }
#include <hip/hip_runtime.h> int main(int argc, char ** argv) { int deviceCount; hipGetDeviceCount(&deviceCount); for (int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { // Ne detecte pas CUDA return -1; } else { // Afficher le nombre de device } } // Afficher le nom de la device // Donner le numero de version majeur et mineur // Donner la taille de la memoire globale // Donner la taille de la memoire constante // Donner la taille de la memoire partagee par bloc // Donner le nombre de thread max dans chacune des directions // Donner le taille maximum de la grille pour chaque direction // Donner la taille du warp } return 0; }
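The conversion keeps the property queries as TODO comments. One possible way to fill them in; each field below exists on hipDeviceProp_t, which mirrors cudaDeviceProp for these members (note that #include <stdio.h> would be needed for the printf calls, and neither version includes it):

printf("Device %d: %s\n", dev, deviceProp.name);
printf("  Compute capability:   %d.%d\n", deviceProp.major, deviceProp.minor);
printf("  Global memory:        %zu bytes\n", deviceProp.totalGlobalMem);
printf("  Constant memory:      %zu bytes\n", deviceProp.totalConstMem);
printf("  Shared mem per block: %zu bytes\n", deviceProp.sharedMemPerBlock);
printf("  Max threads per dim:  %d x %d x %d\n",
       deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("  Max grid size:        %d x %d x %d\n",
       deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
printf("  Warp size:            %d\n", deviceProp.warpSize);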
Convert the following CUDA code to AMD GPU code: cuda /** * Writed By: Huaxia Wang * [email protected] **/ #include <stdio.h> #include <stdlib.h> #include <string.h> #define CHECK_ERR(x) \ if (x != cudaSuccess) { \ fprintf(stderr, "%s in %s at line %d\n", \ cudaGetErrorString(err), __FILE__, __LINE__); \ exit(-1); \ } \ #define FILE_LINE 5120000 #define LINE_WIDTH 128 //device function, used to locate the searching word in a sentence __device__ char *d_strstr(const char *str1, const char *str2, int width) { char *cp = (char *)str1; char *s1, *s2; //if searching word is empty, return the sentence if(!*str2) return ((char*)str1); int i = 0; //keep searching until return the end of sentence while(i < width) { s1 = cp; s2 = (char *)str2; while(*s1 && *s2 && !(*s1 - *s2)) s1++, s2++; if(!*s2) return cp; cp++; i++; } return NULL; } //device memcpy, used to copy certain length memory from source to destination __device__ char *d_memcpy(char *dest, char *src, int count) { char *result = dest; //check whether there is a memory overlap if(dest <= src || dest >= (src + count)) { while(count--) *(char *)dest++ = *(char *)src++; } else { dest += count - 1; src += count - 1; while(count--) *(char *)dest-- = *(char *)src--; } return result; } //callable function in CPU, which fulfill the grep function __global__ void d_Grep(char *d_File, char *d_regex, char *result, int line, int width){ int i = blockDim.x * blockIdx.x + threadIdx.x; char *pch; if(i < line) { pch = d_strstr(&d_File[i*width], d_regex, width); if(pch != NULL) d_memcpy(&result[i*width], &d_File[i*width], sizeof(char)*width); } } int main(int argc, char* argv[]) { cudaError_t err; //get arguments from command line char *Filename = argv[1]; char *Regexp = argv[2]; if(Regexp==NULL||Filename==NULL){ printf("Usage: #./program [file name] [searching words]"); return -1; } //open file FILE *f; f = fopen(Filename, "r"); if(f == NULL) { printf("Fail to open file!\n"); return -1; } char **file; char *result; int i; //store file and searching results file = (char **)malloc(sizeof(char*)*FILE_LINE); result = (char *)malloc(sizeof(char)*FILE_LINE*LINE_WIDTH); //keep the continuity of memory file[0] = (char *)malloc(sizeof(char)*FILE_LINE*LINE_WIDTH); for(i = 1; i < FILE_LINE; i++) file[i] = file[i-1] + LINE_WIDTH; //get file contents for(i = 0; i < FILE_LINE; i++) { fgets(file[i], LINE_WIDTH, f); } //allocate memory in GPU, and copy data to it char *d_file, *d_regex, *d_result; err = cudaMalloc((void**) &d_file, sizeof(char)*FILE_LINE*LINE_WIDTH); CHECK_ERR(err); err = cudaMalloc((void**) &d_regex, strlen(Regexp)); CHECK_ERR(err); err = cudaMalloc((void**) &d_result, sizeof(char)*FILE_LINE*LINE_WIDTH); CHECK_ERR(err); err = cudaMemset(d_result, 0, sizeof(char)*FILE_LINE*LINE_WIDTH); CHECK_ERR(err); err = cudaMemcpy(d_file, &file[0][0], sizeof(char)*FILE_LINE*LINE_WIDTH, cudaMemcpyHostToDevice); CHECK_ERR(err); err = cudaMemcpy(d_regex, Regexp, strlen(Regexp), cudaMemcpyHostToDevice); CHECK_ERR(err); //set number of threads in a block int numThread = 512; int numBlock = ceil((double)FILE_LINE/numThread); d_Grep<<<numBlock, numThread>>>(d_file, d_regex, d_result, FILE_LINE, LINE_WIDTH); cudaDeviceSynchronize(); err = cudaMemcpy(result, d_result, sizeof(char)*FILE_LINE*LINE_WIDTH, cudaMemcpyDeviceToHost); CHECK_ERR(err); //free all the GPU memory err = cudaFree(d_file); CHECK_ERR(err); err = cudaFree(d_regex); CHECK_ERR(err); err = cudaFree(d_result); CHECK_ERR(err); //print the results for(i = 0; i < FILE_LINE; i++) { if(&result[i*LINE_WIDTH] != NULL) 
printf("%s", &result[i*LINE_WIDTH]); } free(file); free(result); return 0; }
/** * Writed By: Huaxia Wang * [email protected] **/ #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #define CHECK_ERR(x) \ if (x != hipSuccess) { \ fprintf(stderr, "%s in %s at line %d\n", \ hipGetErrorString(err), __FILE__, __LINE__); \ exit(-1); \ } \ #define FILE_LINE 5120000 #define LINE_WIDTH 128 //device function, used to locate the searching word in a sentence __device__ char *d_strstr(const char *str1, const char *str2, int width) { char *cp = (char *)str1; char *s1, *s2; //if searching word is empty, return the sentence if(!*str2) return ((char*)str1); int i = 0; //keep searching until return the end of sentence while(i < width) { s1 = cp; s2 = (char *)str2; while(*s1 && *s2 && !(*s1 - *s2)) s1++, s2++; if(!*s2) return cp; cp++; i++; } return NULL; } //device memcpy, used to copy certain length memory from source to destination __device__ char *d_memcpy(char *dest, char *src, int count) { char *result = dest; //check whether there is a memory overlap if(dest <= src || dest >= (src + count)) { while(count--) *(char *)dest++ = *(char *)src++; } else { dest += count - 1; src += count - 1; while(count--) *(char *)dest-- = *(char *)src--; } return result; } //callable function in CPU, which fulfill the grep function __global__ void d_Grep(char *d_File, char *d_regex, char *result, int line, int width){ int i = blockDim.x * blockIdx.x + threadIdx.x; char *pch; if(i < line) { pch = d_strstr(&d_File[i*width], d_regex, width); if(pch != NULL) d_memcpy(&result[i*width], &d_File[i*width], sizeof(char)*width); } } int main(int argc, char* argv[]) { hipError_t err; //get arguments from command line char *Filename = argv[1]; char *Regexp = argv[2]; if(Regexp==NULL||Filename==NULL){ printf("Usage: #./program [file name] [searching words]"); return -1; } //open file FILE *f; f = fopen(Filename, "r"); if(f == NULL) { printf("Fail to open file!\n"); return -1; } char **file; char *result; int i; //store file and searching results file = (char **)malloc(sizeof(char*)*FILE_LINE); result = (char *)malloc(sizeof(char)*FILE_LINE*LINE_WIDTH); //keep the continuity of memory file[0] = (char *)malloc(sizeof(char)*FILE_LINE*LINE_WIDTH); for(i = 1; i < FILE_LINE; i++) file[i] = file[i-1] + LINE_WIDTH; //get file contents for(i = 0; i < FILE_LINE; i++) { fgets(file[i], LINE_WIDTH, f); } //allocate memory in GPU, and copy data to it char *d_file, *d_regex, *d_result; err = hipMalloc((void**) &d_file, sizeof(char)*FILE_LINE*LINE_WIDTH); CHECK_ERR(err); err = hipMalloc((void**) &d_regex, strlen(Regexp)); CHECK_ERR(err); err = hipMalloc((void**) &d_result, sizeof(char)*FILE_LINE*LINE_WIDTH); CHECK_ERR(err); err = hipMemset(d_result, 0, sizeof(char)*FILE_LINE*LINE_WIDTH); CHECK_ERR(err); err = hipMemcpy(d_file, &file[0][0], sizeof(char)*FILE_LINE*LINE_WIDTH, hipMemcpyHostToDevice); CHECK_ERR(err); err = hipMemcpy(d_regex, Regexp, strlen(Regexp), hipMemcpyHostToDevice); CHECK_ERR(err); //set number of threads in a block int numThread = 512; int numBlock = ceil((double)FILE_LINE/numThread); d_Grep<<<numBlock, numThread>>>(d_file, d_regex, d_result, FILE_LINE, LINE_WIDTH); hipDeviceSynchronize(); err = hipMemcpy(result, d_result, sizeof(char)*FILE_LINE*LINE_WIDTH, hipMemcpyDeviceToHost); CHECK_ERR(err); //free all the GPU memory err = hipFree(d_file); CHECK_ERR(err); err = hipFree(d_regex); CHECK_ERR(err); err = hipFree(d_result); CHECK_ERR(err); //print the results for(i = 0; i < FILE_LINE; i++) { if(&result[i*LINE_WIDTH] != NULL) printf("%s", &result[i*LINE_WIDTH]); } 
free(file); free(result); return 0; }
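One detail carried over from the CUDA original: numBlock is computed with ceil() even though <math.h> is never included, and the double round-trip is unnecessary for two ints. The usual integer idiom, shown as a suggested cleanup rather than as part of the dataset answer:

int numBlock = (FILE_LINE + numThread - 1) / numThread;  // ceiling division, integer-only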
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <string> #include <curand.h> #include <curand_kernel.h> #define ull unsigned long long #define ld long double #define GTX_1060_BLOCKS 1280 #define WARP_SIZE 32 // количество потоков в блоке /** * Запуск по всем блокам. После выполнения функции в per_blocks_sum лежат * сумма для каждого блока, и тогда эти значения суммируются в одно. * @param per_blocks_sum массив с локальной суммой по всем потокам каждого блока * @param iterations количество итераций на поток. */ __global__ void kernel(ull *per_blocks_sum, ull iterations) { __shared__ ull per_block_sum[WARP_SIZE]; ull index = threadIdx.x + blockIdx.x * blockDim.x; curandState_t rng; curand_init(clock64(), index, 0, &rng); per_block_sum[threadIdx.x] = 0; for (int i = 0; i < iterations; i++) { double x = curand_uniform(&rng); // x в [0,1] double y = curand_uniform(&rng); // y в [0,1] per_block_sum[threadIdx.x] += 1 - int(x * x + y * y); } if (threadIdx.x == 0) { per_blocks_sum[blockIdx.x] = 0; for (int i = 0; i < WARP_SIZE; i++) { per_blocks_sum[blockIdx.x] += per_block_sum[i]; } } } __host__ ld monteCarloCPU(ull N) { double x,y; ld sum = 0; for(int i = 0; i < N; i++){ x = (double) rand()/RAND_MAX; y = (double) rand()/RAND_MAX; if(x*x + y*y <= 1) sum += 1.0; } return sum * 4.0 / (ld)(N); } __host__ ld monteCarloGPU(ull N) { ull iterations; size_t size = N * sizeof(ull); ull *sums_per_blocks = nullptr; cudaMalloc(&sums_per_blocks, size); iterations = N / (GTX_1060_BLOCKS * WARP_SIZE); if (iterations == 0) { iterations = 1; kernel<<<N, 1>>>(sums_per_blocks, iterations); } else { kernel<<<GTX_1060_BLOCKS, WARP_SIZE>>>(sums_per_blocks, iterations); } cudaDeviceSynchronize(); ull *host_sums_per_blocks = (ull *) malloc(size); cudaMemcpy(host_sums_per_blocks, sums_per_blocks, size, cudaMemcpyDeviceToHost); double sum = 0; double sum_iterations = GTX_1060_BLOCKS; if(iterations == 1) { sum_iterations = N; } for (int i = 0; i < sum_iterations; i++) { sum += host_sums_per_blocks[i]; } double divizor = iterations == 1 ? N : GTX_1060_BLOCKS * WARP_SIZE * iterations; free(host_sums_per_blocks); cudaFree(sums_per_blocks); return sum * 4 / divizor; } int main() { unsigned long long n = 1e8; // scanf("%llu", &n); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); ld pi = monteCarloGPU(n); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); printf("GPU Pi:: %Lf\n", pi); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Time consumed for monteCarloGPU :: %3.1f ms \n", milliseconds); cudaEventRecord(start, 0); pi = monteCarloCPU(n); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); printf("CPU Pi:: %Lf\n", pi); cudaEventElapsedTime(&milliseconds, start, stop); printf("Time consumed for monteCarloCPU :: %3.1f ms \n", milliseconds); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define ull unsigned long long #define ld long double #define GTX_1060_BLOCKS 1280 #define WARP_SIZE 32 // количество потоков в блоке /** * Запуск по всем блокам. После выполнения функции в per_blocks_sum лежат * сумма для каждого блока, и тогда эти значения суммируются в одно. * @param per_blocks_sum массив с локальной суммой по всем потокам каждого блока * @param iterations количество итераций на поток. */ __global__ void kernel(ull *per_blocks_sum, ull iterations) { __shared__ ull per_block_sum[WARP_SIZE]; ull index = threadIdx.x + blockIdx.x * blockDim.x; hiprandState_t rng; hiprand_init(clock64(), index, 0, &rng); per_block_sum[threadIdx.x] = 0; for (int i = 0; i < iterations; i++) { double x = hiprand_uniform(&rng); // x в [0,1] double y = hiprand_uniform(&rng); // y в [0,1] per_block_sum[threadIdx.x] += 1 - int(x * x + y * y); } if (threadIdx.x == 0) { per_blocks_sum[blockIdx.x] = 0; for (int i = 0; i < WARP_SIZE; i++) { per_blocks_sum[blockIdx.x] += per_block_sum[i]; } } } __host__ ld monteCarloCPU(ull N) { double x,y; ld sum = 0; for(int i = 0; i < N; i++){ x = (double) rand()/RAND_MAX; y = (double) rand()/RAND_MAX; if(x*x + y*y <= 1) sum += 1.0; } return sum * 4.0 / (ld)(N); } __host__ ld monteCarloGPU(ull N) { ull iterations; size_t size = N * sizeof(ull); ull *sums_per_blocks = nullptr; hipMalloc(&sums_per_blocks, size); iterations = N / (GTX_1060_BLOCKS * WARP_SIZE); if (iterations == 0) { iterations = 1; kernel<<<N, 1>>>(sums_per_blocks, iterations); } else { kernel<<<GTX_1060_BLOCKS, WARP_SIZE>>>(sums_per_blocks, iterations); } hipDeviceSynchronize(); ull *host_sums_per_blocks = (ull *) malloc(size); hipMemcpy(host_sums_per_blocks, sums_per_blocks, size, hipMemcpyDeviceToHost); double sum = 0; double sum_iterations = GTX_1060_BLOCKS; if(iterations == 1) { sum_iterations = N; } for (int i = 0; i < sum_iterations; i++) { sum += host_sums_per_blocks[i]; } double divizor = iterations == 1 ? N : GTX_1060_BLOCKS * WARP_SIZE * iterations; free(host_sums_per_blocks); hipFree(sums_per_blocks); return sum * 4 / divizor; } int main() { unsigned long long n = 1e8; // scanf("%llu", &n); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); ld pi = monteCarloGPU(n); hipEventRecord(stop, 0); hipEventSynchronize(stop); printf("GPU Pi:: %Lf\n", pi); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Time consumed for monteCarloGPU :: %3.1f ms \n", milliseconds); hipEventRecord(start, 0); pi = monteCarloCPU(n); hipEventRecord(stop, 0); hipEventSynchronize(stop); printf("CPU Pi:: %Lf\n", pi); hipEventElapsedTime(&milliseconds, start, stop); printf("Time consumed for monteCarloCPU :: %3.1f ms \n", milliseconds); return 0; }
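The cuRAND device API maps one-to-one onto hipRAND in this pair (curandState_t -> hiprandState_t, curand_init -> hiprand_init, curand_uniform -> hiprand_uniform), and the resulting binary must be linked against the hipRAND library (typically -lhiprand). One portability caveat worth flagging: thread 0 reads every per_block_sum slot written by its siblings with no barrier in between, which relies on implicit warp-synchronous execution; that is formally a data race, and AMD wavefronts are 64 wide rather than 32. A defensive tweak — not part of the original answer — is to place a barrier between the sampling loop and the thread-0 reduction:

__syncthreads();  // added: make every thread's partial sum visible
if (threadIdx.x == 0) {
    per_blocks_sum[blockIdx.x] = 0;
    for (int i = 0; i < WARP_SIZE; i++) {
        per_blocks_sum[blockIdx.x] += per_block_sum[i];
    }
}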
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> __device__ int d_value; __global__ void test_Kernel() { int threadID = threadIdx.x; d_value = 1; printf("threadID %-3d d_value%3d\n",threadID,d_value); } int main() { int h_value = 0; test_Kernel<<<1,2>>>(); cudaMemcpyFromSymbol(&h_value,d_value, sizeof(int),0,cudaMemcpyDeviceToHost); printf("Output from host: %d\n",h_value); return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> __device__ int d_value; __global__ void test_Kernel() { int threadID = threadIdx.x; d_value = 1; printf("threadID %-3d d_value%3d\n",threadID,d_value); } int main() { int h_value = 0; test_Kernel<<<1,2>>>(); hipMemcpyFromSymbol(&h_value,HIP_SYMBOL(d_value), sizeof(int),0,hipMemcpyDeviceToHost); printf("Output from host: %d\n",h_value); return 0; }
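HIP_SYMBOL is the one non-mechanical edit in this pair: hipify tooling wraps device symbols passed to hipMemcpyFromSymbol and hipMemcpyToSymbol in HIP_SYMBOL(...) so that the same source line expands correctly on both the AMD and NVIDIA platforms of HIP. The same wrapping applies in the write direction; a sketch reusing d_value from above:

int h_init = 42;
hipMemcpyToSymbol(HIP_SYMBOL(d_value), &h_init, sizeof(int), 0, hipMemcpyHostToDevice);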
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void Subtract(float *d_Result, float *d_Data1, float *d_Data2, int width, int pitch, int height) { const int x = blockIdx.x*SUBTRACT_W + threadIdx.x; const int y = blockIdx.y*SUBTRACT_H + threadIdx.y; int p = y*pitch + x; if (x<width && y<height) d_Result[p] = d_Data1[p] - d_Data2[p]; __syncthreads(); }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void Subtract(float *d_Result, float *d_Data1, float *d_Data2, int width, int pitch, int height) { const int x = blockIdx.x*SUBTRACT_W + threadIdx.x; const int y = blockIdx.y*SUBTRACT_H + threadIdx.y; int p = y*pitch + x; if (x<width && y<height) d_Result[p] = d_Data1[p] - d_Data2[p]; __syncthreads(); }
Convert the following CUDA code to AMD GPU code: cuda #include <iostream> #include <time.h> #include <string> #include <vector> #include <sstream> #include <cuda_runtime.h> #include <math.h> #include <fstream> // Libreria para leer archivos #include <typeinfo> // for 'typeid' to work #include <tuple> using namespace std; /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Funciones de apoyo */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ __constant__ int HitoriCM[100*100]; //FIXME: Cambiar cuando se actualice N y M void funcionQL(string* Hitori_Str, int* Hit_State, int N); // Función para Splitear un String void tokenize(string const &str, const char delim, vector<string> &out) { // construct a stream from the string stringstream ss(str); string s; while (getline(ss, s, delim)) { out.push_back(s); } return; } /* * Impresor de Matrix 2D - Almacenada linealmente */ void showMatrix(int *matrix, int N, int M) { for(int j = 0; j < M; j++){ for(int i = 0; i < N; i++) printf("%d ", matrix[i + j*N]); printf("\n"); } printf("\n"); } void showMatrix(string* matrix, int N, int M) { for(int j = 0; j < M; j++){ for(int i = 0; i < N; i++) if(matrix[i + j*N].size() == 2) cout << matrix[i + j*N] << " "; else if(matrix[i + j*N].size() == 1) cout << matrix[i + j*N] << " "; printf("\n"); } printf("\n"); } /* void showMatrix(string* matrix, int N, int M) { for(int j = 0; j < M; j++){ for(int i = 0; i < N; i++) cout << matrix[i + j*N] << " "; printf("\n"); } printf("\n"); }*/ void readHitoriFromFile(fstream* FILE, int* matrixH, string* matrixHstr, int N){ int i, j = 0; const char delim = ' '; string line; vector<string> row; while( getline(*FILE, line)){ tokenize(line, delim, row); for(i = 0; i < N ; i++){ matrixHstr[j] = row[i]; matrixH[j++] = stoi(row[i]); } // Limpiar el buffer de salida row.clear(); } } /* 1 -> not multiple 2 -> multiple per row 3 -> multiple per column 4 -> multiple per row and column 5 -> not paintable 6 -> paintable // Eliminado */ // tuple (elem , posElem) vector<tuple<int , int>> getRemainingMultiples(int* Hit_State, int N){ int i,j; int elem; int posElem; vector<tuple<int, int>> M; tuple<int, int> tup; /* 1 -> not multiple 2 -> multiple per row 3 -> multiple per column 4 -> multiple per row and column 5 -> not paintable 6 -> paintable // Eliminado */ for(j = 0; j < N; j++ ){ for(i = 0; i < N; i++){ posElem = i + j*N; elem = Hit_State[posElem]; tup = make_tuple(elem,posElem); switch(elem) { case 2: M.push_back(tup); break; case 3: M.push_back(tup); break; case 4: M.push_back(tup); break; default: break; } } } return M; } /* Función para consistencia del Hitori Lo que está función hace es mirar si dos multiples en la misma columna o fila tienen el mismo número y si ambos son not paintable (5). */ bool isRule4Conform(int* Hit_State, int N){ int i; vector<tuple<int, int>> M = getRemainingMultiples(Hit_State, N); for( i = 0; i < M.size() ; i++){ } return true; } /* Ejecutar cada vez que un multiplo es pintado (6) 1. Setear todas las celdas adyacentes al múltiplo pintado. 2. 
*/ bool StandardCyclePattern(int* Hitori, int* Hit_State, int N){ // Comprueba Regla 4: // return isRule4Conform(Hit_State, N); return true; } void copyHitoriToHitori(int* Hit_State, int* Hit_StateAux, int N){ int i, j; for(j = 0; j < N; j++) for( i = 0; j < N; j++) Hit_StateAux[i + j*N] = Hit_State[i + j*N]; } void setNotPaintable(int* Hit_State, tuple<int, int> tup ){ Hit_State[ get<0>(tup) ] = 5; } void paint(int* Hit_State, tuple<int, int> tup){ Hit_State[ get<0>(tup)] = 6; return; } void setInitialHitoriState(int *Hit_State, int N) { for(int j = 0; j < N; j++) for(int i = 0; i < N; i++) Hit_State[i + j*N] = 1; // 1 -> not multiple } void SetHitoriState( int* Hitori, int* Hit_State, int N){ bool flag1, flag2; for(int j = 0; j < N; j++){ for(int i = 0; i < N; i++){ flag1 = false; flag2 = false; int posElem = i + j*N; int elem = Hitori[posElem]; // iterar por Fila for(int k = j*N; k < N + j*N ; k++){ if( k == posElem ) continue; if( Hitori[k] == elem ){ flag1 = true; break; } } // iterar por Columna for(int t = i; t < N*N ;t += N ){ if( t == posElem ) continue; if( Hitori[t] == elem){ flag2 = true; break; } } if( flag1 == true && flag2 == true) // case 4 -> multiple per row and column Hit_State[posElem] = 4; else if( flag1 == true ) //2 -> multiple per row Hit_State[posElem] = 2; else if( flag2 == true) //3 -> multiple per column Hit_State[posElem] = 3; } } } void updateHitori(string* Hitori_Str, int* Hit_State, int N){ int i, j; for( j = 0; j < N; j++){ for( i = 0; i < N; i++){ if( Hit_State[i + j*N] == 6) Hitori_Str[i + j*N] = "X"; } } return; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* CPU */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ void tripletF(int *hitori, int* estado, int N){ int i, aux; bool back, next; for(i = 0; i < N*N; i++){ //int fila = i/N; int columna = i%N; if(columna > 0 && columna < N){ int valor = hitori[i]; aux = estado[i]; back = (hitori[i-1] == valor)? true : false; next = (hitori[i+1] == valor)? true : false; estado[i] = (back && next)? 5 : aux; } } } void tripletC(int *hitori, int *estado, int N){ int i, aux; bool up, down; for (i = 0; i < N*N; i++){ int fila = i/N; //int columna = i%N; if (fila > 0 && fila < N){ int valor = hitori[i]; aux = estado[i]; up = (hitori[i-N] == valor) ? true : false; down = (hitori[i+N] == valor)? true : false; estado[i] = (up && down) ? 5 : aux; } } } void rescateF(int *hitori, int *estado, int N){ int i, aux; bool back, next; for (i = 0; i < N*N; i++){ //int fila = i/N; int columna = i%N; if (columna > 0 && columna < N){ // int valor = hitori[i]; aux = estado[i]; back = (estado[i-1] == 6)? true : false; next = (estado[i+1] == 6)? true : false; estado[i] = (back || next) ? 5 : aux; } } } void rescateC(int *hitori, int *estado, int N){ int i, aux; bool up, down; for (i = 0; i < N*N; i++){ int fila = i/N; //int columna = i%N; if (fila > 0 && fila < N){ //int valor = hitori[i]; aux = estado[i]; up = (estado[i-N] == 6)? true : false; down = (estado[i+N] == 6)? true : false; estado[i] = (up || down) ? 5 : aux; } } } void DobleC(int* hitori,int *estado, int N){ //int f; //Fila en que esta int c; //Columna en la que esta int pos; for(int i = 0; i < N*N; i++) { bool ant = false; bool doble = false; //f = i / N; c = i % N; int valor = hitori[i]; for(int j = 0; j < N; j++){ pos = c+N*j; doble = (ant && i != pos && hitori[pos] == valor)? true : doble; ant = (i != pos && hitori[pos] == valor)? 
true : false; } if(doble) { estado[i] = 6; } } } void DobleF(int* hitori,int *estado, int N){ int f; //Fila en que esta //int c; //Columna en la que esta int pos; for(int i = 0; i < N*N; i++) { bool ant = false; bool doble = false; f = i / N; //c = i % N; int valor = hitori[i]; for(int j = 0; j < N; j++){ pos = f*N+j; doble = (ant && i != pos && hitori[pos] == valor)? true : doble; ant = (i != pos && hitori[pos] == valor)? true : false; } if(doble) { estado[i] = 6; } } } void muerteF(int *hitori, int *estado, int N){ int i, aux1, aux2; int pos; for(i = 0; i < N*N; i++){ int fila = i/N; //int columna = i%N; int valor = hitori[i]; aux1 = estado[i]; if(aux1 != 5 && aux1 !=6){ for(int j = 0; j < N; j++){ pos = fila*N+j; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[i] = aux1; } } } void muerteC(int *hitori, int *estado, int N){ int i, aux1, aux2; int pos; for(i = 0; i < N*N; i++){ //int fila = i/N; int columna = i%N; int valor = hitori[i]; aux1 = estado[i]; if(aux1 != 5 && aux1 !=6){ for(int j = 0; j < N; j++){ pos = columna+N*j; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[i] = aux1; } } } void funcionCPU(string* Hitori_Str, int* Hitori, int* estado, int N){ int i; // Ejecutar patrones //printf(" - TRIPLETE - \n"); tripletF(Hitori, estado, N); tripletC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); //printf(" - DOBLE - \n"); DobleF(Hitori, estado, N); DobleC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); for(i = 0; i < N; i++){ //printf(" - MUERTE - \n"); muerteF(Hitori, estado, N); muerteC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); //printf(" - RESCATE - \n"); rescateF(Hitori, estado, N); rescateC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); } return; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* GPU primera implementacion */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* -------------------------- Deteccion de patrones ------------------------- */ __global__ void kernelTripletF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { int valor = hitori[tId]; aux = estado[tId]; back = (hitori[tId-1] == valor)? true : false; next = (hitori[tId+1] == valor)? true : false; estado[tId] = (back && next) ? 5 : aux; } } __global__ void kernelTripletC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta //int c = tId % N; //Columna en la que esta bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { int valor = hitori[tId]; aux = estado[tId]; up = (hitori[tId-N] == valor)? true : false; down = (hitori[tId+N] == valor)? true : false; estado[tId] = (up && down) ? 5 : aux; } } __global__ void kernelDobleF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = hitori[tId]; for(int i = 0; i < N; i++){ pos = f*N+i; doble = (ant && tId != pos && hitori[pos] == valor)? true : doble; ant = (tId != pos && hitori[pos] == valor)? 
true : false; } if(doble) { estado[tId] = 6; } } } __global__ void kernelDobleC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = hitori[tId]; for(int i = 0; i < N; i++){ pos = c+N*i; doble = (ant && tId != pos && hitori[pos] == valor)? true : doble; ant = (tId != pos && hitori[pos] == valor)? true : false; } if(doble) { estado[tId] = 6; } } } /* ---------------------------- Funciones del for --------------------------- */ __global__ void kernelRescateF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { // int valor = hitori[tId]; aux = estado[tId]; back = (estado[tId-1] == 6)? true : false; next = (estado[tId+1] == 6)? true : false; estado[tId] = (back || next) ? 5 : aux; } } __global__ void kernelRescateC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { //int valor = hitori[tId]; aux = estado[tId]; up = (estado[tId-N] == 6)? true : false; down = (estado[tId+N] == 6)? true : false; estado[tId] = (up || down) ? 5 : aux; } } __global__ void kernelMuerteF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta int aux1, aux2, pos; if(tId < N*N) { int valor = hitori[tId]; aux1 = estado[tId]; if(aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ pos = f*N+i; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } __global__ void kernelMuerteC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta int aux1, aux2, pos; if(tId < N*N) { int valor = hitori[tId]; aux1 = estado[tId]; if (aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ pos = c+N*i; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* GPU segunda implementacion */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ __global__ void kernelTripletF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { int valor = HitoriCM[tId]; aux = estado[tId]; back = (HitoriCM[tId-1] == valor)? true : false; next = (HitoriCM[tId+1] == valor)? true : false; estado[tId] = (back && next) ? 5 : aux; } } __global__ void kernelTripletC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { int valor = HitoriCM[tId]; aux = estado[tId]; up = (HitoriCM[tId-N] == valor)? true : false; down = (HitoriCM[tId+N] == valor)? true : false; estado[tId] = (up && down) ? 
5 : aux; } } __global__ void kernelRescateF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { // int valor = HitoriCM[tId]; aux = estado[tId]; back = (estado[tId-1] == 6)? true : false; next = (estado[tId+1] == 6)? true : false; estado[tId] = (back || next) ? 5 : aux; } } __global__ void kernelRescateC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { // int valor = HitoriCM[tId]; aux = estado[tId]; up = (estado[tId-N] == 6)? true : false; down = (estado[tId+N] == 6)? true : false; estado[tId] = (up || down) ? 5 : aux; } } __global__ void kernelDobleC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = HitoriCM[tId]; for(int i = 0; i < N; i++){ pos = c+N*i; doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble; ant = (tId != pos && HitoriCM[pos] == valor)? true : false; } if(doble) { estado[tId] = 6; } } } __global__ void kernelDobleF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = HitoriCM[tId]; for(int i = 0; i < N; i++){ pos = f*N+i; doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble; ant = (tId != pos && HitoriCM[pos] == valor)? true : false; } if(doble) { estado[tId] = 6; } } } __global__ void kernelMuerteF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta int aux1, aux2, pos; if(tId < N*N) { int valor = HitoriCM[tId]; aux1 = estado[tId]; if(aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ pos = f*N+i; aux2 = HitoriCM[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } __global__ void kernelMuerteC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta int aux1, aux2; if(tId < N*N) { int valor = HitoriCM[tId]; aux1 = estado[tId]; if (aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ aux2 = HitoriCM[c+N*i]; if(valor == aux2){ aux1 = (estado[c+N*i] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } void funcionQL(string* Hitori_Str, int* Hit_State, int N){ // Visualizar Hitori updateHitori(Hitori_Str, Hit_State, N); showMatrix(Hitori_Str, N, N); //printf("\n Hitori Estado \n"); //showMatrix(Hit_State, N, N); return; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Main */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ int main(int argc, char* argv[]){ fstream FILE; int* Hitori; string* Hitori_Str; int* Hit_State; int N; string line; vector<tuple<int, int>> M; string nameFile = argv[1]; // Abrir el archivo en modo lectura FILE.open(nameFile, ios::in); if(!FILE){ cerr << "Unable to open file!" 
<< endl; exit(1); } if( FILE.is_open() ){ getline(FILE, line); N = stoi(line); Hitori = new int[N*N]; Hit_State = new int[N*N]; Hitori_Str = new string[N*N]; setInitialHitoriState(Hit_State, N); readHitoriFromFile(&FILE, Hitori, Hitori_Str, N); SetHitoriState( Hitori, Hit_State, N); // Parte CPU // Inicialización variables de tiempo clock_t t1, t2; double ms; t1 = clock(); funcionCPU(Hitori_Str, Hitori, Hit_State, N); t2 = clock(); ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC; printf("Tiempo de CPU: %5f \n", ms); //cout << "Tiempo CPU: " << ms << "[ms]" << endl; funcionQL(Hitori_Str, Hit_State, N); SetHitoriState( Hitori, Hit_State, N); // Parte GPU 1 // Def tiempos GPU int* HitoriDev, *Hit_StateDev; cudaEvent_t ct1, ct2; float dt; cudaEventCreate(&ct1); cudaEventCreate(&ct2); int block_size = 256; // múltiplo de 32 int grid_size = (int)ceil((float)(N*N)/block_size); // ceil : función techo cudaMalloc(&HitoriDev, sizeof(int)*N*N); cudaMalloc(&Hit_StateDev, sizeof(int)*N*N); cudaEventCreate(&ct1); cudaEventCreate(&ct2); cudaEventRecord(ct1); cudaMemcpy(HitoriDev, Hitori, N*N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(Hit_StateDev, Hit_State, N*N*sizeof(int), cudaMemcpyHostToDevice); kernelTripletF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelTripletC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelDobleF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelDobleC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); for(int i = 0; i < N; i++){ kernelMuerteF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelMuerteC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelRescateF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelRescateC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); } cudaMemcpy(Hit_State, Hit_StateDev, N*N*sizeof(int), cudaMemcpyDeviceToHost); cudaEventRecord(ct2); cudaEventSynchronize(ct2); cudaEventElapsedTime(&dt, ct1, ct2); cout << "Tiempo GPU 1: " << dt << "[ms]" << endl; funcionQL(Hitori_Str, Hit_State, N); SetHitoriState( Hitori, Hit_State, N); // Parte GPU 2 int* Hit_StateDev2; cudaMalloc(&Hit_StateDev2, sizeof(int)*N*N); cudaEventRecord(ct1); cudaMemcpyToSymbol(HitoriCM, Hitori, N*N*sizeof(int), 0, cudaMemcpyHostToDevice); // Para kernel CM cudaMemcpy(Hit_StateDev2, Hit_State, N*N*sizeof(int), cudaMemcpyHostToDevice); kernelTripletF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelTripletC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelDobleF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelDobleC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); for(int i = 0; i < N; i++){ kernelMuerteF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelMuerteC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelRescateF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelRescateC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); } cudaMemcpy(Hit_State, Hit_StateDev2, N*N*sizeof(int), cudaMemcpyDeviceToHost); cudaEventRecord(ct2); cudaEventSynchronize(ct2); cudaEventElapsedTime(&dt, ct1, ct2); cout << "Tiempo GPU 2: " << dt << "[ms]" << endl; funcionQL(Hitori_Str, Hit_State, N); // Liberar memoria delete[] Hitori; delete[] Hit_State; delete[] Hitori_Str; //P1 cudaFree(HitoriDev); cudaFree(Hit_StateDev); //P2 cudaFree(Hit_StateDev2); } FILE.close(); return 0; }
#include <iostream> #include <time.h> #include <string> #include <vector> #include <sstream> #include <hip/hip_runtime.h> #include <math.h> #include <fstream> // Libreria para leer archivos #include <typeinfo> // for 'typeid' to work #include <tuple> using namespace std; /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Funciones de apoyo */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ __constant__ int HitoriCM[100*100]; //FIXME: Cambiar cuando se actualice N y M void funcionQL(string* Hitori_Str, int* Hit_State, int N); // Función para Splitear un String void tokenize(string const &str, const char delim, vector<string> &out) { // construct a stream from the string stringstream ss(str); string s; while (getline(ss, s, delim)) { out.push_back(s); } return; } /* * Impresor de Matrix 2D - Almacenada linealmente */ void showMatrix(int *matrix, int N, int M) { for(int j = 0; j < M; j++){ for(int i = 0; i < N; i++) printf("%d ", matrix[i + j*N]); printf("\n"); } printf("\n"); } void showMatrix(string* matrix, int N, int M) { for(int j = 0; j < M; j++){ for(int i = 0; i < N; i++) if(matrix[i + j*N].size() == 2) cout << matrix[i + j*N] << " "; else if(matrix[i + j*N].size() == 1) cout << matrix[i + j*N] << " "; printf("\n"); } printf("\n"); } /* void showMatrix(string* matrix, int N, int M) { for(int j = 0; j < M; j++){ for(int i = 0; i < N; i++) cout << matrix[i + j*N] << " "; printf("\n"); } printf("\n"); }*/ void readHitoriFromFile(fstream* FILE, int* matrixH, string* matrixHstr, int N){ int i, j = 0; const char delim = ' '; string line; vector<string> row; while( getline(*FILE, line)){ tokenize(line, delim, row); for(i = 0; i < N ; i++){ matrixHstr[j] = row[i]; matrixH[j++] = stoi(row[i]); } // Limpiar el buffer de salida row.clear(); } } /* 1 -> not multiple 2 -> multiple per row 3 -> multiple per column 4 -> multiple per row and column 5 -> not paintable 6 -> paintable // Eliminado */ // tuple (elem , posElem) vector<tuple<int , int>> getRemainingMultiples(int* Hit_State, int N){ int i,j; int elem; int posElem; vector<tuple<int, int>> M; tuple<int, int> tup; /* 1 -> not multiple 2 -> multiple per row 3 -> multiple per column 4 -> multiple per row and column 5 -> not paintable 6 -> paintable // Eliminado */ for(j = 0; j < N; j++ ){ for(i = 0; i < N; i++){ posElem = i + j*N; elem = Hit_State[posElem]; tup = make_tuple(elem,posElem); switch(elem) { case 2: M.push_back(tup); break; case 3: M.push_back(tup); break; case 4: M.push_back(tup); break; default: break; } } } return M; } /* Función para consistencia del Hitori Lo que está función hace es mirar si dos multiples en la misma columna o fila tienen el mismo número y si ambos son not paintable (5). */ bool isRule4Conform(int* Hit_State, int N){ int i; vector<tuple<int, int>> M = getRemainingMultiples(Hit_State, N); for( i = 0; i < M.size() ; i++){ } return true; } /* Ejecutar cada vez que un multiplo es pintado (6) 1. Setear todas las celdas adyacentes al múltiplo pintado. 2. 
*/ bool StandardCyclePattern(int* Hitori, int* Hit_State, int N){ // Comprueba Regla 4: // return isRule4Conform(Hit_State, N); return true; } void copyHitoriToHitori(int* Hit_State, int* Hit_StateAux, int N){ int i, j; for(j = 0; j < N; j++) for( i = 0; j < N; j++) Hit_StateAux[i + j*N] = Hit_State[i + j*N]; } void setNotPaintable(int* Hit_State, tuple<int, int> tup ){ Hit_State[ get<0>(tup) ] = 5; } void paint(int* Hit_State, tuple<int, int> tup){ Hit_State[ get<0>(tup)] = 6; return; } void setInitialHitoriState(int *Hit_State, int N) { for(int j = 0; j < N; j++) for(int i = 0; i < N; i++) Hit_State[i + j*N] = 1; // 1 -> not multiple } void SetHitoriState( int* Hitori, int* Hit_State, int N){ bool flag1, flag2; for(int j = 0; j < N; j++){ for(int i = 0; i < N; i++){ flag1 = false; flag2 = false; int posElem = i + j*N; int elem = Hitori[posElem]; // iterar por Fila for(int k = j*N; k < N + j*N ; k++){ if( k == posElem ) continue; if( Hitori[k] == elem ){ flag1 = true; break; } } // iterar por Columna for(int t = i; t < N*N ;t += N ){ if( t == posElem ) continue; if( Hitori[t] == elem){ flag2 = true; break; } } if( flag1 == true && flag2 == true) // case 4 -> multiple per row and column Hit_State[posElem] = 4; else if( flag1 == true ) //2 -> multiple per row Hit_State[posElem] = 2; else if( flag2 == true) //3 -> multiple per column Hit_State[posElem] = 3; } } } void updateHitori(string* Hitori_Str, int* Hit_State, int N){ int i, j; for( j = 0; j < N; j++){ for( i = 0; i < N; i++){ if( Hit_State[i + j*N] == 6) Hitori_Str[i + j*N] = "X"; } } return; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* CPU */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ void tripletF(int *hitori, int* estado, int N){ int i, aux; bool back, next; for(i = 0; i < N*N; i++){ //int fila = i/N; int columna = i%N; if(columna > 0 && columna < N){ int valor = hitori[i]; aux = estado[i]; back = (hitori[i-1] == valor)? true : false; next = (hitori[i+1] == valor)? true : false; estado[i] = (back && next)? 5 : aux; } } } void tripletC(int *hitori, int *estado, int N){ int i, aux; bool up, down; for (i = 0; i < N*N; i++){ int fila = i/N; //int columna = i%N; if (fila > 0 && fila < N){ int valor = hitori[i]; aux = estado[i]; up = (hitori[i-N] == valor) ? true : false; down = (hitori[i+N] == valor)? true : false; estado[i] = (up && down) ? 5 : aux; } } } void rescateF(int *hitori, int *estado, int N){ int i, aux; bool back, next; for (i = 0; i < N*N; i++){ //int fila = i/N; int columna = i%N; if (columna > 0 && columna < N){ // int valor = hitori[i]; aux = estado[i]; back = (estado[i-1] == 6)? true : false; next = (estado[i+1] == 6)? true : false; estado[i] = (back || next) ? 5 : aux; } } } void rescateC(int *hitori, int *estado, int N){ int i, aux; bool up, down; for (i = 0; i < N*N; i++){ int fila = i/N; //int columna = i%N; if (fila > 0 && fila < N){ //int valor = hitori[i]; aux = estado[i]; up = (estado[i-N] == 6)? true : false; down = (estado[i+N] == 6)? true : false; estado[i] = (up || down) ? 5 : aux; } } } void DobleC(int* hitori,int *estado, int N){ //int f; //Fila en que esta int c; //Columna en la que esta int pos; for(int i = 0; i < N*N; i++) { bool ant = false; bool doble = false; //f = i / N; c = i % N; int valor = hitori[i]; for(int j = 0; j < N; j++){ pos = c+N*j; doble = (ant && i != pos && hitori[pos] == valor)? true : doble; ant = (i != pos && hitori[pos] == valor)? 
true : false; } if(doble) { estado[i] = 6; } } } void DobleF(int* hitori,int *estado, int N){ int f; //Fila en que esta //int c; //Columna en la que esta int pos; for(int i = 0; i < N*N; i++) { bool ant = false; bool doble = false; f = i / N; //c = i % N; int valor = hitori[i]; for(int j = 0; j < N; j++){ pos = f*N+j; doble = (ant && i != pos && hitori[pos] == valor)? true : doble; ant = (i != pos && hitori[pos] == valor)? true : false; } if(doble) { estado[i] = 6; } } } void muerteF(int *hitori, int *estado, int N){ int i, aux1, aux2; int pos; for(i = 0; i < N*N; i++){ int fila = i/N; //int columna = i%N; int valor = hitori[i]; aux1 = estado[i]; if(aux1 != 5 && aux1 !=6){ for(int j = 0; j < N; j++){ pos = fila*N+j; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[i] = aux1; } } } void muerteC(int *hitori, int *estado, int N){ int i, aux1, aux2; int pos; for(i = 0; i < N*N; i++){ //int fila = i/N; int columna = i%N; int valor = hitori[i]; aux1 = estado[i]; if(aux1 != 5 && aux1 !=6){ for(int j = 0; j < N; j++){ pos = columna+N*j; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[i] = aux1; } } } void funcionCPU(string* Hitori_Str, int* Hitori, int* estado, int N){ int i; // Ejecutar patrones //printf(" - TRIPLETE - \n"); tripletF(Hitori, estado, N); tripletC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); //printf(" - DOBLE - \n"); DobleF(Hitori, estado, N); DobleC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); for(i = 0; i < N; i++){ //printf(" - MUERTE - \n"); muerteF(Hitori, estado, N); muerteC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); //printf(" - RESCATE - \n"); rescateF(Hitori, estado, N); rescateC(Hitori, estado, N); //funcionQL(Hitori_Str, estado, N); } return; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* GPU primera implementacion */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* -------------------------- Deteccion de patrones ------------------------- */ __global__ void kernelTripletF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Fila en que esta int c = tId % N; //Columna en la que esta bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { int valor = hitori[tId]; aux = estado[tId]; back = (hitori[tId-1] == valor)? true : false; next = (hitori[tId+1] == valor)? true : false; estado[tId] = (back && next) ? 5 : aux; } } __global__ void kernelTripletC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta //int c = tId % N; //Columna en la que esta bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { int valor = hitori[tId]; aux = estado[tId]; up = (hitori[tId-N] == valor)? true : false; down = (hitori[tId+N] == valor)? true : false; estado[tId] = (up && down) ? 5 : aux; } } __global__ void kernelDobleF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Fila en que esta // int c = tId % N; //Columna en la que esta bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = hitori[tId]; for(int i = 0; i < N; i++){ pos = f*N+i; doble = (ant && tId != pos && hitori[pos] == valor)? true : doble; ant = (tId != pos && hitori[pos] == valor)? 
true : false; } if(doble) { estado[tId] = 6; } } } __global__ void kernelDobleC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Row it is in int c = tId % N; //Column it is in bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = hitori[tId]; for(int i = 0; i < N; i++){ pos = c+N*i; doble = (ant && tId != pos && hitori[pos] == valor)? true : doble; ant = (tId != pos && hitori[pos] == valor)? true : false; } if(doble) { estado[tId] = 6; } } } /* ---------------------------- For-loop functions --------------------------- */ __global__ void kernelRescateF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Row it is in int c = tId % N; //Column it is in bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { // int valor = hitori[tId]; aux = estado[tId]; back = (estado[tId-1] == 6)? true : false; next = (estado[tId+1] == 6)? true : false; estado[tId] = (back || next) ? 5 : aux; } } __global__ void kernelRescateC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Row it is in // int c = tId % N; //Column it is in bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { //int valor = hitori[tId]; aux = estado[tId]; up = (estado[tId-N] == 6)? true : false; down = (estado[tId+N] == 6)? true : false; estado[tId] = (up || down) ? 5 : aux; } } __global__ void kernelMuerteF(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Row it is in // int c = tId % N; //Column it is in int aux1, aux2, pos; if(tId < N*N) { int valor = hitori[tId]; aux1 = estado[tId]; if(aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ pos = f*N+i; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } __global__ void kernelMuerteC(int *hitori, int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; //int f = tId / N; //Row it is in int c = tId % N; //Column it is in int aux1, aux2, pos; if(tId < N*N) { int valor = hitori[tId]; aux1 = estado[tId]; if (aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ pos = c+N*i; aux2 = hitori[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* GPU second implementation */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ __global__ void kernelTripletF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Row it is in int c = tId % N; //Column it is in bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { int valor = HitoriCM[tId]; aux = estado[tId]; back = (HitoriCM[tId-1] == valor)? true : false; next = (HitoriCM[tId+1] == valor)? true : false; estado[tId] = (back && next) ? 5 : aux; } } __global__ void kernelTripletC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Row it is in // int c = tId % N; //Column it is in bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { int valor = HitoriCM[tId]; aux = estado[tId]; up = (HitoriCM[tId-N] == valor)? true : false; down = (HitoriCM[tId+N] == valor)? true : false; estado[tId] = (up && down) ? 5 : aux; } } __global__ void kernelRescateF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Row it is in int c = tId % N; //Column it is in bool back, next; int aux; if(tId < N*N && c > 0 && c < N) { // int valor = HitoriCM[tId]; aux = estado[tId]; back = (estado[tId-1] == 6)? true : false; next = (estado[tId+1] == 6)? true : false; estado[tId] = (back || next) ? 5 : aux; } } __global__ void kernelRescateC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Row it is in // int c = tId % N; //Column it is in bool up, down; int aux; if(tId < N*N && f > 0 && f < N) { // int valor = HitoriCM[tId]; aux = estado[tId]; up = (estado[tId-N] == 6)? true : false; down = (estado[tId+N] == 6)? true : false; estado[tId] = (up || down) ? 5 : aux; } } __global__ void kernelDobleC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Row it is in int c = tId % N; //Column it is in bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = HitoriCM[tId]; for(int i = 0; i < N; i++){ pos = c+N*i; doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble; ant = (tId != pos && HitoriCM[pos] == valor)? true : false; } if(doble) { estado[tId] = 6; } } } __global__ void kernelDobleF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Row it is in // int c = tId % N; //Column it is in bool ant = false; bool doble = false; int pos; if(tId < N*N) { int valor = HitoriCM[tId]; for(int i = 0; i < N; i++){ pos = f*N+i; doble = (ant && tId != pos && HitoriCM[pos] == valor)? true : doble; ant = (tId != pos && HitoriCM[pos] == valor)? true : false; } if(doble) { estado[tId] = 6; } } } __global__ void kernelMuerteF_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; int f = tId / N; //Row it is in // int c = tId % N; //Column it is in int aux1, aux2, pos; if(tId < N*N) { int valor = HitoriCM[tId]; aux1 = estado[tId]; if(aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ pos = f*N+i; aux2 = HitoriCM[pos]; if(valor == aux2){ aux1 = (estado[pos] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } __global__ void kernelMuerteC_CM(int *estado, int N){ int tId = threadIdx.x + blockIdx.x * blockDim.x; // int f = tId / N; //Row it is in int c = tId % N; //Column it is in int aux1, aux2; if(tId < N*N) { int valor = HitoriCM[tId]; aux1 = estado[tId]; if (aux1 != 5 && aux1 != 6){ for(int i = 0; i < N; i++){ aux2 = HitoriCM[c+N*i]; if(valor == aux2){ aux1 = (estado[c+N*i] == 5)? 6 : aux1; } } estado[tId] = aux1; } } } void funcionQL(string* Hitori_Str, int* Hit_State, int N){ // Display the Hitori updateHitori(Hitori_Str, Hit_State, N); showMatrix(Hitori_Str, N, N); //printf("\n Hitori State \n"); //showMatrix(Hit_State, N, N); return; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Main */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ int main(int argc, char* argv[]){ fstream FILE; int* Hitori; string* Hitori_Str; int* Hit_State; int N; string line; vector<tuple<int, int>> M; string nameFile = argv[1]; // Open the file in read mode FILE.open(nameFile, ios::in); if(!FILE){ cerr << "Unable to open file!" << endl; exit(1); } if( FILE.is_open() ){ getline(FILE, line); N = stoi(line); Hitori = new int[N*N]; Hit_State = new int[N*N]; Hitori_Str = new string[N*N]; setInitialHitoriState(Hit_State, N); readHitoriFromFile(&FILE, Hitori, Hitori_Str, N); SetHitoriState( Hitori, Hit_State, N); // CPU part // Initialize timing variables clock_t t1, t2; double ms; t1 = clock(); funcionCPU(Hitori_Str, Hitori, Hit_State, N); t2 = clock(); ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC; printf("CPU time: %5f \n", ms); //cout << "CPU time: " << ms << "[ms]" << endl; funcionQL(Hitori_Str, Hit_State, N); SetHitoriState( Hitori, Hit_State, N); // GPU part 1 // Declare GPU timers int* HitoriDev, *Hit_StateDev; hipEvent_t ct1, ct2; float dt; hipEventCreate(&ct1); hipEventCreate(&ct2); int block_size = 256; // multiple of 32 int grid_size = (int)ceil((float)(N*N)/block_size); // ceil: ceiling function hipMalloc(&HitoriDev, sizeof(int)*N*N); hipMalloc(&Hit_StateDev, sizeof(int)*N*N); hipEventCreate(&ct1); hipEventCreate(&ct2); hipEventRecord(ct1); hipMemcpy(HitoriDev, Hitori, N*N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(Hit_StateDev, Hit_State, N*N*sizeof(int), hipMemcpyHostToDevice); kernelTripletF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelTripletC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelDobleF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelDobleC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); for(int i = 0; i < N; i++){ kernelMuerteF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelMuerteC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelRescateF<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); kernelRescateC<<<grid_size, block_size>>>(HitoriDev, Hit_StateDev, N); } hipMemcpy(Hit_State, Hit_StateDev, N*N*sizeof(int), hipMemcpyDeviceToHost); hipEventRecord(ct2); hipEventSynchronize(ct2); hipEventElapsedTime(&dt, ct1, ct2); cout << "GPU time 1: " << dt << "[ms]" << endl; funcionQL(Hitori_Str, Hit_State, N); SetHitoriState( Hitori, Hit_State, N); // GPU part 2 int* Hit_StateDev2; hipMalloc(&Hit_StateDev2, sizeof(int)*N*N); hipEventRecord(ct1); hipMemcpyToSymbol(HIP_SYMBOL(HitoriCM), Hitori, N*N*sizeof(int), 0, hipMemcpyHostToDevice); // For the CM kernels hipMemcpy(Hit_StateDev2, Hit_State, N*N*sizeof(int), hipMemcpyHostToDevice); kernelTripletF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelTripletC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelDobleF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelDobleC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); for(int i = 0; i < N; i++){ kernelMuerteF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelMuerteC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelRescateF_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); kernelRescateC_CM<<<grid_size, block_size>>>(Hit_StateDev2, N); } hipMemcpy(Hit_State, Hit_StateDev2, N*N*sizeof(int), hipMemcpyDeviceToHost); hipEventRecord(ct2); hipEventSynchronize(ct2); hipEventElapsedTime(&dt, ct1, ct2); cout << "GPU time 2: " << dt << "[ms]" << endl; funcionQL(Hitori_Str, Hit_State, N); // Free memory delete[] Hitori; delete[] Hit_State; delete[] Hitori_Str; //P1 hipFree(HitoriDev); hipFree(Hit_StateDev); //P2 hipFree(Hit_StateDev2); } FILE.close(); return 0; }
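The _CM kernels above read the board from a __constant__ symbol, HitoriCM, whose declaration falls outside this excerpt. A minimal sketch of what such a declaration and the matching HIP copy look like, assuming a hypothetical compile-time bound MAX_N (not a name from the original):

// Assumed declaration (not shown in the excerpt); MAX_N is a hypothetical bound on the board size.
#define MAX_N 64
__constant__ int HitoriCM[MAX_N * MAX_N];
// On the host, HIP wants the symbol wrapped with HIP_SYMBOL, exactly as main() above does:
hipMemcpyToSymbol(HIP_SYMBOL(HitoriCM), Hitori, N*N*sizeof(int), 0, hipMemcpyHostToDevice);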
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <curand.h> #define GRID_SIZE 32 #define BLOCK_SIZE 512 #define NUM_TRY 10000 /** * Generate (x,y) pairs from random numbers, compute the probability that they fall inside the circle, and store it in devResults. */ __global__ void compute_pi(float* devResults, float* devRandom){ int idx = blockDim.x * blockIdx.x + threadIdx.x; int step = gridDim.x * blockDim.x * 2; int count = 0; for (int iter = 0; iter < NUM_TRY; ++iter) { // Generate (x,y) from the random numbers float x = devRandom[iter * step + idx * 2]; float y = devRandom[iter * step + idx * 2 + 1]; // Check whether the point falls inside the circle if (x * x + y * y <= 1) { count++; } } devResults[idx] = (float)count / NUM_TRY; } int main() { float* results; float* devResults; curandGenerator_t gen; float *devRandom; // Allocate memory on the CPU side results = new float[GRID_SIZE * BLOCK_SIZE]; // Allocate memory on the GPU side cudaMalloc((void**)&devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE); cudaMalloc((void**)&devRandom, sizeof(float) * GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2); // Create the random number generator curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); // Generate random numbers and store them in the device-side buffer curandGenerateUniform(gen, devRandom, GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2); // Call the GPU function; what is inside () becomes its arguments as-is compute_pi<<<GRID_SIZE, BLOCK_SIZE>>>(devResults, devRandom); // Transfer the specified size of data from the GPU-side d_buffer to the CPU-side buffer cudaMemcpy(results, devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE, cudaMemcpyDeviceToHost); // Free the memory allocated on the GPU side cudaFree(devResults); cudaFree(devRandom); // Print the result float count = 0.0; for (int i = 0; i < GRID_SIZE * BLOCK_SIZE; ++i) { count += results[i]; } printf("PI: %lf\n", count * 4.0 / GRID_SIZE / BLOCK_SIZE); // Free the memory allocated on the CPU side (allocated with new[], so use delete[]) delete[] results; cudaDeviceReset(); }
#include <hip/hip_runtime.h> #include <stdio.h> #include <hiprand/hiprand.h> #define GRID_SIZE 32 #define BLOCK_SIZE 512 #define NUM_TRY 10000 /** * Generate (x,y) pairs from random numbers, compute the probability that they fall inside the circle, and store it in devResults. */ __global__ void compute_pi(float* devResults, float* devRandom){ int idx = blockDim.x * blockIdx.x + threadIdx.x; int step = gridDim.x * blockDim.x * 2; int count = 0; for (int iter = 0; iter < NUM_TRY; ++iter) { // Generate (x,y) from the random numbers float x = devRandom[iter * step + idx * 2]; float y = devRandom[iter * step + idx * 2 + 1]; // Check whether the point falls inside the circle if (x * x + y * y <= 1) { count++; } } devResults[idx] = (float)count / NUM_TRY; } int main() { float* results; float* devResults; hiprandGenerator_t gen; float *devRandom; // Allocate memory on the CPU side results = new float[GRID_SIZE * BLOCK_SIZE]; // Allocate memory on the GPU side hipMalloc((void**)&devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE); hipMalloc((void**)&devRandom, sizeof(float) * GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2); // Create the random number generator hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL); // Generate random numbers and store them in the device-side buffer hiprandGenerateUniform(gen, devRandom, GRID_SIZE * BLOCK_SIZE * NUM_TRY * 2); // Call the GPU function; what is inside () becomes its arguments as-is compute_pi<<<GRID_SIZE, BLOCK_SIZE>>>(devResults, devRandom); // Transfer the specified size of data from the GPU-side d_buffer to the CPU-side buffer hipMemcpy(results, devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE, hipMemcpyDeviceToHost); // Free the memory allocated on the GPU side hipFree(devResults); hipFree(devRandom); // Print the result float count = 0.0; for (int i = 0; i < GRID_SIZE * BLOCK_SIZE; ++i) { count += results[i]; } printf("PI: %lf\n", count * 4.0 / GRID_SIZE / BLOCK_SIZE); // Free the memory allocated on the CPU side (allocated with new[], so use delete[]) delete[] results; hipDeviceReset(); }
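Neither version checks API return codes. A minimal error-checking sketch for the HIP calls (the HIP_CHECK name is an assumption, not part of the sample); note that the HIP build also needs the hipRAND library at link time, typically -lhiprand under hipcc:

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// Abort with a readable message when any wrapped HIP call fails.
#define HIP_CHECK(expr) do { \
    hipError_t _e = (expr); \
    if (_e != hipSuccess) { \
        fprintf(stderr, "HIP error %s at %s:%d\n", hipGetErrorString(_e), __FILE__, __LINE__); \
        exit(EXIT_FAILURE); \
    } \
} while (0)
// Usage: HIP_CHECK(hipMalloc((void**)&devResults, sizeof(float) * GRID_SIZE * BLOCK_SIZE));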
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void naiveGmem(float *out, float *in, const int nx, const int ny) { // matrix coordinate (ix,iy) unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; // transpose with boundary test if (ix < nx && iy < ny) { out[ix * ny + iy] = in[iy * nx + ix]; } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void naiveGmem(float *out, float *in, const int nx, const int ny) { // matrix coordinate (ix,iy) unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; // transpose with boundary test if (ix < nx && iy < ny) { out[ix * ny + iy] = in[iy * nx + ix]; } }
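The naive transpose above reads coalesced but writes with a stride of ny. A common follow-up, sketched here rather than taken from the original, is to stage a tile in shared memory so both global accesses are coalesced; the +1 column of padding avoids LDS bank conflicts:

#include <hip/hip_runtime.h>
#define TILE 32
// Launch with dim3 block(TILE, TILE) and dim3 grid((nx+TILE-1)/TILE, (ny+TILE-1)/TILE).
__global__ void tiledTranspose(float *out, const float *in, int nx, int ny)
{
    __shared__ float tile[TILE][TILE + 1];            // padded to dodge bank conflicts
    unsigned int x = blockIdx.x * TILE + threadIdx.x; // column in 'in'
    unsigned int y = blockIdx.y * TILE + threadIdx.y; // row in 'in'
    if (x < nx && y < ny) tile[threadIdx.y][threadIdx.x] = in[y * nx + x];
    __syncthreads();
    x = blockIdx.y * TILE + threadIdx.x;              // column in 'out' (out is ny wide)
    y = blockIdx.x * TILE + threadIdx.y;              // row in 'out'
    if (x < ny && y < nx) out[y * ny + x] = tile[threadIdx.x][threadIdx.y];
}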
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" namespace gccl { void GCCLSetCudaDevice(int dev_id) { cudaSetDevice(dev_id); } } // namespace gccl
#include "hip/hip_runtime.h" namespace gccl { void GCCLSetCudaDevice(int dev_id) { hipSetDevice(dev_id); } } // namespace gccl
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void bcnn_cuda_add_bias_kernel(float *output, float *bias, int num_channels, int spatial_size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int channel = blockIdx.y; int batch_size = blockIdx.z; if (offset < spatial_size) output[(batch_size * num_channels + channel) * spatial_size + offset] += bias[channel]; }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void bcnn_cuda_add_bias_kernel(float *output, float *bias, int num_channels, int spatial_size) { int offset = blockIdx.x * blockDim.x + threadIdx.x; int channel = blockIdx.y; int batch_size = blockIdx.z; if (offset < spatial_size) output[(batch_size * num_channels + channel) * spatial_size + offset] += bias[channel]; }
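The kernel expects a 3-D grid: blockIdx.y picks the channel and blockIdx.z the batch element, while blockIdx.x/threadIdx.x sweep the spatial positions. A launch sketch (the d_output/d_bias names are assumptions, not from the original):

// Sketch: d_output and d_bias are assumed device pointers sized
// batch_size * num_channels * spatial_size and num_channels respectively.
void launch_add_bias(float *d_output, float *d_bias, int batch_size, int num_channels, int spatial_size)
{
    dim3 block(256);
    dim3 grid((spatial_size + block.x - 1) / block.x, num_channels, batch_size);
    bcnn_cuda_add_bias_kernel<<<grid, block>>>(d_output, d_bias, num_channels, spatial_size);
}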
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> // needed for exit() // Luis Miguel García Marín __global__ void initWith(float num, float *a, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { int deviceId; int numberOfSMs; cudaGetDevice(&deviceId); cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); const int N = 2<<24; size_t size = N * sizeof(float); //float *a; //float *b; float *c; // Pointers that will point to memory locations the CPU will work with //a = (float *) malloc(size); //b = (float *) malloc(size); c = (float *) malloc(size); // Allocate space in the memory the CPU works with float *da; float *db; float *dc; // Pointers to the video-memory locations the GPU will work with cudaMalloc(&da, size); cudaMalloc(&db, size); cudaMalloc(&dc, size); // Allocate space in the GPU's video memory size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; cudaError_t addVectorsErr; cudaError_t asyncErr; initWith<<<numberOfBlocks, threadsPerBlock>>>(3, da, N); initWith<<<numberOfBlocks, threadsPerBlock>>>(4, db, N); initWith<<<numberOfBlocks, threadsPerBlock>>>(0, dc, N); addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(dc, da, db, N); // We pass the device pointers (da,db,dc), not the host ones (a,b,c) addVectorsErr = cudaGetLastError(); if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr)); asyncErr = cudaDeviceSynchronize(); if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr)); //cudaMemcpy(a, da, size, cudaMemcpyDeviceToHost); //cudaMemcpy(b, db, size, cudaMemcpyDeviceToHost); cudaMemcpy(c, dc, size, cudaMemcpyDeviceToHost); // Copy the results (of the last, resulting vector) from the GPU to the CPU checkElementsAre(7, c, N); cudaFree(da); cudaFree(db); cudaFree(dc); // Free the video memory //free(a); //free(b); free(c); // Free the main memory the CPU was working with }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> // needed for exit() // Luis Miguel García Marín __global__ void initWith(float num, float *a, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for(int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for(int i = 0; i < N; i++) { if(vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { int deviceId; int numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); const int N = 2<<24; size_t size = N * sizeof(float); //float *a; //float *b; float *c; // Pointers that will point to memory locations the CPU will work with //a = (float *) malloc(size); //b = (float *) malloc(size); c = (float *) malloc(size); // Allocate space in the memory the CPU works with float *da; float *db; float *dc; // Pointers to the video-memory locations the GPU will work with hipMalloc(&da, size); hipMalloc(&db, size); hipMalloc(&dc, size); // Allocate space in the GPU's video memory size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; hipError_t addVectorsErr; hipError_t asyncErr; initWith<<<numberOfBlocks, threadsPerBlock>>>(3, da, N); initWith<<<numberOfBlocks, threadsPerBlock>>>(4, db, N); initWith<<<numberOfBlocks, threadsPerBlock>>>(0, dc, N); addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(dc, da, db, N); // We pass the device pointers (da,db,dc), not the host ones (a,b,c) addVectorsErr = hipGetLastError(); if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr)); asyncErr = hipDeviceSynchronize(); if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr)); //hipMemcpy(a, da, size, hipMemcpyDeviceToHost); //hipMemcpy(b, db, size, hipMemcpyDeviceToHost); hipMemcpy(c, dc, size, hipMemcpyDeviceToHost); // Copy the results (of the last, resulting vector) from the GPU to the CPU checkElementsAre(7, c, N); hipFree(da); hipFree(db); hipFree(dc); // Free the video memory //free(a); //free(b); free(c); // Free the main memory the CPU was working with }
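Rather than hard-coding 256 threads and 32 blocks per compute unit, HIP can propose a block size from occupancy data. A drop-in sketch for the launch section of main above, assuming addVectorsInto is visible at the call site:

// Ask HIP for an occupancy-friendly block size, then size the grid from N.
int minGridSize = 0, blockSize = 0;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, addVectorsInto, 0, 0);
int numBlocks = (N + blockSize - 1) / blockSize;
addVectorsInto<<<numBlocks, blockSize>>>(dc, da, db, N);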
Convert the following CUDA code to AMD GPU code: cuda #define CUDA_BLOCK_X 128 #define CUDA_BLOCK_Y 1 #define CUDA_BLOCK_Z 1 __global__ void _auto_kernel_2(int a[5][5],int b[5][5],int i) { int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x; int thread_y_id;thread_y_id = blockIdx.y * blockDim.y + threadIdx.y; if (thread_x_id && thread_y_id) if (thread_x_id <= 5 && thread_y_id <= 5) { b[i][1 * thread_y_id + -1] = a[i][1 * thread_y_id + -1]; } } __global__ void _auto_kernel_1(int b[5][5],int i) { int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_x_id) if (thread_x_id <= 5) { b[i][0] = 1; } } __global__ void _auto_kernel_0(int a[5][5]) { int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_x_id) if (thread_x_id <= 5) { a[1 * thread_x_id + -1][1 * thread_x_id + -1] = 1; } } int main() { int j; int i_nom_2; int i_nom_1; int i; int a[5][5]; int b[5][5]; int y; { { /* Auto-generated code for call to _auto_kernel_0 */ typedef int _narray_a[5]; _narray_a *d_a; cudaMalloc((void **) &d_a, sizeof(int ) * 5 * 5); cudaMemcpy(d_a, a, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice); int CUDA_GRID_X; CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X; int CUDA_GRID_Y; CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y; int CUDA_GRID_Z; CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z; const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z); const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z); _auto_kernel_0<<<CUDA_gridSize,CUDA_blockSize>>>(d_a); cudaMemcpy(a, d_a, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost); } { /* Auto-generated code for call to _auto_kernel_1 */ typedef int _narray_b[5]; _narray_b *d_b; cudaMalloc((void **) &d_b, sizeof(int ) * 5 * 5); cudaMemcpy(d_b, b, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice); int CUDA_GRID_X; CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X; int CUDA_GRID_Y; CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y; int CUDA_GRID_Z; CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z; const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z); const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z); _auto_kernel_1<<<CUDA_gridSize,CUDA_blockSize>>>(d_b, i); cudaMemcpy(b, d_b, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost); } { /* Auto-generated code for call to _auto_kernel_2 */ typedef int _narray_a[5]; _narray_a *d_a; cudaMalloc((void **) &d_a, sizeof(int ) * 5 * 5); cudaMemcpy(d_a, a, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice); typedef int _narray_b[5]; _narray_b *d_b; cudaMalloc((void **) &d_b, sizeof(int ) * 5 * 5); cudaMemcpy(d_b, b, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice); int CUDA_GRID_X; CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X; int CUDA_GRID_Y; CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y; int CUDA_GRID_Z; CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z; const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z); const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z); _auto_kernel_2<<<CUDA_gridSize,CUDA_blockSize>>>(d_a, d_b, i); cudaMemcpy(a, d_a, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost); cudaMemcpy(b, d_b, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost); } } /* This should NOT be convertible */ for (int i = 0; i < 5; i++) { a[i][i] = b[i][i]; for (int j = 0; j < 5; j++) b[i][j] = a[i][j]; } return 0; }
#include <hip/hip_runtime.h> #define CUDA_BLOCK_X 128 #define CUDA_BLOCK_Y 1 #define CUDA_BLOCK_Z 1 __global__ void _auto_kernel_2(int a[5][5],int b[5][5],int i) { int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x; int thread_y_id;thread_y_id = blockIdx.y * blockDim.y + threadIdx.y; if (thread_x_id && thread_y_id) if (thread_x_id <= 5 && thread_y_id <= 5) { b[i][1 * thread_y_id + -1] = a[i][1 * thread_y_id + -1]; } } __global__ void _auto_kernel_1(int b[5][5],int i) { int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_x_id) if (thread_x_id <= 5) { b[i][0] = 1; } } __global__ void _auto_kernel_0(int a[5][5]) { int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x; if (thread_x_id) if (thread_x_id <= 5) { a[1 * thread_x_id + -1][1 * thread_x_id + -1] = 1; } } int main() { int j; int i_nom_2; int i_nom_1; int i; int a[5][5]; int b[5][5]; int y; { { /* Auto-generated code for call to _auto_kernel_0 */ typedef int _narray_a[5]; _narray_a *d_a; hipMalloc((void **) &d_a, sizeof(int ) * 5 * 5); hipMemcpy(d_a, a, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice); int CUDA_GRID_X; CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X; int CUDA_GRID_Y; CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y; int CUDA_GRID_Z; CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z; const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z); const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z); _auto_kernel_0<<<CUDA_gridSize,CUDA_blockSize>>>(d_a); hipMemcpy(a, d_a, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost); } { /* Auto-generated code for call to _auto_kernel_1 */ typedef int _narray_b[5]; _narray_b *d_b; hipMalloc((void **) &d_b, sizeof(int ) * 5 * 5); hipMemcpy(d_b, b, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice); int CUDA_GRID_X; CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X; int CUDA_GRID_Y; CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y; int CUDA_GRID_Z; CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z; const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z); const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z); _auto_kernel_1<<<CUDA_gridSize,CUDA_blockSize>>>(d_b, i); hipMemcpy(b, d_b, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost); } { /* Auto-generated code for call to _auto_kernel_2 */ typedef int _narray_a[5]; _narray_a *d_a; hipMalloc((void **) &d_a, sizeof(int ) * 5 * 5); hipMemcpy(d_a, a, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice); typedef int _narray_b[5]; _narray_b *d_b; hipMalloc((void **) &d_b, sizeof(int ) * 5 * 5); hipMemcpy(d_b, b, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice); int CUDA_GRID_X; CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X; int CUDA_GRID_Y; CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y; int CUDA_GRID_Z; CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z; const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z); const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z); _auto_kernel_2<<<CUDA_gridSize,CUDA_blockSize>>>(d_a, d_b, i); hipMemcpy(a, d_a, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost); hipMemcpy(b, d_b, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost); } } /* This should NOT be convertible */ for (int i = 0; i < 5; i++) { a[i][i] = b[i][i]; for (int j = 0; j < 5; j++) b[i][j] = a[i][j]; } return 0; }
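hipcc compiles the <<<...>>> launches above unchanged; the equivalent macro spelling, shown for the first launch only, is:

// kernel, grid dim, block dim, dynamic LDS bytes, stream, then the kernel arguments
hipLaunchKernelGGL(_auto_kernel_0, CUDA_gridSize, CUDA_blockSize, 0, 0, d_a);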
Convert the following CUDA code to AMD GPU code: cuda #include <cuda.h> #include <stdio.h> __device__ int ceilDiv(int num1,int num2) { int adder = 0; if(num1%num2) adder = 1; return ((int)num1/num2)+adder; } __global__ void reduceVec(int* vecA,int* answer,int size) { int i = threadIdx.x; int halfSize = ceilDiv(size,2); int prevSize = size; while(halfSize>1) { if(i<halfSize && (i+halfSize)<prevSize) { vecA[i] += vecA[i+halfSize]; } __syncthreads(); prevSize = halfSize; halfSize = ceilDiv(halfSize,2); } __syncthreads(); if(i==0) { if(size>1) *answer = vecA[0]+vecA[1]; else if (size) *answer = vecA[0]; else *answer = 0; } } int main(int argc,char** argv) { int SIZEARRAY = 0; if(argc==2) { SIZEARRAY = atoi(argv[1]); }else { SIZEARRAY = 400; } //allocate memory in host int* myVec = new int[SIZEARRAY]; int* result = new int; // *result = 700; //allocate memory in the device int *dVec,*dResult; cudaMalloc((void**)&dVec,SIZEARRAY*sizeof(int)); cudaMalloc((void**)&dResult,sizeof(int)); for(int i=0;i<SIZEARRAY;i++) { myVec[i] = 1; } //copy data to device cudaMemcpy(dVec,myVec,SIZEARRAY*sizeof(int),cudaMemcpyHostToDevice); //call the kernel reduceVec<<<1,SIZEARRAY>>>(dVec,dResult,SIZEARRAY); //copy result to the host cudaMemcpy(result,dResult,sizeof(int),cudaMemcpyDeviceToHost); //display result printf("The sum is %d\n",*result); cudaFree(dVec); cudaFree(dResult); delete[] myVec; delete result; return 0; }
#include <hip/hip_runtime.h> #include <stdio.h> __device__ int ceilDiv(int num1,int num2) { int adder = 0; if(num1%num2) adder = 1; return ((int)num1/num2)+adder; } __global__ void reduceVec(int* vecA,int* answer,int size) { int i = threadIdx.x; int halfSize = ceilDiv(size,2); int prevSize = size; while(halfSize>1) { if(i<halfSize && (i+halfSize)<prevSize) { vecA[i] += vecA[i+halfSize]; } __syncthreads(); prevSize = halfSize; halfSize = ceilDiv(halfSize,2); } __syncthreads(); if(i==0) { if(size>1) *answer = vecA[0]+vecA[1]; else if (size) *answer = vecA[0]; else *answer = 0; } } int main(int argc,char** argv) { int SIZEARRAY = 0; if(argc==2) { SIZEARRAY = atoi(argv[1]); }else { SIZEARRAY = 400; } //allocate memory in host int* myVec = new int[SIZEARRAY]; int* result = new int; // *result = 700; //allocate memory in the device int *dVec,*dResult; hipMalloc((void**)&dVec,SIZEARRAY*sizeof(int)); hipMalloc((void**)&dResult,sizeof(int)); for(int i=0;i<SIZEARRAY;i++) { myVec[i] = 1; } //copy data to device hipMemcpy(dVec,myVec,SIZEARRAY*sizeof(int),hipMemcpyHostToDevice); //call the kernel reduceVec<<<1,SIZEARRAY>>>(dVec,dResult,SIZEARRAY); //copy result to the host hipMemcpy(result,dResult,sizeof(int),hipMemcpyDeviceToHost); //display result printf("The sum is %d\n",*result); hipFree(dVec); hipFree(dResult); delete[] myVec; delete result; return 0; }
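Both versions launch a single block of SIZEARRAY threads, so inputs beyond the device's per-block limit (1024 on current AMD GPUs) would fail. A sketch of guarding the launch, to be placed in main before the kernel call:

hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
if (SIZEARRAY > prop.maxThreadsPerBlock) {
    printf("SIZEARRAY %d exceeds maxThreadsPerBlock %d\n", SIZEARRAY, prop.maxThreadsPerBlock);
    return 1;
}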
Convert the following CUDA code to AMD GPU code: cuda //MatrixMult.cu #include <stdio.h> #include <cuda.h> #include <stdlib.h> __global__ void gpu_sort(int *a,int *b,int *c, int N) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int count = 0; int d; for(d=0;d<N;d++) { if(a[d] < a[tid]) { count++; } } c[count] = a[tid]; } int main(int argc, char *argv[]) { int i, j; // loop counters int Grid_Dim_x=1, Grid_Dim_y=1; //Grid structure values int Block_Dim_x=1, Block_Dim_y=1; //Block structure values int noThreads_x, noThreads_y; // number of threads available in device, each dimension int noThreads_block; // number of threads in a block int N = 10; // size of array in each dimension int B; int T; int *a,*b,*c,*d; int *dev_a, *dev_b, *dev_c; int size; // number of bytes in arrays cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also cudaEventCreate(&start); cudaEventCreate(&stop); /* --------------------ENTER INPUT PARAMETERS AND ALLOCATE DATA -----------------------*/ // keyboard input printf("Enter the value for N: "); scanf("%d", &N); //takes in input int valid = 0; while(valid == 0) { printf("Enter the number of blocks: "); scanf("%d", &B); printf("Enter the number of threads: "); scanf("%d", &T); if(B > 1024 || T > 1024 || B*T < N) { printf("Invalid input entered.\n"); } else { valid = 1; Grid_Dim_x = B; Block_Dim_x = T; //puts the size of blocks and thread in for the dim3 } } dim3 Grid(Grid_Dim_x, Grid_Dim_x); //Grid structure dim3 Block(Block_Dim_x,Block_Dim_y); //Block structure, threads/block limited by specific device size = N * N * sizeof(int); // number of bytes in total in arrays a = (int*) malloc(size); //dynamically allocated memory for arrays on host b = (int*) malloc(size); c = (int*) malloc(size); // results from GPU d = (int*) malloc(size); // results from CPU // load arrays with some numbers srand(3); //initialize random number generator for (i=0; i < N; i++) { //load array with numbers a[i] = (int)rand(); } cudaMalloc((void**)&dev_a, size); // allocate memory on device cudaMalloc((void**)&dev_b, size); cudaMalloc((void**)&dev_c, size); cudaMemcpy(dev_a, a , size ,cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b , size ,cudaMemcpyHostToDevice); cudaEventRecord(start, 0); // here start time, after memcpy gpu_sort<<<Grid,Block>>>(dev_a,dev_b,dev_c,N); cudaMemcpy(c, dev_c, size , cudaMemcpyDeviceToHost); cudaEventRecord(stop, 0); // measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); double gpuTime = elapsed_time_ms; /* ------------- COMPUTATION DONE ON HOST CPU ----------------------------*/ cudaEventRecord(start, 0); // use same timing* //cpu_matrixmult(a,b,d,N); // do calculation on host //sequential rank sort int k; for(k=0;k<N;k++) { int count = 0; int d; for(d=0;d<N;d++) { if(a[d] < a[k]) { count++; } } b[count] = a[k]; count = 0; } cudaEventRecord(stop, 0); // measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exe. time
double cpuTime = elapsed_time_ms; /* ------------------- check device creates correct results -----------------*/ /* printf("Initial Array: \n"); int h; for(h=0;h<N;h++) { printf("%d ", a[h]); } printf("\n"); /* printf("Sequential Rank Sort: \n"); for(k=0;k<N;k++) { int count = 0; int d; for(d=0;d<N;d++) { if(a[d] < a[k]) { count++; } } b[count] = a[k]; count = 0; } for(h=0;h<N;h++) { printf("%d ", b[h]); } printf("Parallel Rank Sort\n"); for(h=0;h<N;h++) { printf("%d ", c[h]); } */ int error = 0; int r; for(r=0;r<N;r++) { if(b[r] != c[r]) { error = 1; break; } } if(error == 1) { printf("Parallel and sequential do not match.\n"); } else { printf("Sequential and parallel match.\n"); } printf("Speedup Factor: %lf\n", cpuTime/gpuTime); /* --------------------- repeat program ----------------------------------------*/ // while loop to repeat calc with different parameters /* -------------- clean up ---------------------------------------*/ free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
//MatrixMult.cu #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> __global__ void gpu_sort(int *a,int *b,int *c, int N) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int count = 0; int d; for(d=0;d<N;d++) { if(a[d] < a[tid]) { count++; } } c[count] = a[tid]; } int main(int argc, char *argv[]) { int i, j; // loop counters int Grid_Dim_x=1, Grid_Dim_y=1; //Grid structure values int Block_Dim_x=1, Block_Dim_y=1; //Block structure values int noThreads_x, noThreads_y; // number of threads available in device, each dimension int noThreads_block; // number of threads in a block int N = 10; // size of array in each dimension int B; int T; int *a,*b,*c,*d; int *dev_a, *dev_b, *dev_c; int size; // number of bytes in arrays hipEvent_t start, stop; // using HIP events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also hipEventCreate(&start); hipEventCreate(&stop); /* --------------------ENTER INPUT PARAMETERS AND ALLOCATE DATA -----------------------*/ // keyboard input printf("Enter the value for N: "); scanf("%d", &N); //takes in input int valid = 0; while(valid == 0) { printf("Enter the number of blocks: "); scanf("%d", &B); printf("Enter the number of threads: "); scanf("%d", &T); if(B > 1024 || T > 1024 || B*T < N) { printf("Invalid input entered.\n"); } else { valid = 1; Grid_Dim_x = B; Block_Dim_x = T; //puts the size of blocks and thread in for the dim3 } } dim3 Grid(Grid_Dim_x, Grid_Dim_x); //Grid structure dim3 Block(Block_Dim_x,Block_Dim_y); //Block structure, threads/block limited by specific device size = N * N * sizeof(int); // number of bytes in total in arrays a = (int*) malloc(size); //dynamically allocated memory for arrays on host b = (int*) malloc(size); c = (int*) malloc(size); // results from GPU d = (int*) malloc(size); // results from CPU // load arrays with some numbers srand(3); //initialize random number generator for (i=0; i < N; i++) { //load array with numbers a[i] = (int)rand(); } hipMalloc((void**)&dev_a, size); // allocate memory on device hipMalloc((void**)&dev_b, size); hipMalloc((void**)&dev_c, size); hipMemcpy(dev_a, a , size ,hipMemcpyHostToDevice); hipMemcpy(dev_b, b , size ,hipMemcpyHostToDevice); hipEventRecord(start, 0); // here start time, after memcpy gpu_sort<<<Grid,Block>>>(dev_a,dev_b,dev_c,N); hipMemcpy(c, dev_c, size , hipMemcpyDeviceToHost); hipEventRecord(stop, 0); // measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); double gpuTime = elapsed_time_ms; /* ------------- COMPUTATION DONE ON HOST CPU ----------------------------*/ hipEventRecord(start, 0); // use same timing* //cpu_matrixmult(a,b,d,N); // do calculation on host //sequential rank sort int k; for(k=0;k<N;k++) { int count = 0; int d; for(d=0;d<N;d++) { if(a[d] < a[k]) { count++; } } b[count] = a[k]; count = 0; } hipEventRecord(stop, 0); // measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // exe. time
double cpuTime = elapsed_time_ms; /* ------------------- check device creates correct results -----------------*/ /* printf("Initial Array: \n"); int h; for(h=0;h<N;h++) { printf("%d ", a[h]); } printf("\n"); /* printf("Sequential Rank Sort: \n"); for(k=0;k<N;k++) { int count = 0; int d; for(d=0;d<N;d++) { if(a[d] < a[k]) { count++; } } b[count] = a[k]; count = 0; } for(h=0;h<N;h++) { printf("%d ", b[h]); } printf("Parallel Rank Sort\n"); for(h=0;h<N;h++) { printf("%d ", c[h]); } */ int error = 0; int r; for(r=0;r<N;r++) { if(b[r] != c[r]) { error = 1; break; } } if(error == 1) { printf("Parallel and sequential do not match.\n"); } else { printf("Sequential and parallel match.\n"); } printf("Speedup Factor: %lf\n", cpuTime/gpuTime); /* --------------------- repeat program ----------------------------------------*/ // while loop to repeat calc with different parameters /* -------------- clean up ---------------------------------------*/ free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
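gpu_sort has no bounds check, so whenever B*T exceeds N the surplus threads rank uninitialized values (harmless here only because the buffers are oversized at N*N ints). A guarded variant, sketched with the unused b parameter dropped:

__global__ void gpu_sort_guarded(int *a, int *c, int N)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= N) return; // surplus threads do nothing
    int count = 0;
    for (int d = 0; d < N; d++) {
        if (a[d] < a[tid]) count++;
    }
    c[count] = a[tid];
}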
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" //#define ITEM_COUNT 2 #define _PI 3.14159265358979323846 #define _PI2 1.57079632679489661923 #define _RAD 6372795 using namespace std; cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void d_cudainit(int *a, int *b) { int i = threadIdx.x; if (i==1) { b[i] = a[i] * 2; } }
#include <hip/hip_runtime.h> #include "includes.h" //#define ITEM_COUNT 2 #define _PI 3.14159265358979323846 #define _PI2 1.57079632679489661923 #define _RAD 6372795 using namespace std; hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void d_cudainit(int *a, int *b) { int i = threadIdx.x; if (i==1) { b[i] = a[i] * 2; } }
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <cuda_runtime.h> __global__ void hello_world_from_gpu(void) { printf("Hello World from GPU\n"); return; } int main(void) { printf("Hello World from CPU\n"); hello_world_from_gpu <<<1, 1>>> (); cudaDeviceReset(); return 0; }
#include <stdio.h> #include <hip/hip_runtime.h> __global__ void hello_world_from_gpu(void) { printf("Hello World from GPU\n"); return; } int main(void) { printf("Hello World from CPU\n"); hello_world_from_gpu <<<1, 1>>> (); hipDeviceReset(); return 0; }
Convert the following CUDA code to AMD GPU code: cuda #include<stdlib.h> #include<stdio.h> #include<time.h> #define n 1024 #define block_size 32 /* __global__ void mult_mat(int *a, int *b, int *c) { int blockRow = blockIdx.y, blockCol = blockIdx.x; int row = threadIdx.y, col = threadIdx.x; for (int m = 0; m < (n/block_size); ++m) { } } */ __global__ void mul_mat(int *a, int *b, int *c) { int blockRow = blockIdx.y; int blockCol = blockIdx.x; int row = threadIdx.y; int col = threadIdx.x; int i,j; int myx = blockIdx.x * blockDim.x + threadIdx.x; int myy = blockIdx.y * blockDim.y + threadIdx.y; int local=0; __shared__ int As[32][32]; __shared__ int Bs[32][32]; for(i=0;i<n/block_size;i++) { As[row][col] = a[myx*n + (i*blockDim.y + col)]; Bs[row][col] = b[(i*blockDim.x+row)*n + myy]; __syncthreads(); for(j=0;j<block_size;j++) local += As[row][j]*Bs[j][col]; __syncthreads(); } c[myx*n+myy] = local; } int main() { int i; int *a = (int*)malloc(sizeof(int)*n*n); int *b = (int*)malloc(sizeof(int)*n*n); int *c = (int*)malloc(sizeof(int)*n*n); cudaEvent_t start, stop; float time; dim3 dimGrid(32,32); dim3 dimBlock(32,32); for(i=0;i<n*n;i++) { a[i] = 1; b[i] = 2; c[i] = 0; } int *gpua, *gpub, *gpuc; cudaMalloc((void**)&gpua, sizeof(int)*n*n); cudaMalloc((void**)&gpub, sizeof(int)*n*n); cudaMalloc((void**)&gpuc, sizeof(int)*n*n); cudaMemcpy(gpua, a, sizeof(int)*n*n, cudaMemcpyHostToDevice); cudaMemcpy(gpub, b, sizeof(int)*n*n, cudaMemcpyHostToDevice); cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); mul_mat<<<dimGrid, dimBlock>>> (gpua, gpub, gpuc); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaMemcpy(c, gpuc, sizeof(int)*n*n, cudaMemcpyDeviceToHost); cudaEventElapsedTime(&time, start, stop); printf("C[451][451] = %d\n",c[451*1024 + 451]); printf("Time - %f\n", time); free(a); free(b); free(c); cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(gpua); cudaFree(gpub); cudaFree(gpuc); return 0; }
#include <hip/hip_runtime.h> #include<stdlib.h> #include<stdio.h> #include<time.h> #define n 1024 #define block_size 32 /* __global__ void mult_mat(int *a, int *b, int *c) { int blockRow = blockIdx.y, blockCol = blockIdx.x; int row = threadIdx.y, col = threadIdx.x; for (int m = 0; m < (n/block_size); ++m) { } } */ __global__ void mul_mat(int *a, int *b, int *c) { int blockRow = blockIdx.y; int blockCol = blockIdx.x; int row = threadIdx.y; int col = threadIdx.x; int i,j; int myx = blockIdx.x * blockDim.x + threadIdx.x; int myy = blockIdx.y * blockDim.y + threadIdx.y; int local=0; __shared__ int As[32][32]; __shared__ int Bs[32][32]; for(i=0;i<n/block_size;i++) { As[row][col] = a[myx*n + (i*blockDim.y + col)]; Bs[row][col] = b[(i*blockDim.x+row)*n + myy]; __syncthreads(); for(j=0;j<block_size;j++) local += As[row][j]*Bs[j][col]; __syncthreads(); } c[myx*n+myy] = local; } int main() { int i; int *a = (int*)malloc(sizeof(int)*n*n); int *b = (int*)malloc(sizeof(int)*n*n); int *c = (int*)malloc(sizeof(int)*n*n); hipEvent_t start, stop; float time; dim3 dimGrid(32,32); dim3 dimBlock(32,32); for(i=0;i<n*n;i++) { a[i] = 1; b[i] = 2; c[i] = 0; } int *gpua, *gpub, *gpuc; hipMalloc((void**)&gpua, sizeof(int)*n*n); hipMalloc((void**)&gpub, sizeof(int)*n*n); hipMalloc((void**)&gpuc, sizeof(int)*n*n); hipMemcpy(gpua, a, sizeof(int)*n*n, hipMemcpyHostToDevice); hipMemcpy(gpub, b, sizeof(int)*n*n, hipMemcpyHostToDevice); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); mul_mat<<<dimGrid, dimBlock>>> (gpua, gpub, gpuc); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipMemcpy(c, gpuc, sizeof(int)*n*n, hipMemcpyDeviceToHost); hipEventElapsedTime(&time, start, stop); printf("C[451][451] = %d\n",c[451*1024 + 451]); printf("Time - %f\n", time); free(a); free(b); free(c); hipEventDestroy(start); hipEventDestroy(stop); hipFree(gpua); hipFree(gpub); hipFree(gpuc); return 0; }
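The two 32x32 int tiles above occupy 8 KB of LDS per block. A quick sketch for confirming the per-block shared-memory limit on the target device:

hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("shared mem per block: %zu bytes\n", prop.sharedMemPerBlock);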
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #define MAX_ARRAY_SIZE 1000000 /* * Check GPU device */ void check_dev(void) { int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { printf("!! Error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; cudaSetDevice(dev); } /* * Round up to the nearest power of 2 */ int round_up_pow2(int val) { if (val == 0) return 1; int pow2 = 1; while (pow2 < val) { pow2 <<= 1; } return pow2; } /* * Calculate the number of threads per block based on array size * The function is so designed that a reduction on the array can * be completed in two steps. * The assumption is that the size of the array is no more than * 1,000,000, such that the number of threads is no more than * 1024, which is the computational limit of the GPU device. */ int calc_num_thread(int size) { int approx = (int)sqrt((double)size); // find the nearest power of 2 return round_up_pow2(approx); } /* * Read data from ./inp.txt * Return the pointer to the data array * Ouput the number of data items thru passed-in pointer (int * size) */ int * read_data(int * size) { FILE * fptr = fopen("./inp.txt", "r"); if (!fptr) { printf("!! Error in opening data file \n"); exit(1); } int cur_array_size = MAX_ARRAY_SIZE; int * buffer = (int *)malloc(cur_array_size * sizeof(int)); int i = 0; while (!feof(fptr)) { if (fscanf(fptr, "%d,", &buffer[i]) != 1) { break; } ++i; } fclose(fptr); *size = i; return buffer; } /* * Outputs the result array into file */ void print_file(int * array, int array_size, const char fname[]) { FILE * fptr_b = fopen(fname, "w"); if (!fptr_b) { printf("!! Error in opening output file \n"); exit(1); } for (int i = 0; i < array_size; ++i) { fprintf(fptr_b, "%d", array[i]); if (i < array_size - 1) fprintf(fptr_b, ", "); } fclose(fptr_b); } /* * GPU kernel: inclusive prefix scan, one step * The result can not be stored in the original array since different blocks * cannot be synchronized within the kernel */ __global__ void prefix_scan_step(int * array_i, int * array_o, int array_size, int dist) { // shared memory to store intermediate results extern __shared__ int sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int thId = threadIdx.x; // load initial values to shared memory sdata[thId] = array_i[myId]; __syncthreads(); // store block results in shared memory if (!(myId < dist) && myId < array_size) { sdata[thId] += array_i[myId - dist]; } __syncthreads(); // copy results to global memory if (myId < array_size) { array_o[myId] = sdata[thId]; } } /* * Inclusive prefix scan */ void prefix_scan(int * array_i, int * array_o, int array_size) { // dynamically calculate the number of threads and blocks const int maxThreadsPerBlock = calc_num_thread(array_size); int threads = maxThreadsPerBlock; int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock; int dist = 1, i = 0; while (dist < array_size) { // each array is alternatively used as the kernel input or output to avoid the overhead of // copying the output to the input in evey iteration if (i % 2 == 0) prefix_scan_step<<<blocks, threads, threads * sizeof(int)>>>(array_i, array_o, array_size, dist); else prefix_scan_step<<<blocks, threads, threads * sizeof(int)>>>(array_o, array_i, array_size, dist); cudaDeviceSynchronize(); ++i; dist *= 2; } if (i % 2 == 0) cudaMemcpy(array_o, array_i, array_size * sizeof(int), cudaMemcpyDeviceToDevice); } /* * GPU kernel: reduction, getting the sum of 
an array */ __global__ void shmem_reduce_kernel(int * d_out, const int * d_in, const int size) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ int sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s = s / 2) { if (tid < s && (myId + s) < size) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } /* * Reduction-based algorithm to find the sum of an array */ void reduce(int * d_out, int * d_intermediate, int * d_in, int size) { // assumes that size is not greater than maxThreadsPerBlock^2 const int maxThreadsPerBlock = calc_num_thread(size); int threads = maxThreadsPerBlock; int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock; shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size); // now we're down to one block left, so reduce it threads = blocks; blocks = 1; shmem_reduce_kernel<<<blocks, round_up_pow2(threads), threads * sizeof(int)>>>(d_out, d_intermediate, threads); } /* * GPU kernel for part a */ __global__ void global_counter_kernel(int * array_i, int * cnt_o, int array_size) { int myId = threadIdx.x + blockDim.x * blockIdx.x; if (myId < array_size) { atomicAdd(&cnt_o[array_i[myId] / 100], 1); } } /* * part a: global memory counter * returns the pointer to the result array B */ int * global_counter(int * array_i, int array_size) { // dynamically calculate the number of threads and blocks const int maxThreadsPerBlock = calc_num_thread(array_size); int threads = maxThreadsPerBlock; int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock; // allocate GPU global memories for input & output arrays int * array_device, * array_device_out; cudaMalloc((void **) &array_device, array_size * sizeof(int)); cudaMalloc((void **) &array_device_out, 11 * sizeof(int)); // copy the input array into GPU shared memory cudaMemcpy(array_device, array_i, array_size * sizeof(int), cudaMemcpyHostToDevice); // launch the kernel global_counter_kernel<<<blocks, threads>>>(array_device, array_device_out, array_size); // allocate CPU memory for output array int * array_o = (int *)malloc(10 * sizeof(int)); // copy result back to CPU cudaMemcpy(array_o, array_device_out, 10 * sizeof(int), cudaMemcpyDeviceToHost); // finish cudaFree(array_device); cudaFree(array_device_out); return array_o; } /* * GPU kernel for part b * cnt_matrix dimensions: 10 x (# of blocks) */ __global__ void shmem_counter_kernel(int * array_i, int * cnt_matrix, int array_size, int num_block) { // shared counter within block // size: 11 * sizeof(int) // one extra int for numbers greater than 1000 extern __shared__ int scnt[]; // initialize to 0 if (threadIdx.x < 10) { scnt[threadIdx.x] = 0; } __syncthreads(); // block-local counter int myId = threadIdx.x + blockDim.x * blockIdx.x; if (myId < array_size) { atomicAdd(&scnt[array_i[myId] / 100], 1); } __syncthreads(); // copy the counter values to shared memory // only have 10 values if (threadIdx.x < 10) { cnt_matrix[threadIdx.x * num_block + blockIdx.x] = scnt[threadIdx.x]; } } /* * part b: shared memory counter * returns the pointer to the result array B */ int * shmem_counter(int * array_i, int array_size) { // 
dynamically calculate the number of threads and blocks const int maxThreadsPerBlock = calc_num_thread(array_size); int threads = maxThreadsPerBlock; int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock; // allocate GPU global memories for input & output arrays and intermediate counter matrix int * array_device, * array_device_inter, * array_device_out; cudaMalloc((void **) &array_device, array_size * sizeof(int)); cudaMalloc((void **) &array_device_inter, 10 * blocks * sizeof(int)); cudaMalloc((void **) &array_device_out, 10 * sizeof(int)); /* -------------------------------------------------------------------------------- * The intermediate counter matrix * * block 0 | block 1 | block 2 | ... ... | block N* * [ 0, 99] * [100, 199] * [200, 299] * ... ... * [900, 999] * * *Note: the number of blocks, N, is stored in variable "blocks" -------------------------------------------------------------------------------- */ // allocate GPU global memory for reduction's intermediate results int * array_device_reduction_inter; cudaMalloc((void **) &array_device_reduction_inter, blocks * sizeof(int)); // allocate CPU memory for the output array int * array_o = (int *)malloc(10 * sizeof(int)); // copy the input array into GPU shared memory cudaMemcpy(array_device, array_i, array_size * sizeof(int), cudaMemcpyHostToDevice); // launch the counter kernel // shared memory size: 11 * sizeof(int) // one extra int for numbers greater than 1000 shmem_counter_kernel<<<blocks, threads, 11 * sizeof(int)>>>(array_device, array_device_inter, array_size, blocks); // do reduction for each range for (int i = 0; i < 10; ++i) { reduce(&array_device_out[i], array_device_reduction_inter, &array_device_inter[blocks * i], blocks); } // copy result back to CPU cudaMemcpy(array_o, array_device_out, 10 * sizeof(int), cudaMemcpyDeviceToHost); // finish cudaFree(array_device); cudaFree(array_device_out); cudaFree(array_device_inter); cudaFree(array_device_reduction_inter); return array_o; } /* * part c: prefix scan * returns the pointer to the result array C */ int * integrate_counter(int * array_i, int array_size) { // allocate GPU global memories for input/output array int * array_device, * array_buffer; cudaMalloc((void **) &array_device, array_size * sizeof(int)); cudaMalloc((void **) &array_buffer, array_size * sizeof(int)); // copy the input array into GPU shared memory cudaMemcpy(array_device, array_i, array_size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(array_buffer, array_device, array_size * sizeof(int), cudaMemcpyDeviceToDevice); // run prefix scan prefix_scan(array_buffer, array_device, array_size); // allocate CPU memory for the output array int * array_o = (int *)malloc(array_size * sizeof(int)); // copy result back to CPU cudaMemcpy(array_o, array_device, array_size * sizeof(int), cudaMemcpyDeviceToHost); // finish cudaFree(array_device); cudaFree(array_buffer); return array_o; } /* * CPU main routine */ int main(void) { // check device check_dev(); // data array on host int array_size = 0; int * array_i = read_data(&array_size); // part a ------------------------------------------------------------ // compute counter values cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); int * array_o_a = global_counter(array_i, array_size); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); // print to file print_file(array_o_a, 10, "./q2a.txt"); // print debug information to 
stdout // printf(">> Average time elapsed in part a: %f\n", elapsedTime); // part b ------------------------------------------------------------ cudaEvent_t start_b, stop_b; cudaEventCreate(&start_b); cudaEventCreate(&stop_b); cudaEventRecord(start_b, 0); int * array_o_b = shmem_counter(array_i, array_size); cudaEventRecord(stop_b, 0); cudaEventSynchronize(stop_b); cudaEventElapsedTime(&elapsedTime, start_b, stop_b); // print to file print_file(array_o_b, 10, "./q2b.txt"); // print debug information to stdout // printf(">> Average time elapsed in part b: %f\n", elapsedTime); // part c ------------------------------------------------------------ cudaEvent_t start_c, stop_c; cudaEventCreate(&start_c); cudaEventCreate(&stop_c); cudaEventRecord(start_c, 0); int * array_o_c = integrate_counter(array_o_b, 10); cudaEventRecord(stop_c, 0); cudaEventSynchronize(stop_c); cudaEventElapsedTime(&elapsedTime, start_c, stop_c); // print to file print_file(array_o_c, 10, "./q2c.txt"); // print debug information to stdout // printf(">> Average time elapsed in part c: %f\n", elapsedTime); // finish free(array_i); free(array_o_a); free(array_o_b); free(array_o_c); return 0; }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #define MAX_ARRAY_SIZE 1000000 /* * Check GPU device */ void check_dev(void) { int deviceCount; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { printf("!! Error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; hipSetDevice(dev); } /* * Round up to the nearest power of 2 */ int round_up_pow2(int val) { if (val == 0) return 1; int pow2 = 1; while (pow2 < val) { pow2 <<= 1; } return pow2; } /* * Calculate the number of threads per block based on array size * The function is so designed that a reduction on the array can * be completed in two steps. * The assumption is that the size of the array is no more than * 1,000,000, such that the number of threads is no more than * 1024, which is the computational limit of the GPU device. */ int calc_num_thread(int size) { int approx = (int)sqrt((double)size); // find the nearest power of 2 return round_up_pow2(approx); } /* * Read data from ./inp.txt * Return the pointer to the data array * Ouput the number of data items thru passed-in pointer (int * size) */ int * read_data(int * size) { FILE * fptr = fopen("./inp.txt", "r"); if (!fptr) { printf("!! Error in opening data file \n"); exit(1); } int cur_array_size = MAX_ARRAY_SIZE; int * buffer = (int *)malloc(cur_array_size * sizeof(int)); int i = 0; while (!feof(fptr)) { if (fscanf(fptr, "%d,", &buffer[i]) != 1) { break; } ++i; } fclose(fptr); *size = i; return buffer; } /* * Outputs the result array into file */ void print_file(int * array, int array_size, const char fname[]) { FILE * fptr_b = fopen(fname, "w"); if (!fptr_b) { printf("!! Error in opening output file \n"); exit(1); } for (int i = 0; i < array_size; ++i) { fprintf(fptr_b, "%d", array[i]); if (i < array_size - 1) fprintf(fptr_b, ", "); } fclose(fptr_b); } /* * GPU kernel: inclusive prefix scan, one step * The result can not be stored in the original array since different blocks * cannot be synchronized within the kernel */ __global__ void prefix_scan_step(int * array_i, int * array_o, int array_size, int dist) { // shared memory to store intermediate results extern __shared__ int sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int thId = threadIdx.x; // load initial values to shared memory sdata[thId] = array_i[myId]; __syncthreads(); // store block results in shared memory if (!(myId < dist) && myId < array_size) { sdata[thId] += array_i[myId - dist]; } __syncthreads(); // copy results to global memory if (myId < array_size) { array_o[myId] = sdata[thId]; } } /* * Inclusive prefix scan */ void prefix_scan(int * array_i, int * array_o, int array_size) { // dynamically calculate the number of threads and blocks const int maxThreadsPerBlock = calc_num_thread(array_size); int threads = maxThreadsPerBlock; int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock; int dist = 1, i = 0; while (dist < array_size) { // each array is alternatively used as the kernel input or output to avoid the overhead of // copying the output to the input in evey iteration if (i % 2 == 0) prefix_scan_step<<<blocks, threads, threads * sizeof(int)>>>(array_i, array_o, array_size, dist); else prefix_scan_step<<<blocks, threads, threads * sizeof(int)>>>(array_o, array_i, array_size, dist); hipDeviceSynchronize(); ++i; dist *= 2; } if (i % 2 == 0) hipMemcpy(array_o, array_i, array_size * sizeof(int), hipMemcpyDeviceToDevice); } /* * GPU kernel: reduction, getting the sum of an array */ __global__ void shmem_reduce_kernel(int * 
d_out, const int * d_in, const int size)
{
    // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
    extern __shared__ int sdata[];

    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;

    // load shared mem from global mem; zero-fill slots past the end of the input
    // so a partially filled block cannot read or add garbage
    sdata[tid] = (myId < size) ? d_in[myId] : 0;
    __syncthreads(); // make sure entire block is loaded!

    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s > 0; s = s / 2)
    {
        if (tid < s && (myId + s) < size)
        {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads(); // make sure all adds at one stage are done!
    }

    // only thread 0 writes result for this block back to global mem
    if (tid == 0)
    {
        d_out[blockIdx.x] = sdata[0];
    }
}

/*
 * Reduction-based algorithm to find the sum of an array
 */
void reduce(int * d_out, int * d_intermediate, int * d_in, int size)
{
    // assumes that size is not greater than maxThreadsPerBlock^2
    const int maxThreadsPerBlock = calc_num_thread(size);
    int threads = maxThreadsPerBlock;
    int blocks = (size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;
    shmem_reduce_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_intermediate, d_in, size);

    // now we're down to one block left, so reduce it
    threads = blocks;
    blocks = 1;
    // the shared buffer must cover the padded power-of-two block size,
    // since every thread in the block writes its own sdata slot
    shmem_reduce_kernel<<<blocks, round_up_pow2(threads), round_up_pow2(threads) * sizeof(int)>>>(d_out, d_intermediate, threads);
}

/*
 * GPU kernel for part a
 */
__global__ void global_counter_kernel(int * array_i, int * cnt_o, int array_size)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (myId < array_size)
    {
        atomicAdd(&cnt_o[array_i[myId] / 100], 1);
    }
}

/*
 * part a: global memory counter
 * returns the pointer to the result array B
 */
int * global_counter(int * array_i, int array_size)
{
    // dynamically calculate the number of threads and blocks
    const int maxThreadsPerBlock = calc_num_thread(array_size);
    int threads = maxThreadsPerBlock;
    int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;

    // allocate GPU global memories for input & output arrays
    int * array_device, * array_device_out;
    hipMalloc((void **) &array_device, array_size * sizeof(int));
    hipMalloc((void **) &array_device_out, 11 * sizeof(int));
    // the counters must start from zero before the atomic adds
    hipMemset(array_device_out, 0, 11 * sizeof(int));

    // copy the input array into GPU global memory
    hipMemcpy(array_device, array_i, array_size * sizeof(int), hipMemcpyHostToDevice);

    // launch the kernel
    global_counter_kernel<<<blocks, threads>>>(array_device, array_device_out, array_size);

    // allocate CPU memory for output array
    int * array_o = (int *)malloc(10 * sizeof(int));

    // copy result back to CPU
    hipMemcpy(array_o, array_device_out, 10 * sizeof(int), hipMemcpyDeviceToHost);

    // finish
    hipFree(array_device);
    hipFree(array_device_out);
    return array_o;
}

/*
 * GPU kernel for part b
 * cnt_matrix dimensions: 10 x (# of blocks)
 */
__global__ void shmem_counter_kernel(int * array_i, int * cnt_matrix, int array_size, int num_block)
{
    // shared counter within block
    // size: 11 * sizeof(int)
    // one extra int for numbers greater than 1000
    extern __shared__ int scnt[];

    // initialize to 0 (all 11 slots, including the overflow slot)
    if (threadIdx.x < 11)
    {
        scnt[threadIdx.x] = 0;
    }
    __syncthreads();

    // block-local counter
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (myId < array_size)
    {
        atomicAdd(&scnt[array_i[myId] / 100], 1);
    }
    __syncthreads();

    // copy the counter values to global memory
    // only have 10 values
    if (threadIdx.x < 10)
    {
        cnt_matrix[threadIdx.x * num_block + blockIdx.x] = scnt[threadIdx.x];
    }
}

/*
 * part b: shared memory counter
 * returns the pointer to the result array B
 */
int * shmem_counter(int * array_i, int array_size)
{
    // dynamically calculate the number of threads and blocks
    const int maxThreadsPerBlock = calc_num_thread(array_size);
    int threads = maxThreadsPerBlock;
    int blocks = (array_size + maxThreadsPerBlock - 1) / maxThreadsPerBlock;

    // allocate GPU global memories for input & output arrays and intermediate counter matrix
    int * array_device, * array_device_inter, * array_device_out;
    hipMalloc((void **) &array_device, array_size * sizeof(int));
    hipMalloc((void **) &array_device_inter, 10 * blocks * sizeof(int));
    hipMalloc((void **) &array_device_out, 10 * sizeof(int));

    /* --------------------------------------------------------------------------------
     * The intermediate counter matrix
     *
     *             block 0 | block 1 | block 2 | ... ... | block N*
     * [  0,  99]
     * [100, 199]
     * [200, 299]
     * ... ...
     * [900, 999]
     *
     * *Note: the number of blocks, N, is stored in variable "blocks"
     * -------------------------------------------------------------------------------- */

    // allocate GPU global memory for reduction's intermediate results
    int * array_device_reduction_inter;
    hipMalloc((void **) &array_device_reduction_inter, blocks * sizeof(int));

    // allocate CPU memory for the output array
    int * array_o = (int *)malloc(10 * sizeof(int));

    // copy the input array into GPU global memory
    hipMemcpy(array_device, array_i, array_size * sizeof(int), hipMemcpyHostToDevice);

    // launch the counter kernel
    // shared memory size: 11 * sizeof(int)
    // one extra int for numbers greater than 1000
    shmem_counter_kernel<<<blocks, threads, 11 * sizeof(int)>>>(array_device, array_device_inter, array_size, blocks);

    // do reduction for each range
    for (int i = 0; i < 10; ++i)
    {
        reduce(&array_device_out[i], array_device_reduction_inter, &array_device_inter[blocks * i], blocks);
    }

    // copy result back to CPU
    hipMemcpy(array_o, array_device_out, 10 * sizeof(int), hipMemcpyDeviceToHost);

    // finish
    hipFree(array_device);
    hipFree(array_device_out);
    hipFree(array_device_inter);
    hipFree(array_device_reduction_inter);
    return array_o;
}

/*
 * part c: prefix scan
 * returns the pointer to the result array C
 */
int * integrate_counter(int * array_i, int array_size)
{
    // allocate GPU global memories for input/output array
    int * array_device, * array_buffer;
    hipMalloc((void **) &array_device, array_size * sizeof(int));
    hipMalloc((void **) &array_buffer, array_size * sizeof(int));

    // copy the input array into GPU global memory
    hipMemcpy(array_device, array_i, array_size * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(array_buffer, array_device, array_size * sizeof(int), hipMemcpyDeviceToDevice);

    // run prefix scan
    prefix_scan(array_buffer, array_device, array_size);

    // allocate CPU memory for the output array
    int * array_o = (int *)malloc(array_size * sizeof(int));

    // copy result back to CPU
    hipMemcpy(array_o, array_device, array_size * sizeof(int), hipMemcpyDeviceToHost);

    // finish
    hipFree(array_device);
    hipFree(array_buffer);
    return array_o;
}

/*
 * CPU main routine
 */
int main(void)
{
    // check device
    check_dev();

    // data array on host
    int array_size = 0;
    int * array_i = read_data(&array_size);

    // part a ------------------------------------------------------------
    // compute counter values
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    int * array_o_a = global_counter(array_i, array_size);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);

    // print to file
    print_file(array_o_a, 10, "./q2a.txt");
    // print debug information to stdout
    // printf(">> Average time elapsed in part a: %f\n", elapsedTime);

    // part b ------------------------------------------------------------
    hipEvent_t start_b, stop_b;
    hipEventCreate(&start_b);
    hipEventCreate(&stop_b);
    hipEventRecord(start_b, 0);
    int * array_o_b = shmem_counter(array_i, array_size);
    hipEventRecord(stop_b, 0);
    hipEventSynchronize(stop_b);
    hipEventElapsedTime(&elapsedTime, start_b, stop_b);

    // print to file
    print_file(array_o_b, 10, "./q2b.txt");
    // print debug information to stdout
    // printf(">> Average time elapsed in part b: %f\n", elapsedTime);

    // part c ------------------------------------------------------------
    hipEvent_t start_c, stop_c;
    hipEventCreate(&start_c);
    hipEventCreate(&stop_c);
    hipEventRecord(start_c, 0);
    int * array_o_c = integrate_counter(array_o_b, 10);
    hipEventRecord(stop_c, 0);
    hipEventSynchronize(stop_c);
    hipEventElapsedTime(&elapsedTime, start_c, stop_c);

    // print to file
    print_file(array_o_c, 10, "./q2c.txt");
    // print debug information to stdout
    // printf(">> Average time elapsed in part c: %f\n", elapsedTime);

    // finish
    free(array_i);
    free(array_o_a);
    free(array_o_b);
    free(array_o_c);
    return 0;
}
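A note on the converted answer above: like the CUDA original, it ignores every hipError_t return value, so a failed allocation, copy, or launch passes silently. A minimal error-checking sketch follows; the HIP_CHECK name is our own convention, not something provided by the HIP headers, and only the real hipGetErrorString/hipGetLastError API calls are assumed.

#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Abort with a readable message whenever a HIP call does not return hipSuccess.
#define HIP_CHECK(call)                                              \
    do {                                                             \
        hipError_t err_ = (call);                                    \
        if (err_ != hipSuccess) {                                    \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",             \
                    hipGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)

// usage, e.g. in global_counter():
//   HIP_CHECK(hipMalloc((void **) &array_device, array_size * sizeof(int)));
//   global_counter_kernel<<<blocks, threads>>>(array_device, array_device_out, array_size);
//   HIP_CHECK(hipGetLastError());   // catches launch-configuration errors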
Convert the following CUDA code to AMD GPU code: cuda #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #define LSIZE 31 #define MIN_LIM 12.0 #define MAX_LIM 30.0 void check_input(int argc,char* argv[]); __global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines); long calc_lines(char *filename); int main(int argc,char * argv[]) { check_input(argc,argv); // Check cmd inputs char *filename=argv[3]; // Variable initialization int coll = atoi(argv[1]); int exec_time=atoi(argv[2]); int threads=atoi(argv[4]); int BLOCKSIZE = atoi(argv[5]); long loop_count; loop_count =calc_lines(filename); // Count the lines of input file FILE *input=fopen(filename,"r"); // Open file with file descriptor struct cudaDeviceProp prop; cudaGetDeviceProperties(&prop,0); // Get gpu's properties information if(coll != -1) // Handle max_collisions argument { if(coll>loop_count) { printf("[!] Warning: Specified collisions to be tested exceed the ones in input file\n"); printf("[!] Setting the number of collisions to the maximum (taken from input file)\n"); } else { if (coll<0) return 1; loop_count = coll; } } if (BLOCKSIZE==-1) // Handle blocksize argument { BLOCKSIZE=512; // A default value } else { if (BLOCKSIZE%prop.warpSize!=0 || BLOCKSIZE<=0) { printf("[-]Block_size must be a positive multiple of gpu's warp_size %d \n",prop.warpSize ); return 5; } } if (threads!=-1) // Handle threads argument { if (threads<=0) return 4; if (threads%BLOCKSIZE!=0) { threads=(threads/BLOCKSIZE)*BLOCKSIZE; } } else { threads=prop.maxThreadsPerMultiProcessor*prop.multiProcessorCount; } // Print some information [ Usefull for debugging ] printf("[+] GPU-model: %s\tTotal GPU memory %ld MB \n",prop.name,prop.totalGlobalMem/(1024*1024) ); printf("[!] You are trying to allocate %ld MBs of memmory on CPU-RAM and GPU-GlobalMem\n",threads*3*sizeof(float)/(1024*1024) ); printf("[+] Launching %d GPU-Threads with BlockSize %d\n",threads,BLOCKSIZE ); // Initialize CUDA WallClock-time counters as events cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 blockSize(BLOCKSIZE); // Declare CUDA Block size explicitly dim3 gridSize(threads/BLOCKSIZE); // Declare CUDA Grid size explicitly float *h_coordinates=(float * )malloc(3*threads*sizeof(float)); // allocate Host memmory for elements to be read from file float *d_coordinates; int *d_coords_within,*h_coords_within=(int*)malloc(sizeof(int)); // allocate Host memmory for the counter of coordinates in area of interest *h_coords_within=0; // Allocate memmory on CUDA capable Device for: cudaMalloc(&d_coordinates,3*threads*sizeof(float)); // input file's coordinates cudaMalloc(&d_coords_within,sizeof(int)); // coordinates counter cudaMemcpy(d_coords_within,h_coords_within,sizeof(int),cudaMemcpyHostToDevice); // Initialize the value of cuounter on Device int i,j=0; float time_elapsed = 0; printf("[+] Working...\n" ); cudaEventRecord(start); // Starting time reference while(j<loop_count && (exec_time==-1?1:time_elapsed<exec_time)) // Main loop of the programm { if (j+threads>loop_count) { threads=loop_count-j; cudaFree(d_coordinates); cudaMalloc(&d_coordinates,3*threads*sizeof(float)); } for(i=0;i<threads;i++) { fscanf(input,"%f %f %f",&h_coordinates[i*3],&h_coordinates[i*3+1],&h_coordinates[i*3+2]); // Read cooordinates from file } cudaMemcpy(d_coordinates,h_coordinates,3*threads*sizeof(float),cudaMemcpyHostToDevice); // Copy read cooordinates on Device examine<<<gridSize,blockSize>>>(d_coordinates,d_coords_within,3*threads); // Launch 
gpu kernel for calculations cudaEventRecord(stop); // Stop time reference cudaEventSynchronize(stop); // Block CPU until "stop" event is recorded cudaEventElapsedTime(&time_elapsed, start, stop); // Calculate the time elapsed in milliseconds time_elapsed=time_elapsed/1000; // Convert milliseconds to seconds j+=threads; } // Destroy CUDA timers cudaEventDestroy(start); cudaEventDestroy(stop); cudaMemcpy(h_coords_within,d_coords_within,sizeof(int),cudaMemcpyDeviceToHost); // Copy results from Device to Host //Printing results printf("[+] Main part of the program was being executed for :: %.3f :: sec)\n", time_elapsed); printf("[+] %ld coordinates have been analyzed\n[+] %d cooordinates were inside the area of interest\n[+] %ld coordinates read per second\n", loop_count, *h_coords_within, (time_elapsed<1?loop_count:loop_count/(int)time_elapsed)); // Free Host and Device memory cudaFree(d_coordinates); cudaFree(d_coords_within); fclose(input); free(h_coordinates); free(h_coords_within); return 0; } __global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines) { int index=blockIdx.x*3*blockDim.x+3*threadIdx.x; // find the index of starting element for each thread on each block float coord1=d_coordinates[index],coord2=d_coordinates[index+1],coord3=d_coordinates[index+2]; // Copy cooordinates from GPU's global memory to thread's local memory if(index>=d_lines) return; if(coord1 >= MIN_LIM && coord1 <= MAX_LIM && coord2 >= MIN_LIM && coord2 <= MAX_LIM && coord3 >= MIN_LIM && coord3 <= MAX_LIM) { // If the current coordinate is within the accepted limits, atomicAdd((unsigned int*)d_coords_within,1); // So as threads do not mess up the values } } void check_input(int argc,char *argv[]) // Handle number of arguments errors and show usage { if (argc<6 || argc>6) { printf("[-] Usage: ./examine [max_collisions] [max_exec_time] [input_file] [Threads] [1D_blockSize]\nUse \"-1\": for no boundies \n"); if (argc==2) if (!strcmp(argv[1],"--help")) { printf("max_collisions: Maximum number of collisions\nmax_exec_time: Maximum execution time\ninput_file: Filename to examine\nThreads: Number of gpu-threads to use / # Rows in memmory\n1D_blocksize: gpu-blocksize to use" ); printf("\t ======Usefull info!======\n"); printf("1) 1D_blockSize must be a multiple of 32. (or whatever warp_size is supported by your GPU)\n2) Threads should be a multiple of blockSize\n 3)These 2 parameters are important for performance\n" ); } exit(2); } } long calc_lines(char *filename) // Calculates the lines of input file { FILE *file=fopen(filename,"r"); fseek(file,0L,SEEK_END); //set file position indicator right to the end-of-file long lines=ftell(file); //store the number of bytes since the beginning of the file fseek(file,0L,SEEK_SET); fclose(file); return lines/LSIZE; //return lines count of the file }
#include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #define LSIZE 31 #define MIN_LIM 12.0 #define MAX_LIM 30.0 void check_input(int argc,char* argv[]); __global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines); long calc_lines(char *filename); int main(int argc,char * argv[]) { check_input(argc,argv); // Check cmd inputs char *filename=argv[3]; // Variable initialization int coll = atoi(argv[1]); int exec_time=atoi(argv[2]); int threads=atoi(argv[4]); int BLOCKSIZE = atoi(argv[5]); long loop_count; loop_count =calc_lines(filename); // Count the lines of input file FILE *input=fopen(filename,"r"); // Open file with file descriptor struct hipDeviceProp_t prop; hipGetDeviceProperties(&prop,0); // Get gpu's properties information if(coll != -1) // Handle max_collisions argument { if(coll>loop_count) { printf("[!] Warning: Specified collisions to be tested exceed the ones in input file\n"); printf("[!] Setting the number of collisions to the maximum (taken from input file)\n"); } else { if (coll<0) return 1; loop_count = coll; } } if (BLOCKSIZE==-1) // Handle blocksize argument { BLOCKSIZE=512; // A default value } else { if (BLOCKSIZE%prop.warpSize!=0 || BLOCKSIZE<=0) { printf("[-]Block_size must be a positive multiple of gpu's warp_size %d \n",prop.warpSize ); return 5; } } if (threads!=-1) // Handle threads argument { if (threads<=0) return 4; if (threads%BLOCKSIZE!=0) { threads=(threads/BLOCKSIZE)*BLOCKSIZE; } } else { threads=prop.maxThreadsPerMultiProcessor*prop.multiProcessorCount; } // Print some information [ Usefull for debugging ] printf("[+] GPU-model: %s\tTotal GPU memory %ld MB \n",prop.name,prop.totalGlobalMem/(1024*1024) ); printf("[!] You are trying to allocate %ld MBs of memmory on CPU-RAM and GPU-GlobalMem\n",threads*3*sizeof(float)/(1024*1024) ); printf("[+] Launching %d GPU-Threads with BlockSize %d\n",threads,BLOCKSIZE ); // Initialize CUDA WallClock-time counters as events hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); dim3 blockSize(BLOCKSIZE); // Declare CUDA Block size explicitly dim3 gridSize(threads/BLOCKSIZE); // Declare CUDA Grid size explicitly float *h_coordinates=(float * )malloc(3*threads*sizeof(float)); // allocate Host memmory for elements to be read from file float *d_coordinates; int *d_coords_within,*h_coords_within=(int*)malloc(sizeof(int)); // allocate Host memmory for the counter of coordinates in area of interest *h_coords_within=0; // Allocate memmory on CUDA capable Device for: hipMalloc(&d_coordinates,3*threads*sizeof(float)); // input file's coordinates hipMalloc(&d_coords_within,sizeof(int)); // coordinates counter hipMemcpy(d_coords_within,h_coords_within,sizeof(int),hipMemcpyHostToDevice); // Initialize the value of cuounter on Device int i,j=0; float time_elapsed = 0; printf("[+] Working...\n" ); hipEventRecord(start); // Starting time reference while(j<loop_count && (exec_time==-1?1:time_elapsed<exec_time)) // Main loop of the programm { if (j+threads>loop_count) { threads=loop_count-j; hipFree(d_coordinates); hipMalloc(&d_coordinates,3*threads*sizeof(float)); } for(i=0;i<threads;i++) { fscanf(input,"%f %f %f",&h_coordinates[i*3],&h_coordinates[i*3+1],&h_coordinates[i*3+2]); // Read cooordinates from file } hipMemcpy(d_coordinates,h_coordinates,3*threads*sizeof(float),hipMemcpyHostToDevice); // Copy read cooordinates on Device examine<<<gridSize,blockSize>>>(d_coordinates,d_coords_within,3*threads); // Launch gpu kernel for calculations 
hipEventRecord(stop); // Stop time reference hipEventSynchronize(stop); // Block CPU until "stop" event is recorded hipEventElapsedTime(&time_elapsed, start, stop); // Calculate the time elapsed in milliseconds time_elapsed=time_elapsed/1000; // Convert milliseconds to seconds j+=threads; } // Destroy CUDA timers hipEventDestroy(start); hipEventDestroy(stop); hipMemcpy(h_coords_within,d_coords_within,sizeof(int),hipMemcpyDeviceToHost); // Copy results from Device to Host //Printing results printf("[+] Main part of the program was being executed for :: %.3f :: sec)\n", time_elapsed); printf("[+] %ld coordinates have been analyzed\n[+] %d cooordinates were inside the area of interest\n[+] %ld coordinates read per second\n", loop_count, *h_coords_within, (time_elapsed<1?loop_count:loop_count/(int)time_elapsed)); // Free Host and Device memory hipFree(d_coordinates); hipFree(d_coords_within); fclose(input); free(h_coordinates); free(h_coords_within); return 0; } __global__ void examine(float *d_coordinates,int *d_coords_within,int d_lines) { int index=blockIdx.x*3*blockDim.x+3*threadIdx.x; // find the index of starting element for each thread on each block float coord1=d_coordinates[index],coord2=d_coordinates[index+1],coord3=d_coordinates[index+2]; // Copy cooordinates from GPU's global memory to thread's local memory if(index>=d_lines) return; if(coord1 >= MIN_LIM && coord1 <= MAX_LIM && coord2 >= MIN_LIM && coord2 <= MAX_LIM && coord3 >= MIN_LIM && coord3 <= MAX_LIM) { // If the current coordinate is within the accepted limits, atomicAdd((unsigned int*)d_coords_within,1); // So as threads do not mess up the values } } void check_input(int argc,char *argv[]) // Handle number of arguments errors and show usage { if (argc<6 || argc>6) { printf("[-] Usage: ./examine [max_collisions] [max_exec_time] [input_file] [Threads] [1D_blockSize]\nUse \"-1\": for no boundies \n"); if (argc==2) if (!strcmp(argv[1],"--help")) { printf("max_collisions: Maximum number of collisions\nmax_exec_time: Maximum execution time\ninput_file: Filename to examine\nThreads: Number of gpu-threads to use / # Rows in memmory\n1D_blocksize: gpu-blocksize to use" ); printf("\t ======Usefull info!======\n"); printf("1) 1D_blockSize must be a multiple of 32. (or whatever warp_size is supported by your GPU)\n2) Threads should be a multiple of blockSize\n 3)These 2 parameters are important for performance\n" ); } exit(2); } } long calc_lines(char *filename) // Calculates the lines of input file { FILE *file=fopen(filename,"r"); fseek(file,0L,SEEK_END); //set file position indicator right to the end-of-file long lines=ftell(file); //store the number of bytes since the beginning of the file fseek(file,0L,SEEK_SET); fclose(file); return lines/LSIZE; //return lines count of the file }
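One port-specific detail worth flagging in this pair: the block-size validation is written against prop.warpSize rather than a hard-coded 32, which is exactly what keeps it portable. On AMD hardware hipGetDeviceProperties typically reports a warpSize of 64 (the GCN/CDNA wavefront width; RDNA parts commonly report 32), so a block size that passed on an NVIDIA GPU can be rejected here. A standalone sketch of the query, under the assumption that device 0 is the target:

#include <hip/hip_runtime.h>
#include <stdio.h>

int main(void)
{
    hipDeviceProp_t prop;
    if (hipGetDeviceProperties(&prop, 0) != hipSuccess) return 1;
    // On a wave64 device this prints warpSize: 64, so e.g. BLOCKSIZE=96 fails
    // the BLOCKSIZE % prop.warpSize check that would pass on an NVIDIA GPU.
    printf("device: %s, warpSize: %d, multiProcessorCount: %d\n",
           prop.name, prop.warpSize, prop.multiProcessorCount);
    return 0;
}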
Convert the following CUDA code to AMD GPU code: cuda /********************************************************************* * * © (or copyright) 2020. Triad National Security, LLC. * All rights reserved. * This program was produced under U.S. Government contract * 89233218CNA000001 for Los AlamosNational Laboratory (LANL), * which is operated by Triad National Security, LLC for the U.S. * Department of Energy/National Nuclear Security Administration. * All rights in the program are reserved by Triad National * Security, LLC, and the U.S. Department of Energy/National Nuclear * Security Administration. The Government is granted for itself * and others acting on its behalf a nonexclusive, paid-up, * irrevocable worldwide license in this material to reproduce, * prepare derivative works, distribute copies to the public, * perform publicly and display publicly, and to permit others * to do so. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * Author: Alex von Sternberg *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 __device__ bool d_error; __global__ void crs(float* fPoints, int* iPoints, int cloudSize, int maxRay, float* fStart, int* iStart, float* fStartVoxel, int* outPoints, int* outSizes, int* min, int* max, float resolution) { int index = threadIdx.x + blockIdx.x * blockDim.x; // if there are extra threads, don't run them if(index >= cloudSize) return; // init vars bool done = false; // calculate normal vector in direction of sensor->point float direction[3] = {fPoints[index]-fStart[0], fPoints[cloudSize+index]-fStart[1], fPoints[2*cloudSize+index]-fStart[2]}; float directionMagnitude = powf(powf(direction[0],2) + powf(direction[1],2) + powf(direction[2],2),0.5); // variables used for ray casting algorithm int stepDirection[3]; // +/- step in each cardinal direction float accumulatedError[3]; // error accumulated in each direction float deltaError[3]; // change in error accumulated for a step in a direction int currentIndex[3]; // for tracking the index as we trace int pointIndex[3]; // index of final occupied point bool usePI = true; // we only check for the final point if it is on the map, // otherwise we are done when we leave the map // Set the starting position to the sensor position, and the final index for(int i = 0; i < 3; i++) { currentIndex[i] = iStart[i]; } pointIndex[0] = iPoints[index]; pointIndex[1] = iPoints[cloudSize+index]; pointIndex[2] = iPoints[2*cloudSize+index]; // If the occupied point is in the map, we use it as a stopping point if(pointIndex[0] < min[0] || pointIndex[0] > max[0] || pointIndex[1] < min[1] || pointIndex[1] > max[1] || pointIndex[2] < min[2] || pointIndex[2] > max[2]) usePI = false; // check direction magnitude for divide by zero or same cell if(fabs(directionMagnitude) < resolution) { d_error = true; return; } // set up initial values in each direction for(int dir = 0; dir < 3; dir++) { direction[dir] = fdividef(direction[dir],directionMagnitude); if(direction[dir] > 0.0) stepDirection[dir] = 1; else if(direction[dir] < 0.0) stepDirection[dir] = -1; float voxelBorder = fStartVoxel[dir] + stepDirection[dir]*resolution*0.5; accumulatedError[dir] = fdividef((voxelBorder - fStart[dir]),direction[dir]); deltaError[dir] = fdividef(resolution,fabs(direction[dir])); } int count = 0; // loop until we are out of map bounds while(!done) { // find direction of min error int dim = 2; if(fabs(accumulatedError[0]) < fabs(accumulatedError[1]) && fabs(accumulatedError[0]) < fabs(accumulatedError[2])) dim = 0; else if(fabs(accumulatedError[1]) < fabs(accumulatedError[0]) && fabs(accumulatedError[1]) < fabs(accumulatedError[2])) dim = 1; // advance in direction of min error currentIndex[dim] = currentIndex[dim] + stepDirection[dim]; accumulatedError[dim] = accumulatedError[dim] + deltaError[dim]; // done if we are at occ point if(usePI) { if(currentIndex[0] == pointIndex[0] && currentIndex[1] == pointIndex[1] && currentIndex[2] == pointIndex[2]) { done = true; } } // if we are off the map, we are done. 
if(currentIndex[0] < min[0] || currentIndex[0] > max[0] || currentIndex[1] < min[1] || currentIndex[1] > max[1] || currentIndex[2] < min[2] || currentIndex[2] > max[2]) { done = true; } //otherwise we mark the current index as unoccupied if(!done) { outPoints[index*maxRay+count] = currentIndex[0]; outPoints[index*maxRay+(cloudSize*maxRay)+count] = currentIndex[1]; outPoints[index*maxRay+(2*cloudSize*maxRay)+count] = currentIndex[2]; } count = count + 1; } outSizes[index] = count; return; } bool castRays(float* fPoints, int* iPoints, int cloudSize, int maxRay, float* fStart, int* iStart, float* fStartVoxel, int* outPoints, int* outSizes, int minX, int maxX, int minY, int maxY, int minZ, int maxZ, float resolution) { // Device copies of three inputs and output, size of allocated memory, num of threads and blocks float *d_fPoints, *d_fStart, *d_fStartVoxel; int *d_iPoints, *d_outPoints, *d_iStart, *d_outSizes, *d_min, *d_max; int min[3] = {minX, minY, minZ}; int max[3] = {maxX, maxY, maxZ}; int thr, blk; bool h_error = false; int temp; for(int i = 0; i < 3; i ++) { if(min[i] > max[i]) { temp = min[i]; min[i] = max[i]; max[i] = temp; } } //cudaMemset(&d_error,0,sizeof(bool)); // Alloc memory for device copies of inputs and outputs cudaMalloc((void**)&d_fPoints, ((cloudSize*3) * sizeof(float))); cudaMalloc((void**)&d_iPoints, ((cloudSize*3) * sizeof(int))); cudaMalloc((void**)&d_fStart, (3 * sizeof(float))); cudaMalloc((void**)&d_iStart, (3 * sizeof(int))); cudaMalloc((void**)&d_fStartVoxel, (3 * sizeof(float))); cudaMalloc((void**)&d_min, (3 * sizeof(int))); cudaMalloc((void**)&d_max, (3 * sizeof(int))); cudaMalloc((void**)&d_outPoints, ((cloudSize*maxRay*3) * sizeof(int))); cudaMalloc((void**)&d_outSizes, (cloudSize * sizeof(int))); // Copy inputs to device cudaMemcpy(d_fPoints, fPoints, ((cloudSize*3) * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(d_iPoints, iPoints, ((cloudSize*3) * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(d_fStart, fStart, (3 * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(d_iStart, iStart, (3 * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(d_fStartVoxel, fStartVoxel, (3 * sizeof(float)), cudaMemcpyHostToDevice); cudaMemcpy(d_min, min, (3 * sizeof(int)), cudaMemcpyHostToDevice); cudaMemcpy(d_max, max, (3 * sizeof(int)), cudaMemcpyHostToDevice); // Calculates blocks and threads and launch average3 kernel on GPU thr=THREADS_PER_BLOCK; blk=cloudSize/THREADS_PER_BLOCK+1; crs<<<blk,thr>>>(d_fPoints, d_iPoints, cloudSize, maxRay, d_fStart, d_iStart, d_fStartVoxel, d_outPoints, d_outSizes, d_min, d_max, resolution); // Wait for the GPU to finish cudaDeviceSynchronize(); //// Copy result back to host and cleanup cudaMemcpy(outPoints, d_outPoints, (cloudSize*maxRay*3) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(outSizes, d_outSizes, cloudSize * sizeof(int), cudaMemcpyDeviceToHost); //cudaMemcpyFromSymbol(&h_error, "d_error", sizeof(bool), 0, cudaMemcpyDeviceToHost); cudaFree(d_outSizes); cudaFree(d_outPoints); cudaFree(d_max); cudaFree(d_min); cudaFree(d_fStartVoxel); cudaFree(d_iStart); cudaFree(d_fStart); cudaFree(d_iPoints); cudaFree(d_fPoints); return !h_error; }
/********************************************************************* * * © (or copyright) 2020. Triad National Security, LLC. * All rights reserved. * This program was produced under U.S. Government contract * 89233218CNA000001 for Los AlamosNational Laboratory (LANL), * which is operated by Triad National Security, LLC for the U.S. * Department of Energy/National Nuclear Security Administration. * All rights in the program are reserved by Triad National * Security, LLC, and the U.S. Department of Energy/National Nuclear * Security Administration. The Government is granted for itself * and others acting on its behalf a nonexclusive, paid-up, * irrevocable worldwide license in this material to reproduce, * prepare derivative works, distribute copies to the public, * perform publicly and display publicly, and to permit others * to do so. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * Author: Alex von Sternberg *********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 __device__ bool d_error; __global__ void crs(float* fPoints, int* iPoints, int cloudSize, int maxRay, float* fStart, int* iStart, float* fStartVoxel, int* outPoints, int* outSizes, int* min, int* max, float resolution) { int index = threadIdx.x + blockIdx.x * blockDim.x; // if there are extra threads, don't run them if(index >= cloudSize) return; // init vars bool done = false; // calculate normal vector in direction of sensor->point float direction[3] = {fPoints[index]-fStart[0], fPoints[cloudSize+index]-fStart[1], fPoints[2*cloudSize+index]-fStart[2]}; float directionMagnitude = powf(powf(direction[0],2) + powf(direction[1],2) + powf(direction[2],2),0.5); // variables used for ray casting algorithm int stepDirection[3]; // +/- step in each cardinal direction float accumulatedError[3]; // error accumulated in each direction float deltaError[3]; // change in error accumulated for a step in a direction int currentIndex[3]; // for tracking the index as we trace int pointIndex[3]; // index of final occupied point bool usePI = true; // we only check for the final point if it is on the map, // otherwise we are done when we leave the map // Set the starting position to the sensor position, and the final index for(int i = 0; i < 3; i++) { currentIndex[i] = iStart[i]; } pointIndex[0] = iPoints[index]; pointIndex[1] = iPoints[cloudSize+index]; pointIndex[2] = iPoints[2*cloudSize+index]; // If the occupied point is in the map, we use it as a stopping point if(pointIndex[0] < min[0] || pointIndex[0] > max[0] || pointIndex[1] < min[1] || pointIndex[1] > max[1] || pointIndex[2] < min[2] || pointIndex[2] > max[2]) usePI = false; // check direction magnitude for divide by zero or same cell if(fabs(directionMagnitude) < resolution) { d_error = true; return; } // set up initial values in each direction for(int dir = 0; dir < 3; dir++) { direction[dir] = fdividef(direction[dir],directionMagnitude); if(direction[dir] > 0.0) stepDirection[dir] = 1; else if(direction[dir] < 0.0) stepDirection[dir] = -1; float voxelBorder = fStartVoxel[dir] + stepDirection[dir]*resolution*0.5; accumulatedError[dir] = fdividef((voxelBorder - fStart[dir]),direction[dir]); deltaError[dir] = fdividef(resolution,fabs(direction[dir])); } int count = 0; // loop until we are out of map bounds while(!done) { // find direction of min error int dim = 2; if(fabs(accumulatedError[0]) < fabs(accumulatedError[1]) && fabs(accumulatedError[0]) < fabs(accumulatedError[2])) dim = 0; else if(fabs(accumulatedError[1]) < fabs(accumulatedError[0]) && fabs(accumulatedError[1]) < fabs(accumulatedError[2])) dim = 1; // advance in direction of min error currentIndex[dim] = currentIndex[dim] + stepDirection[dim]; accumulatedError[dim] = accumulatedError[dim] + deltaError[dim]; // done if we are at occ point if(usePI) { if(currentIndex[0] == pointIndex[0] && currentIndex[1] == pointIndex[1] && currentIndex[2] == pointIndex[2]) { done = true; } } // if we are off the map, we are done. 
if(currentIndex[0] < min[0] || currentIndex[0] > max[0] || currentIndex[1] < min[1] || currentIndex[1] > max[1] || currentIndex[2] < min[2] || currentIndex[2] > max[2]) { done = true; } //otherwise we mark the current index as unoccupied if(!done) { outPoints[index*maxRay+count] = currentIndex[0]; outPoints[index*maxRay+(cloudSize*maxRay)+count] = currentIndex[1]; outPoints[index*maxRay+(2*cloudSize*maxRay)+count] = currentIndex[2]; } count = count + 1; } outSizes[index] = count; return; } bool castRays(float* fPoints, int* iPoints, int cloudSize, int maxRay, float* fStart, int* iStart, float* fStartVoxel, int* outPoints, int* outSizes, int minX, int maxX, int minY, int maxY, int minZ, int maxZ, float resolution) { // Device copies of three inputs and output, size of allocated memory, num of threads and blocks float *d_fPoints, *d_fStart, *d_fStartVoxel; int *d_iPoints, *d_outPoints, *d_iStart, *d_outSizes, *d_min, *d_max; int min[3] = {minX, minY, minZ}; int max[3] = {maxX, maxY, maxZ}; int thr, blk; bool h_error = false; int temp; for(int i = 0; i < 3; i ++) { if(min[i] > max[i]) { temp = min[i]; min[i] = max[i]; max[i] = temp; } } //cudaMemset(&d_error,0,sizeof(bool)); // Alloc memory for device copies of inputs and outputs hipMalloc((void**)&d_fPoints, ((cloudSize*3) * sizeof(float))); hipMalloc((void**)&d_iPoints, ((cloudSize*3) * sizeof(int))); hipMalloc((void**)&d_fStart, (3 * sizeof(float))); hipMalloc((void**)&d_iStart, (3 * sizeof(int))); hipMalloc((void**)&d_fStartVoxel, (3 * sizeof(float))); hipMalloc((void**)&d_min, (3 * sizeof(int))); hipMalloc((void**)&d_max, (3 * sizeof(int))); hipMalloc((void**)&d_outPoints, ((cloudSize*maxRay*3) * sizeof(int))); hipMalloc((void**)&d_outSizes, (cloudSize * sizeof(int))); // Copy inputs to device hipMemcpy(d_fPoints, fPoints, ((cloudSize*3) * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(d_iPoints, iPoints, ((cloudSize*3) * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(d_fStart, fStart, (3 * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(d_iStart, iStart, (3 * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(d_fStartVoxel, fStartVoxel, (3 * sizeof(float)), hipMemcpyHostToDevice); hipMemcpy(d_min, min, (3 * sizeof(int)), hipMemcpyHostToDevice); hipMemcpy(d_max, max, (3 * sizeof(int)), hipMemcpyHostToDevice); // Calculates blocks and threads and launch average3 kernel on GPU thr=THREADS_PER_BLOCK; blk=cloudSize/THREADS_PER_BLOCK+1; crs<<<blk,thr>>>(d_fPoints, d_iPoints, cloudSize, maxRay, d_fStart, d_iStart, d_fStartVoxel, d_outPoints, d_outSizes, d_min, d_max, resolution); // Wait for the GPU to finish hipDeviceSynchronize(); //// Copy result back to host and cleanup hipMemcpy(outPoints, d_outPoints, (cloudSize*maxRay*3) * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(outSizes, d_outSizes, cloudSize * sizeof(int), hipMemcpyDeviceToHost); //cudaMemcpyFromSymbol(&h_error, "d_error", sizeof(bool), 0, cudaMemcpyDeviceToHost); hipFree(d_outSizes); hipFree(d_outPoints); hipFree(d_max); hipFree(d_min); hipFree(d_fStartVoxel); hipFree(d_iStart); hipFree(d_fStart); hipFree(d_iPoints); hipFree(d_fPoints); return !h_error; }
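The converted answer keeps the cudaMemcpyFromSymbol line commented out, so h_error is never read back and castRays always returns true even when the kernel sets d_error. If the flag is wanted, HIP addresses device symbols through the HIP_SYMBOL macro rather than the old string-name form used in that commented line. A hedged sketch (the helper name read_error_flag is ours, not the original author's):

// Read back the __device__ bool d_error flag declared in this file.
bool read_error_flag(void)
{
    bool h_error = false;
    hipMemcpyFromSymbol(&h_error, HIP_SYMBOL(d_error), sizeof(bool), 0,
                        hipMemcpyDeviceToHost);
    return h_error;
}

// The flag would also need to be cleared before each launch, e.g.:
//   bool f = false;
//   hipMemcpyToSymbol(HIP_SYMBOL(d_error), &f, sizeof(bool));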
Convert the following CUDA code to AMD GPU code: cuda

#include "includes.h"

/* numCirs: num of total circles */
__global__ void kernelCompact(float* devSrc, float* devDst, unsigned int* devPredicate, unsigned int* devPos, int numCirs, int offset)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x; // index of the circles
    if (idx >= numCirs)
    {
        return;
    }
    unsigned int isIn = devPredicate[idx];
    if (isIn != 1)
    {
        return;
    }
    unsigned int pos = devPos[idx];
    idx *= offset;
    pos *= offset;
    for (int i = 0; i < offset; ++i)
    {
        devDst[pos + i] = devSrc[idx + i];
    }
}
#include <hip/hip_runtime.h>
#include "includes.h"

/* numCirs: num of total circles */
__global__ void kernelCompact(float* devSrc, float* devDst, unsigned int* devPredicate, unsigned int* devPos, int numCirs, int offset)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x; // index of the circles
    if (idx >= numCirs)
    {
        return;
    }
    unsigned int isIn = devPredicate[idx];
    if (isIn != 1)
    {
        return;
    }
    unsigned int pos = devPos[idx];
    idx *= offset;
    pos *= offset;
    for (int i = 0; i < offset; ++i)
    {
        devDst[pos + i] = devSrc[idx + i];
    }
}
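Nothing in this kernel needed more than the hip_runtime.h include, since stream compaction here is pure device code. For context, a hedged host-side sketch of how such a scatter step is usually driven once an earlier scan pass has filled devPredicate and the exclusive prefix sums in devPos; the 256-thread block size and the synchronize call are our assumptions, not part of the original:

// Assumes devSrc, devDst, devPredicate, devPos are already allocated with
// hipMalloc and populated by the predicate/scan passes.
int threadsPerBlock = 256;
int blocks = (numCirs + threadsPerBlock - 1) / threadsPerBlock;
kernelCompact<<<blocks, threadsPerBlock>>>(devSrc, devDst, devPredicate,
                                           devPos, numCirs, offset);
hipDeviceSynchronize();
// devDst now holds, contiguously and in original relative order, the
// `offset` floats of every circle whose predicate was 1.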
Convert the following CUDA code to AMD GPU code: cuda

#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>

using namespace std;

__global__ void func(int *dev_arr1, int *dev_arr2, int *dev_dot){
    __shared__ int temp[3];
    int index = threadIdx.x;
    if (index < 3){
        temp[index] = dev_arr1[index] * dev_arr2[index];
    }
    __syncthreads();
    if (index == 0){
        *dev_dot = temp[0] + temp[1] + temp[2];
    }
}

int main(){
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int *arr1, *arr2, *dot;
    int *dev_arr1, *dev_arr2, *dev_dot;
    int size = 3 * sizeof(int); // i j k

    arr1 = (int*) malloc(size);
    arr2 = (int*) malloc(size);
    dot = (int*) malloc(sizeof(int));

    cudaMalloc(&dev_arr1, size);
    cudaMalloc(&dev_arr2, size);
    cudaMalloc(&dev_dot, sizeof(int));

    cout << "Enter values of x, y, z. Vector1: xi + yj + zk: ";
    cin>>arr1[0]>>arr1[1]>>arr1[2];
    cout << "Enter values of x, y, z. Vector2: xi + yj + zk: ";
    cin>>arr2[0]>>arr2[1]>>arr2[2];

    cudaMemcpy(dev_arr1, arr1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_arr2, arr2, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_dot, dot, sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(start);
    func<<<1,3>>>(dev_arr1, dev_arr2, dev_dot);
    cudaEventRecord(stop);

    cudaMemcpy(dot, dev_dot, sizeof(int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    cout << "Dot product is: " << *dot << endl;

    float millis = 0;
    cudaEventElapsedTime(&millis, start, stop);
    cout << "Elapsed Time: " << millis << endl;
    return 0;
}
#include <iostream>
#include <hip/hip_runtime.h>

using namespace std;

__global__ void func(int *dev_arr1, int *dev_arr2, int *dev_dot){
    __shared__ int temp[3];
    int index = threadIdx.x;
    if (index < 3){
        temp[index] = dev_arr1[index] * dev_arr2[index];
    }
    __syncthreads();
    if (index == 0){
        *dev_dot = temp[0] + temp[1] + temp[2];
    }
}

int main(){
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    int *arr1, *arr2, *dot;
    int *dev_arr1, *dev_arr2, *dev_dot;
    int size = 3 * sizeof(int); // i j k

    arr1 = (int*) malloc(size);
    arr2 = (int*) malloc(size);
    dot = (int*) malloc(sizeof(int));

    hipMalloc(&dev_arr1, size);
    hipMalloc(&dev_arr2, size);
    hipMalloc(&dev_dot, sizeof(int));

    cout << "Enter values of x, y, z. Vector1: xi + yj + zk: ";
    cin>>arr1[0]>>arr1[1]>>arr1[2];
    cout << "Enter values of x, y, z. Vector2: xi + yj + zk: ";
    cin>>arr2[0]>>arr2[1]>>arr2[2];

    hipMemcpy(dev_arr1, arr1, size, hipMemcpyHostToDevice);
    hipMemcpy(dev_arr2, arr2, size, hipMemcpyHostToDevice);
    hipMemcpy(dev_dot, dot, sizeof(int), hipMemcpyHostToDevice);

    hipEventRecord(start);
    func<<<1,3>>>(dev_arr1, dev_arr2, dev_dot);
    hipEventRecord(stop);

    hipMemcpy(dot, dev_dot, sizeof(int), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();

    cout << "Dot product is: " << *dot << endl;

    float millis = 0;
    hipEventElapsedTime(&millis, start, stop);
    cout << "Elapsed Time: " << millis << endl;
    return 0;
}
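The port above reads hipEventElapsedTime only after a blocking hipMemcpy and hipDeviceSynchronize, which happens to guarantee that the stop event has completed by the time it is queried. The explicit, self-contained pattern synchronizes on the stop event itself; a minimal sketch reusing this program's kernel and buffers:

hipEvent_t t0, t1;
hipEventCreate(&t0);
hipEventCreate(&t1);
hipEventRecord(t0, 0);                          // mark start on the null stream
func<<<1,3>>>(dev_arr1, dev_arr2, dev_dot);     // work being timed
hipEventRecord(t1, 0);                          // mark stop
hipEventSynchronize(t1);                        // wait until t1 has actually happened
float ms = 0.0f;
hipEventElapsedTime(&ms, t0, t1);               // safe: both events are complete
hipEventDestroy(t0);
hipEventDestroy(t1);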
Convert the following CUDA code to AMD GPU code: cuda

#include <stdio.h>
#include <stdlib.h>
#include <algorithm>

// Change the code here:
// This should be changed to GPU kernel definition
void dot(int numElements, const float3* a, const float3* b, float* c)
{
    for (int i = 0; i < numElements; i++)
    {
        c[i] = a[i].x*b[i].x + a[i].y*b[i].y + a[i].z*b[i].z;
    }
}

int main()
{
    int numElements = 10000;

    float3* a = (float3*)calloc(numElements, sizeof(float3));
    float3* b = (float3*)calloc(numElements, sizeof(float3));
    float* c = (float*)calloc(numElements, sizeof(float));

    srand(1214134);
    for (int i = 0; i < numElements; i++)
    {
        a[i].x = float(rand())/float(RAND_MAX + 1.0);
        a[i].y = float(rand())/float(RAND_MAX + 1.0);
        a[i].z = float(rand())/float(RAND_MAX + 1.0);
        b[i].x = float(rand())/float(RAND_MAX + 1.0);
        b[i].y = float(rand())/float(RAND_MAX + 1.0);
        b[i].z = float(rand())/float(RAND_MAX + 1.0);
    }

    // Insert your code here:
    // 1. Create GPU device buffers
    // 2. Copy input data from host to device (vectors a and b)
    // 3. Change the CPU function call to the GPU kernel call
    dot(numElements, a, b, c);
    // 4. Copy the result back (vector c)

    for (int i = 0; i < std::min(10, numElements); i++)
    {
        printf("%f*%f + %f*%f + %f*%f = %f\n", a[i].x, b[i].x, a[i].y, b[i].y, a[i].z, b[i].z, c[i]);
    }
    printf("...\n");

    free(a);
    free(b);
    free(c);

    // Free GPU memory here

    return 0;
}
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>

// Change the code here:
// This should be changed to GPU kernel definition
void dot(int numElements, const float3* a, const float3* b, float* c)
{
    for (int i = 0; i < numElements; i++)
    {
        c[i] = a[i].x*b[i].x + a[i].y*b[i].y + a[i].z*b[i].z;
    }
}

int main()
{
    int numElements = 10000;

    float3* a = (float3*)calloc(numElements, sizeof(float3));
    float3* b = (float3*)calloc(numElements, sizeof(float3));
    float* c = (float*)calloc(numElements, sizeof(float));

    srand(1214134);
    for (int i = 0; i < numElements; i++)
    {
        a[i].x = float(rand())/float(RAND_MAX + 1.0);
        a[i].y = float(rand())/float(RAND_MAX + 1.0);
        a[i].z = float(rand())/float(RAND_MAX + 1.0);
        b[i].x = float(rand())/float(RAND_MAX + 1.0);
        b[i].y = float(rand())/float(RAND_MAX + 1.0);
        b[i].z = float(rand())/float(RAND_MAX + 1.0);
    }

    // Insert your code here:
    // 1. Create GPU device buffers
    // 2. Copy input data from host to device (vectors a and b)
    // 3. Change the CPU function call to the GPU kernel call
    dot(numElements, a, b, c);
    // 4. Copy the result back (vector c)

    for (int i = 0; i < std::min(10, numElements); i++)
    {
        printf("%f*%f + %f*%f + %f*%f = %f\n", a[i].x, b[i].x, a[i].y, b[i].y, a[i].z, b[i].z, c[i]);
    }
    printf("...\n");

    free(a);
    free(b);
    free(c);

    // Free GPU memory here

    return 0;
}
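Since this exercise contains no CUDA API calls yet, the conversion is only the header swap; the numbered TODO steps remain open in both versions. One possible way they could be filled in with HIP, sketched with our own kernel name (dotKernel) and a 256-thread block size, neither of which comes from the original:

// GPU kernel replacing the CPU dot() loop: one thread per output element.
__global__ void dotKernel(int numElements, const float3* a, const float3* b, float* c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numElements)
        c[i] = a[i].x*b[i].x + a[i].y*b[i].y + a[i].z*b[i].z;
}

// Inside main(), the numbered steps could then become:
// 1. create device buffers
float3 *d_a, *d_b; float *d_c;
hipMalloc(&d_a, numElements * sizeof(float3));
hipMalloc(&d_b, numElements * sizeof(float3));
hipMalloc(&d_c, numElements * sizeof(float));
// 2. copy inputs host -> device
hipMemcpy(d_a, a, numElements * sizeof(float3), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, numElements * sizeof(float3), hipMemcpyHostToDevice);
// 3. launch the kernel instead of the CPU call
int block = 256;
int grid = (numElements + block - 1) / block;
dotKernel<<<grid, block>>>(numElements, d_a, d_b, d_c);
// 4. copy the result back
hipMemcpy(c, d_c, numElements * sizeof(float), hipMemcpyDeviceToHost);
// free GPU memory
hipFree(d_a); hipFree(d_b); hipFree(d_c);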
Convert the following CUDA code to AMD GPU code: cuda #include <assert.h> #include <limits.h> #include <math.h> #include <stdio.h> #include <stdlib.h> /*! * \brief calculate normalized frequency matrix * * Normalized value(x) = \f$ \frac{ X - min }{ max - min }\f$ * \return normalized matrix * */ int normalize( int *in_mat, /*!< [in] input matrix */ float *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in the input matrix */ int in_columns /*!< [in] number of columns in the output matrix */ ) { int i,j; int max, min; for ( i=0; i<in_rows; i++) { max = INT_MIN; min = INT_MAX; for ( j=0;j<in_columns; j++) { if( min > in_mat[ (i*in_columns)+j ] ) min = in_mat[ (i*in_columns)+j ] ; if( max < in_mat[ (i*in_columns)+j ] ) max = in_mat[ (i*in_columns)+j ]; } for ( j=0; j<in_columns; j++) { out_mat[(i*in_columns)+j] =(float)(in_mat[(i*in_columns)+j] - min)/(float)(max-min) ; } } return 0; } /*! * \brief Calculates the total frequency of each file for each class * * function goes through all the files, calculates total frequency of each total * frequency for each file is stored in last column of the output matrix * in_mat: * numfiles X totalnumopcodes matrix, containing frequency of each opcode * out_mat: * numclass X totalnumopcodes+1 matrix, containing total frequency of each opcode and * each file in last column * in_inrows: * = numfiles * in_incolumns: * = totalnumopcodes * in_outrows: * = number of classes * * \return modified outmat contains total frequency for each opcode and each file * */ int createClassWiseData( int *in_mat, /*!< [in] input matrix */ int *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in input matrix */ int in_columns, /*!< [in] number of columns in input matrix */ int in_outrows /*!< [in] number of rows in output matrix */ ) { int i,j; int sum=0; // for each file for ( i=0; i<in_rows; i++) { sum = 0; // for each opcode for ( j=0; j<in_columns-1; j++) { // add the frequency for each opcode out_mat[ (i*in_columns)+j ] += in_mat [ (i*in_columns)+j ]; // save the frequency for total collection sum += in_mat[ (i*in_columns)+j ]; } out_mat[j] = sum; } return 0; } /*! * \brief gives probability of occurrence of each opcode in each class * * takes as input a matrix containing normalized frequencies of each opcode in a file, * probability of opcode occurring in a class is found, by adding frequencies of each * opcode in each file of each class and then dividing by sum of all frequencies in that * class. 
We take log so the values are in a smaller range * * in_inmat: * numfiles X totalnumopcodes matrix, has normalized frequency value for each opcode * * in_cvect: * numfiles in length, contains,class ,the current row in in_inmat belongs to * * in_inrows: * = numfiles * * in_incolumns: * = totalnumopcodes * * in_outrows: * = number of classes * * in_outcolumns: * = totalnumopcodes * * out_outmat: * in_outrows X in_outcolumns, probability of each opcode in each class * * out_cprob: * probability of each class * * \return out_outmat is the modified matrix which contains the required probabilities * */ int createProbablityMatrix( int *in_inmat, /*!< [in] input matrix */ float *out_outmat, /*!< [out] output matrix */ int *in_cvect, /*!< [in] class vector */ float *out_cprob, /*!< [out] probability of each class */ int in_inrows, /*!< [in] number of rows in input matrix */ int in_incolumns, /*!< [in] number of columns in input matrix */ int in_outrows, /*!< [in] number of rows in output matrix */ int in_outcolumns /*!< [in] number of columns in output matrix */ ) { #define INDEX(i,j,cols) ((i*cols)+j) int i,j; int cls; float *class_wise_total=(float*) calloc(sizeof(float),in_outrows); assert(class_wise_total != NULL ); // For each file for ( i=0; i<in_inrows; i++) { // get class for current file cls = in_cvect[ i ]; // For each opcode for ( j=0; j<in_outcolumns; j++) { // add frequency of each opcode to appropriate a class opcode out_outmat[ INDEX(cls,j,in_outcolumns) ] += (float)in_inmat[ INDEX(i,j,in_incolumns) ]; // add frequency to current class frequency class_wise_total [ cls ] += (float)in_inmat[ INDEX(i,j,in_incolumns) ]; } // increase the count of file in current class out_cprob[ cls ] += 1; } // For each class for ( i=0; i<in_outrows; i++) { // For each opcode for ( j=0; j<in_outcolumns; j++) { // store the probability of current opcode float temp = (log10((out_outmat[ INDEX(i,j,in_outcolumns) ]+1) / (class_wise_total[ i ]+1) )); out_outmat[ INDEX(i,j,in_outcolumns) ] = (-1)*temp; // multiply by -1, because log[0-1] < 0 } // save probability of current class out_cprob[ i ] = (-1)*log10(out_cprob[ i ]/ in_inrows); // multiply by -1 because log[0-1] < 0 } free(class_wise_total); return 0; #undef INDEX } /*! * \brief Displays the float matrix * * Should not be used on huge matrices * * \return * */ void printFloatMatrix( float *in_mat, /*!< [in] pointer to the matrix */ int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { int i,j; for ( i=0; i<in_rows; i++) { for ( j=0; j<in_columns; j++) { printf(" %f",in_mat[ (i*in_columns)+j ]); } printf("\n"); } } /*! * \brief Displays the int matrix * * Should not be used on huge matrices * * \return * */ void printIntMatrix( int *in_mat, /*!< [in] pointer to the matrix */ int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { int i,j; for ( i=0; i<in_rows; i++) { for ( j=0; j<in_columns; j++) { printf(" %d",in_mat[ (i*in_columns)+j ]); } printf("\n"); } } /*! 
* \brief Assigns class to all the test files * * takes a matrix of normalized frequency values for each test file, and probability * matrix for each opcode of each class, calculates probability of each opcode for each * class, assigns a class to a file with maximum probability * * in_mat: * numfiles X total_number_of_opcodes matrix, containing normalized frequencies * * in_cprob: * numclass wide vector, containing probability of each class * * out_pridict: * numfiles wide vector, containing assigned class for each file * * in_rows: * numfiles * * in_classes: * numclasses * * in_columns: * total number of opcodes * * \return predicted class vector * */ void assignClass( int *in_mat, /*!< [in] input matrix */ float *in_prob, /*!< [in] probability matrix */ float *in_cprob, /*!< [in] class probability matrix */ int *out_pridict, /*!< [out] predicted class */ int in_rows, /*!< [in] number of input rows */ int in_classes, /*!< [in] number of classes */ int in_columns /*!< [in] number of columns */ ) { #define INDEX(i,j,cols) ((i*cols)+j) int i,j,k; double *classprob = (double*) calloc( sizeof(double), in_classes); // for each file for ( i=0; i<in_rows; i++) { // for each class for ( k=0; k<in_classes; k++) classprob[ k ] = in_cprob[k]; // for each opcode for ( j=0; j<in_columns; j++) { // for each class for ( k=0; k<in_classes; k++) { // for opcodes having normalized frequency greater than 1 if ( in_mat [ INDEX(i,j,in_columns) ] > 0 ) { // add the probability to current class ( add because we already have // log10 of those values) // TODO remove multiplication and check classprob[ k ] += in_mat [ INDEX(i,j,in_columns) ]*in_prob [ INDEX(k,j,in_columns) ]; } } } int maxClass=0; // for each class for ( k=0; k<in_classes; k++) { // save the max class if( classprob[ maxClass ] > classprob[k] ) maxClass = k; } // assign the max class out_pridict[i] = maxClass; } free(classprob); #undef INDEX } /*! * \brief creates a int matrix of row X column dimensions * * This is actually a vector of size rows X columns X sizeof(int) * use the traditional way for accessing the vector elements * * \return pointer to the allocated matrix * */ int* createIntMatrix( int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { int *temp = (int*) calloc ( sizeof(int) , in_rows*in_columns ); return temp; } /*! * \brief creates a float matrix of row X column dimensions * * This is actually a vector of size rows X columns X sizeof(float) * use the traditional way for accessing the vector elements * * \return pointer to the allocated matrix * */ float* createFloatMatrix( int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { float *temp = (float*) calloc( sizeof(float), in_columns*in_rows); return temp; } /*! * \brief creates a integer vector of length size * * More Details ... * \return pointer to the newly created vector * */ int *createVector( int in_size /*!< [in] length of the vector */ ) { int *temp = (int*) calloc( sizeof(int), in_size); return temp; } /*! * \brief Gives accuracy of current configuration * * More Details ... * * \return ratio of correct predictions to number of predictions * */ float getAccuracy( int *in_pmat, /*!< [in] predicated class vector */ int *in_cvect, /*!< [in] actual class vector */ int in_total /*!< [in] length of the vector */ ) { int i; float ans=0.0; for ( i=0; i<in_total; i++) { if( in_pmat[i] == in_cvect[i]) ans ++; } return ans/in_total; } /*! 
* \brief Gives transpose of a matrix * * The rows of in_mat are converted to columns of the out_mat, and columns of in_mat are * converted to rows of out_mat * * in_mat = in_rows X in_columns * * out_mat = in_columns X in_rows * * \return transpose of a matrix in out_mat * */ void rotateMatrix( int *in_mat, /*!< [in] input matrix */ int *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in input matrix */ int in_columns /*!< [in] number of columns in output matrix */ ) { int outcolumns=in_rows; int i,j; for ( i=0; i<in_rows; i++) { for ( j=0;j<in_columns;j++) { out_mat[ j*outcolumns + i ] = in_mat[ i*in_columns+j ]; } } } void rotateMatrixF( float *in_mat, /*!< [in] input matrix */ float *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in input matrix */ int in_columns /*!< [in] number of columns in output matrix */ ) { int outcolumns=in_rows; int i,j; for ( i=0; i<in_rows; i++) { for ( j=0;j<in_columns;j++) { out_mat[ j*outcolumns + i ] = in_mat[ i*in_columns+j ]; } } } /*! * \brief Gives the probablity for the current in_val * * probability = \f$ \frac{1}{\sqrt{2\Pi\sigma^{2}}}\exp^{\frac{(x-\mu)^{2}}{2\sigma^{2}}} \f$ * * \return probablity in float * \see * */ float getTheProbablity( float in_vval, /*!< [in] x as in above formulae */ float in_vmean, /*!< [in] mean value */ float in_vvar /*!< [in] variance value */ ) { float result=0.0; float val1 = 1/sqrt( 2.0* M_PI* in_vvar); float val2 = (in_vval-in_vmean)*(in_vval-in_vmean)/(2.0*in_vvar); val2 = 1 / exp( val2); result = log10( val1*val2); if( isnan(result) || isinf(result) ) return 0.0; return result; } /*! * \brief Assigns class to the test inputs * * For all the test files in testArray, gets the group index from in_group_index( which * is decided based on file size), selects the probablites from in_trainMatrix only for * those opcodes whose normalized occurances are greater than 0 * * in_trainMatrix: * four rows in in_trainMatrix are considered as one row, structure is as follows * * | | | | in_num_opcodes number of columns | * |:-------------:|:-------:|:--------:|:--------------------------------:| * | group [0-99] | benign | mean | | * | in_num_groups | | variance | | * | | malware | mean | | * | | | variance | | * * in_testMatrix: * | | in_num_opcodes number of columns | * |:---------------:|:--------------------------------:| * | inumber of rows=n_numtestfiles || * * in_group_index: * has in_numtestfiles number of values containg group number of file, can be thought of * extra column in in_testMatrix * * \return predicted class for files in in_testMatrix * */ void assignClassUsingMeanVarianceData( float *in_trainMatrix, /*!< [in] trained probablity matrix */ float *in_testMatrix, /*!< [in] testing matrix */ int in_num_groups, /*!< [in] number of groups / number of rows in train matrix */ int in_num_opcodes, /*!< [in] number of opcodes / number of columns in test,train matrix */ int in_numtestfiles, /*!< [in] number of test files / number of rows in test matrix */ int *in_group_index, /*!< [in] vector containing group index of each file in test matrix( 1:1 mapping) */ int *out_predict_vect /*!< [out] predicted class */ ) { int i,j; float pmal=0.0, pben=0.0; int mean =0, var =1; /// \todo make this genric or avoid it somehow float vmean =0, vvar =1; int index=0; // Iterate through each file in in_testMatrix for ( i=0; i<in_numtestfiles; i++) { index = in_group_index[i]*4; pmal=0.0; pben=0.0; // For all the opcodes for ( j=0; j<in_num_opcodes; j++) { // If any opcode in current 
file has normalized freq > 0 if( in_testMatrix[i*in_num_opcodes+j] > 0 ) { // get vmean and var considering it is a benign vmean = in_trainMatrix[(index+0+mean)*in_num_opcodes+j]; vvar = in_trainMatrix[(index+0+var )*in_num_opcodes+j]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add the probablity in benign pben += getTheProbablity( in_testMatrix[i*in_num_opcodes+j], vmean, vvar); // get vmean and var considering it is a malware vmean = in_trainMatrix[(index+2+mean)*in_num_opcodes+j]; vvar = in_trainMatrix[(index+2+var )*in_num_opcodes+j]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add the probablity in malware pmal += getTheProbablity( in_testMatrix[i*in_num_opcodes+j], vmean, vvar); } } // assign class depending on which probability is higher if( pmal > 0 && pben > 0) out_predict_vect[i] = pmal > pben ? 1 : 0; else { if( pmal > 0 ) out_predict_vect[i] = 1; else if ( pben > 0 ) out_predict_vect[i] = 0; else //( pmal < 0 && pben < 0) out_predict_vect[i] = pmal > pben ? 1 : 0; } } } /*! * \brief This function assigns class based on the selective features * * Most of the details similar to function assignClassUsingMeanVarianceData, except for * addition of in_feature list * * in_feature_list: * contains list of prominant features for each group * | | each row points to the feature vector in s_group | * | :----------------------:| :------------------------------------------------: | * | rows = number of groups | | * * the feature vector is in_num_opcodes in len, vector has bit set only if the opcode * having id = index of this vector is amongst the prominant feature * * \todo huge dependency !!! try and remove it viz feature list is pointing to feature vector * in s_group * * \return predicted class matrix * */ void assignClassUsingMeanVarianceDataAndFeatureSelection( float *in_trainMatrix, /*!< [in] trained probability matrix */ float *in_testMatrix, /*!< [in] testing matrix */ int *in_feature_list, /*!< [in] array of list of feature vector for each group, number of lists = number of groups */ int in_num_groups, /*!< [in] number of groups / number of rows in train matrix */ int in_num_opcodes, /*!< [in] number of opcodes / number of columns in test,train matrix */ int in_num_features, /*!< [in] number of prominent features */ int in_numtestfiles, /*!< [in] number of test files / number of rows in test matrix */ int *in_group_index, /*!< [in] vector containing group index of each file in test matrix( 1:1 mapping) */ int *out_predict_vect /*!< [out] predicted class */ ) { int i,j; float pmal=0.0, pben=0.0; int mean =0, var =1; /// \todo make this genric or avoid it somehow float vmean =0, vvar =1; int index=0; int grpindex=0; // for each file in in_testMatrix for ( i=0; i<in_numtestfiles; i++) { grpindex = in_group_index[i]; index = grpindex*4; pmal=0.0; pben=0.0; // for each opcode for ( j=0; j<in_num_features; j++) { int opcindex = in_feature_list[grpindex*in_num_features+j]; // if the normalized frequency in in_testMatrix is greater than 0 and current // opcode is amongst the prominent opcode for the group,to which the current // file belongs. 
if( in_testMatrix[i*in_num_opcodes+opcindex] > 0 ) { // get variance and mean assuming it is a benign file vmean = in_trainMatrix[(index+0+mean)*in_num_opcodes+opcindex]; vvar = in_trainMatrix[(index+0+var )*in_num_opcodes+opcindex]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add to probability of file being benign pben += getTheProbablity( in_testMatrix[i*in_num_opcodes+opcindex], vmean, vvar); // get variance and mean assuming it is a malware file vmean = in_trainMatrix[(index+2+mean)*in_num_opcodes+opcindex]; vvar = in_trainMatrix[(index+2+var )*in_num_opcodes+opcindex]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add to probability of file being malware pmal += getTheProbablity( in_testMatrix[i*in_num_opcodes+opcindex], vmean, vvar); } } /// assign class whos probablity is greater if( pmal > 0 && pben > 0) out_predict_vect[i] = pmal > pben ? 1 : 0; else { if( pmal > 0 ) out_predict_vect[i] = 1; else if ( pben > 0 ) out_predict_vect[i] = 0; else //( pmal < 0 && pben < 0) out_predict_vect[i] = pmal > pben ? 1 : 0; } } }
#include <hip/hip_runtime.h> #include <assert.h> #include <limits.h> #include <math.h> #include <stdio.h> #include <stdlib.h> /*! * \brief calculate normalized frequency matrix * * Normalized value(x) = \f$ \frac{ X - min }{ max - min }\f$ * \return normalized matrix * */ int normalize( int *in_mat, /*!< [in] input matrix */ float *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in the input matrix */ int in_columns /*!< [in] number of columns in the output matrix */ ) { int i,j; int max, min; for ( i=0; i<in_rows; i++) { max = INT_MIN; min = INT_MAX; for ( j=0;j<in_columns; j++) { if( min > in_mat[ (i*in_columns)+j ] ) min = in_mat[ (i*in_columns)+j ] ; if( max < in_mat[ (i*in_columns)+j ] ) max = in_mat[ (i*in_columns)+j ]; } for ( j=0; j<in_columns; j++) { out_mat[(i*in_columns)+j] =(float)(in_mat[(i*in_columns)+j] - min)/(float)(max-min) ; } } return 0; } /*! * \brief Calculates the total frequency of each file for each class * * function goes through all the files, calculates total frequency of each total * frequency for each file is stored in last column of the output matrix * in_mat: * numfiles X totalnumopcodes matrix, containing frequency of each opcode * out_mat: * numclass X totalnumopcodes+1 matrix, containing total frequency of each opcode and * each file in last column * in_inrows: * = numfiles * in_incolumns: * = totalnumopcodes * in_outrows: * = number of classes * * \return modified outmat contains total frequency for each opcode and each file * */ int createClassWiseData( int *in_mat, /*!< [in] input matrix */ int *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in input matrix */ int in_columns, /*!< [in] number of columns in input matrix */ int in_outrows /*!< [in] number of rows in output matrix */ ) { int i,j; int sum=0; // for each file for ( i=0; i<in_rows; i++) { sum = 0; // for each opcode for ( j=0; j<in_columns-1; j++) { // add the frequency for each opcode out_mat[ (i*in_columns)+j ] += in_mat [ (i*in_columns)+j ]; // save the frequency for total collection sum += in_mat[ (i*in_columns)+j ]; } out_mat[j] = sum; } return 0; } /*! * \brief gives probability of occurrence of each opcode in each class * * takes as input a matrix containing normalized frequencies of each opcode in a file, * probability of opcode occurring in a class is found, by adding frequencies of each * opcode in each file of each class and then dividing by sum of all frequencies in that * class. 
We take log so the values are in a smaller range * * in_inmat: * numfiles X totalnumopcodes matrix, has normalized frequency value for each opcode * * in_cvect: * numfiles in length, contains,class ,the current row in in_inmat belongs to * * in_inrows: * = numfiles * * in_incolumns: * = totalnumopcodes * * in_outrows: * = number of classes * * in_outcolumns: * = totalnumopcodes * * out_outmat: * in_outrows X in_outcolumns, probability of each opcode in each class * * out_cprob: * probability of each class * * \return out_outmat is the modified matrix which contains the required probabilities * */ int createProbablityMatrix( int *in_inmat, /*!< [in] input matrix */ float *out_outmat, /*!< [out] output matrix */ int *in_cvect, /*!< [in] class vector */ float *out_cprob, /*!< [out] probability of each class */ int in_inrows, /*!< [in] number of rows in input matrix */ int in_incolumns, /*!< [in] number of columns in input matrix */ int in_outrows, /*!< [in] number of rows in output matrix */ int in_outcolumns /*!< [in] number of columns in output matrix */ ) { #define INDEX(i,j,cols) ((i*cols)+j) int i,j; int cls; float *class_wise_total=(float*) calloc(sizeof(float),in_outrows); assert(class_wise_total != NULL ); // For each file for ( i=0; i<in_inrows; i++) { // get class for current file cls = in_cvect[ i ]; // For each opcode for ( j=0; j<in_outcolumns; j++) { // add frequency of each opcode to appropriate a class opcode out_outmat[ INDEX(cls,j,in_outcolumns) ] += (float)in_inmat[ INDEX(i,j,in_incolumns) ]; // add frequency to current class frequency class_wise_total [ cls ] += (float)in_inmat[ INDEX(i,j,in_incolumns) ]; } // increase the count of file in current class out_cprob[ cls ] += 1; } // For each class for ( i=0; i<in_outrows; i++) { // For each opcode for ( j=0; j<in_outcolumns; j++) { // store the probability of current opcode float temp = (log10((out_outmat[ INDEX(i,j,in_outcolumns) ]+1) / (class_wise_total[ i ]+1) )); out_outmat[ INDEX(i,j,in_outcolumns) ] = (-1)*temp; // multiply by -1, because log[0-1] < 0 } // save probability of current class out_cprob[ i ] = (-1)*log10(out_cprob[ i ]/ in_inrows); // multiply by -1 because log[0-1] < 0 } free(class_wise_total); return 0; #undef INDEX } /*! * \brief Displays the float matrix * * Should not be used on huge matrices * * \return * */ void printFloatMatrix( float *in_mat, /*!< [in] pointer to the matrix */ int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { int i,j; for ( i=0; i<in_rows; i++) { for ( j=0; j<in_columns; j++) { printf(" %f",in_mat[ (i*in_columns)+j ]); } printf("\n"); } } /*! * \brief Displays the int matrix * * Should not be used on huge matrices * * \return * */ void printIntMatrix( int *in_mat, /*!< [in] pointer to the matrix */ int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { int i,j; for ( i=0; i<in_rows; i++) { for ( j=0; j<in_columns; j++) { printf(" %d",in_mat[ (i*in_columns)+j ]); } printf("\n"); } } /*! 
* \brief Assigns class to all the test files * * takes a matrix of normalized frequency values for each test file, and probability * matrix for each opcode of each class, calculates probability of each opcode for each * class, assigns a class to a file with maximum probability * * in_mat: * numfiles X total_number_of_opcodes matrix, containing normalized frequencies * * in_cprob: * numclass wide vector, containing probability of each class * * out_pridict: * numfiles wide vector, containing assigned class for each file * * in_rows: * numfiles * * in_classes: * numclasses * * in_columns: * total number of opcodes * * \return predicted class vector * */ void assignClass( int *in_mat, /*!< [in] input matrix */ float *in_prob, /*!< [in] probability matrix */ float *in_cprob, /*!< [in] class probability matrix */ int *out_pridict, /*!< [out] predicted class */ int in_rows, /*!< [in] number of input rows */ int in_classes, /*!< [in] number of classes */ int in_columns /*!< [in] number of columns */ ) { #define INDEX(i,j,cols) ((i*cols)+j) int i,j,k; double *classprob = (double*) calloc( sizeof(double), in_classes); // for each file for ( i=0; i<in_rows; i++) { // for each class for ( k=0; k<in_classes; k++) classprob[ k ] = in_cprob[k]; // for each opcode for ( j=0; j<in_columns; j++) { // for each class for ( k=0; k<in_classes; k++) { // for opcodes having normalized frequency greater than 1 if ( in_mat [ INDEX(i,j,in_columns) ] > 0 ) { // add the probability to current class ( add because we already have // log10 of those values) // TODO remove multiplication and check classprob[ k ] += in_mat [ INDEX(i,j,in_columns) ]*in_prob [ INDEX(k,j,in_columns) ]; } } } int maxClass=0; // for each class for ( k=0; k<in_classes; k++) { // save the max class if( classprob[ maxClass ] > classprob[k] ) maxClass = k; } // assign the max class out_pridict[i] = maxClass; } free(classprob); #undef INDEX } /*! * \brief creates a int matrix of row X column dimensions * * This is actually a vector of size rows X columns X sizeof(int) * use the traditional way for accessing the vector elements * * \return pointer to the allocated matrix * */ int* createIntMatrix( int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { int *temp = (int*) calloc ( sizeof(int) , in_rows*in_columns ); return temp; } /*! * \brief creates a float matrix of row X column dimensions * * This is actually a vector of size rows X columns X sizeof(float) * use the traditional way for accessing the vector elements * * \return pointer to the allocated matrix * */ float* createFloatMatrix( int in_rows, /*!< [in] number of rows in the matrix */ int in_columns /*!< [in] number of columns in the matrix */ ) { float *temp = (float*) calloc( sizeof(float), in_columns*in_rows); return temp; } /*! * \brief creates a integer vector of length size * * More Details ... * \return pointer to the newly created vector * */ int *createVector( int in_size /*!< [in] length of the vector */ ) { int *temp = (int*) calloc( sizeof(int), in_size); return temp; } /*! * \brief Gives accuracy of current configuration * * More Details ... * * \return ratio of correct predictions to number of predictions * */ float getAccuracy( int *in_pmat, /*!< [in] predicated class vector */ int *in_cvect, /*!< [in] actual class vector */ int in_total /*!< [in] length of the vector */ ) { int i; float ans=0.0; for ( i=0; i<in_total; i++) { if( in_pmat[i] == in_cvect[i]) ans ++; } return ans/in_total; } /*! 
* \brief Gives transpose of a matrix * * The rows of in_mat are converted to columns of the out_mat, and columns of in_mat are * converted to rows of out_mat * * in_mat = in_rows X in_columns * * out_mat = in_columns X in_rows * * \return transpose of a matrix in out_mat * */ void rotateMatrix( int *in_mat, /*!< [in] input matrix */ int *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in input matrix */ int in_columns /*!< [in] number of columns in output matrix */ ) { int outcolumns=in_rows; int i,j; for ( i=0; i<in_rows; i++) { for ( j=0;j<in_columns;j++) { out_mat[ j*outcolumns + i ] = in_mat[ i*in_columns+j ]; } } } void rotateMatrixF( float *in_mat, /*!< [in] input matrix */ float *out_mat, /*!< [out] output matrix */ int in_rows, /*!< [in] number of rows in input matrix */ int in_columns /*!< [in] number of columns in output matrix */ ) { int outcolumns=in_rows; int i,j; for ( i=0; i<in_rows; i++) { for ( j=0;j<in_columns;j++) { out_mat[ j*outcolumns + i ] = in_mat[ i*in_columns+j ]; } } } /*! * \brief Gives the probablity for the current in_val * * probability = \f$ \frac{1}{\sqrt{2\Pi\sigma^{2}}}\exp^{\frac{(x-\mu)^{2}}{2\sigma^{2}}} \f$ * * \return probablity in float * \see * */ float getTheProbablity( float in_vval, /*!< [in] x as in above formulae */ float in_vmean, /*!< [in] mean value */ float in_vvar /*!< [in] variance value */ ) { float result=0.0; float val1 = 1/sqrt( 2.0* M_PI* in_vvar); float val2 = (in_vval-in_vmean)*(in_vval-in_vmean)/(2.0*in_vvar); val2 = 1 / exp( val2); result = log10( val1*val2); if( isnan(result) || isinf(result) ) return 0.0; return result; } /*! * \brief Assigns class to the test inputs * * For all the test files in testArray, gets the group index from in_group_index( which * is decided based on file size), selects the probablites from in_trainMatrix only for * those opcodes whose normalized occurances are greater than 0 * * in_trainMatrix: * four rows in in_trainMatrix are considered as one row, structure is as follows * * | | | | in_num_opcodes number of columns | * |:-------------:|:-------:|:--------:|:--------------------------------:| * | group [0-99] | benign | mean | | * | in_num_groups | | variance | | * | | malware | mean | | * | | | variance | | * * in_testMatrix: * | | in_num_opcodes number of columns | * |:---------------:|:--------------------------------:| * | inumber of rows=n_numtestfiles || * * in_group_index: * has in_numtestfiles number of values containg group number of file, can be thought of * extra column in in_testMatrix * * \return predicted class for files in in_testMatrix * */ void assignClassUsingMeanVarianceData( float *in_trainMatrix, /*!< [in] trained probablity matrix */ float *in_testMatrix, /*!< [in] testing matrix */ int in_num_groups, /*!< [in] number of groups / number of rows in train matrix */ int in_num_opcodes, /*!< [in] number of opcodes / number of columns in test,train matrix */ int in_numtestfiles, /*!< [in] number of test files / number of rows in test matrix */ int *in_group_index, /*!< [in] vector containing group index of each file in test matrix( 1:1 mapping) */ int *out_predict_vect /*!< [out] predicted class */ ) { int i,j; float pmal=0.0, pben=0.0; int mean =0, var =1; /// \todo make this genric or avoid it somehow float vmean =0, vvar =1; int index=0; // Iterate through each file in in_testMatrix for ( i=0; i<in_numtestfiles; i++) { index = in_group_index[i]*4; pmal=0.0; pben=0.0; // For all the opcodes for ( j=0; j<in_num_opcodes; j++) { // If any opcode in current 
file has normalized freq > 0 if( in_testMatrix[i*in_num_opcodes+j] > 0 ) { // get vmean and var considering it is a benign vmean = in_trainMatrix[(index+0+mean)*in_num_opcodes+j]; vvar = in_trainMatrix[(index+0+var )*in_num_opcodes+j]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add the probablity in benign pben += getTheProbablity( in_testMatrix[i*in_num_opcodes+j], vmean, vvar); // get vmean and var considering it is a malware vmean = in_trainMatrix[(index+2+mean)*in_num_opcodes+j]; vvar = in_trainMatrix[(index+2+var )*in_num_opcodes+j]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add the probablity in malware pmal += getTheProbablity( in_testMatrix[i*in_num_opcodes+j], vmean, vvar); } } // assign class depending on which probability is higher if( pmal > 0 && pben > 0) out_predict_vect[i] = pmal > pben ? 1 : 0; else { if( pmal > 0 ) out_predict_vect[i] = 1; else if ( pben > 0 ) out_predict_vect[i] = 0; else //( pmal < 0 && pben < 0) out_predict_vect[i] = pmal > pben ? 1 : 0; } } } /*! * \brief This function assigns class based on the selective features * * Most of the details similar to function assignClassUsingMeanVarianceData, except for * addition of in_feature list * * in_feature_list: * contains list of prominant features for each group * | | each row points to the feature vector in s_group | * | :----------------------:| :------------------------------------------------: | * | rows = number of groups | | * * the feature vector is in_num_opcodes in len, vector has bit set only if the opcode * having id = index of this vector is amongst the prominant feature * * \todo huge dependency !!! try and remove it viz feature list is pointing to feature vector * in s_group * * \return predicted class matrix * */ void assignClassUsingMeanVarianceDataAndFeatureSelection( float *in_trainMatrix, /*!< [in] trained probability matrix */ float *in_testMatrix, /*!< [in] testing matrix */ int *in_feature_list, /*!< [in] array of list of feature vector for each group, number of lists = number of groups */ int in_num_groups, /*!< [in] number of groups / number of rows in train matrix */ int in_num_opcodes, /*!< [in] number of opcodes / number of columns in test,train matrix */ int in_num_features, /*!< [in] number of prominent features */ int in_numtestfiles, /*!< [in] number of test files / number of rows in test matrix */ int *in_group_index, /*!< [in] vector containing group index of each file in test matrix( 1:1 mapping) */ int *out_predict_vect /*!< [out] predicted class */ ) { int i,j; float pmal=0.0, pben=0.0; int mean =0, var =1; /// \todo make this genric or avoid it somehow float vmean =0, vvar =1; int index=0; int grpindex=0; // for each file in in_testMatrix for ( i=0; i<in_numtestfiles; i++) { grpindex = in_group_index[i]; index = grpindex*4; pmal=0.0; pben=0.0; // for each opcode for ( j=0; j<in_num_features; j++) { int opcindex = in_feature_list[grpindex*in_num_features+j]; // if the normalized frequency in in_testMatrix is greater than 0 and current // opcode is amongst the prominent opcode for the group,to which the current // file belongs. 
if( in_testMatrix[i*in_num_opcodes+opcindex] > 0 ) { // get variance and mean assuming it is a benign file vmean = in_trainMatrix[(index+0+mean)*in_num_opcodes+opcindex]; vvar = in_trainMatrix[(index+0+var )*in_num_opcodes+opcindex]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add to probability of file being benign pben += getTheProbablity( in_testMatrix[i*in_num_opcodes+opcindex], vmean, vvar); // get variance and mean assuming it is a malware file vmean = in_trainMatrix[(index+2+mean)*in_num_opcodes+opcindex]; vvar = in_trainMatrix[(index+2+var )*in_num_opcodes+opcindex]; assert( vmean >= 0.0f && vvar >= 0.0f ); // add to probability of file being malware pmal += getTheProbablity( in_testMatrix[i*in_num_opcodes+opcindex], vmean, vvar); } } /// assign class whos probablity is greater if( pmal > 0 && pben > 0) out_predict_vect[i] = pmal > pben ? 1 : 0; else { if( pmal > 0 ) out_predict_vect[i] = 1; else if ( pben > 0 ) out_predict_vect[i] = 0; else //( pmal < 0 && pben < 0) out_predict_vect[i] = pmal > pben ? 1 : 0; } } }
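Note on the pair above: the port is header-only, since every function in this file executes on the host; swapping cuda_runtime.h for hip/hip_runtime.h is the entire conversion. One documentation point worth recording: the Doxygen formula on getTheProbablity drops the minus sign in the exponent (the code applies it by dividing by exp(val2)). The quantity the classifier actually accumulates is the Gaussian log-likelihood:

\log_{10} p(x) = \log_{10}\left( \frac{1}{\sqrt{2\pi\sigma^{2}}} \, \exp\left( -\frac{(x-\mu)^{2}}{2\sigma^{2}} \right) \right)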
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> static __device__ __forceinline__ unsigned int bfind32_cuda(unsigned int x) { unsigned int ret; asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(x)); return 31 - ret; } __device__ __host__ unsigned int nlz32_IEEE(unsigned int x) { /* Hacker's Delight 2nd by H. S. Warren Jr., 5.3, p. 104 -- */ double d = x; d += 0.5; unsigned int *p = ((unsigned int*)&d) + 1; return 0x41e - (*p >> 20); // 31 - ((*(p+1)>>20) - 0x3FF) } __device__ __host__ unsigned int ceil2pow32(unsigned int x) { return (-(x != 0)) & (1 << (32 - nlz32_IEEE(x - 1))); } cudaError_t prefixScan(int *a, unsigned const int nsize); __global__ void prefscan(int *a, const int width) { int thidx = threadIdx.x; if ( !(thidx < (width>>1)) ) { a[thidx] = a[thidx - (width >> 1)] + a[thidx]; } } int main() { int a[] = { 11, 21, 13, 24, 8, -3, 15, 31 }; const unsigned int arraySize = sizeof(a)/sizeof(int); printf("{ "); for (int i = 0; i < arraySize; i++) { printf("%3d, ", a[i]); } printf("} = \n"); // Add vectors in parallel. cudaError_t cudaStatus = prefixScan(a, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "prefscan failed!"); return 1; } printf("{ "); for (int i = 0; i < arraySize; i++) { printf("%3d, ", a[i]); } printf("}\n"); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } cudaError_t prefixScan(int *a, unsigned const int nsize) { int *dev_a = 0; unsigned int arraySize = ceil2pow32(nsize); cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for vectors a and b. cudaStatus = cudaMalloc((void**)&dev_a, arraySize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, nsize * sizeof(int), cudaMemcpyHostToDevice); if ( nsize < arraySize) cudaStatus = cudaMemset(dev_a + nsize, 0, arraySize - nsize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy/Memset failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. for(int width = 2; width <= arraySize; width <<= 1) prefscan<<<1, arraySize>>>(dev_a, width); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "prefixScanKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(a, dev_a, nsize * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_a); return cudaStatus; }
#include "hip/hip_runtime.h" #include <stdio.h> static __device__ __forceinline__ unsigned int bfind32_cuda(unsigned int x) { unsigned int ret; asm volatile("bfind.u32 %0, %1;" : "=r"(ret) : "r"(x)); return 31 - ret; } __device__ __host__ unsigned int nlz32_IEEE(unsigned int x) { /* Hacker's Delight 2nd by H. S. Warren Jr., 5.3, p. 104 -- */ double d = x; d += 0.5; unsigned int *p = ((unsigned int*)&d) + 1; return 0x41e - (*p >> 20); // 31 - ((*(p+1)>>20) - 0x3FF) } __device__ __host__ unsigned int ceil2pow32(unsigned int x) { return (-(x != 0)) & (1 << (32 - nlz32_IEEE(x - 1))); } hipError_t prefixScan(int *a, unsigned const int nsize); __global__ void prefscan(int *a, const int width) { int thidx = threadIdx.x; if ( !(thidx < (width>>1)) ) { a[thidx] = a[thidx - (width >> 1)] + a[thidx]; } } int main() { int a[] = { 11, 21, 13, 24, 8, -3, 15, 31 }; const unsigned int arraySize = sizeof(a)/sizeof(int); printf("{ "); for (int i = 0; i < arraySize; i++) { printf("%3d, ", a[i]); } printf("} = \n"); // Add vectors in parallel. hipError_t cudaStatus = prefixScan(a, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "prefscan failed!"); return 1; } printf("{ "); for (int i = 0; i < arraySize; i++) { printf("%3d, ", a[i]); } printf("}\n"); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } hipError_t prefixScan(int *a, unsigned const int nsize) { int *dev_a = 0; unsigned int arraySize = ceil2pow32(nsize); hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for vectors a and b. cudaStatus = hipMalloc((void**)&dev_a, arraySize * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, nsize * sizeof(int), hipMemcpyHostToDevice); if ( nsize < arraySize) cudaStatus = hipMemset(dev_a + nsize, 0, arraySize - nsize); if (cudaStatus != hipSuccess) { fprintf(stderr, "cudaMemcpy/Memset failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. for(int width = 2; width <= arraySize; width <<= 1) prefscan<<<1, arraySize>>>(dev_a, width); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "prefixScanKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(a, dev_a, nsize * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_a); return cudaStatus; }
Convert the following CUDA code to AMD GPU code: cuda #include <thrust/scan.h> #include <thrust/device_vector.h> #include <thrust/count.h> #include <thrust/functional.h> #include <iostream> #include <iomanip> struct not_zero{ __host__ __device__ bool operator()(const float a) { return a != 0; } }; int main() { thrust::device_vector<int> V(10, 0); V[0] = 1; V[3] = 5; V[5] = 10; V[7] = 15; V[9] = 25; int result = thrust::count(V.begin(), V.end(), 0); std::cout << "Zero Occured " << result << " Times." << std::endl; result = thrust::count_if(V.begin(), V.end(), not_zero()); std::cout << "There were " << result << " non-zero values." << std::endl; thrust::device_vector<int> E(10); thrust::device_vector<int> I(10); thrust::inclusive_scan(V.begin(), V.end(), I.begin()); thrust::exclusive_scan(V.begin(), V.end(), E.begin()); std::cout << "V I E" << std::endl; for (int i = 0; i < V.size(); i++) { std::cout << V[i] << " , " << I[i] << " , " << E[i] << std::endl; } }
#include <hip/hip_runtime.h> #include <thrust/scan.h> #include <thrust/device_vector.h> #include <thrust/count.h> #include <thrust/functional.h> #include <iostream> #include <iomanip> struct not_zero{ __host__ __device__ bool operator()(const float a) { return a != 0; } }; int main() { thrust::device_vector<int> V(10, 0); V[0] = 1; V[3] = 5; V[5] = 10; V[7] = 15; V[9] = 25; int result = thrust::count(V.begin(), V.end(), 0); std::cout << "Zero Occured " << result << " Times." << std::endl; result = thrust::count_if(V.begin(), V.end(), not_zero()); std::cout << "There were " << result << " non-zero values." << std::endl; thrust::device_vector<int> E(10); thrust::device_vector<int> I(10); thrust::inclusive_scan(V.begin(), V.end(), I.begin()); thrust::exclusive_scan(V.begin(), V.end(), E.begin()); std::cout << "V I E" << std::endl; for (int i = 0; i < V.size(); i++) { std::cout << V[i] << " , " << I[i] << " , " << E[i] << std::endl; } }
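Note on the pair above: no API renames are needed, because on ROCm the thrust/* headers resolve to rocThrust, which keeps the thrust namespace and signatures. As a hand-computed sanity check for V = {1,0,0,5,0,10,0,15,0,25}:

// count(V.begin(), V.end(), 0)              -> 5   (zeros at indices 1,2,4,6,8)
// count_if(V.begin(), V.end(), not_zero())  -> 5
// inclusive_scan  I -> 1 1 1 6 6 16 16 31 31 56
// exclusive_scan  E -> 0 1 1 1 6 6 16 16 31 31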
Convert the following CUDA code to AMD GPU code: cuda /* Two Dimensional (2D) Image Convolution : A Basic Approch Image Convolution is a very basic operation in the field of Image Processing. It is required in many algorithms in Image Processing. Also it is very compute intensive task as it involves operation with pixels. Its a transformation which involves a Mask and an Image on which that mask will be performing the operation. General steps of Convolution: The center point of Mask is first placed on to the Image Pixel. Each pixel of Mask is multiplied to corresponding pixel of Image. A complete sum (Cumulative sum) of all multiplications performed between Mask and Image pixels are then put in the related Image pixel value as a result of Convolution. Following is the sample code of Image convolution. The Mask_width, Mask_height are set to 3 as its a 3 X 3 2D array with all values set to 1. Also width and height are also set to 3 as I considered image of size 3 X 3 only for the sake of the example. You may change the values as per your need. */ #include<stdio.h> #include<stdlib.h> #define Mask_width 3 #define Mask_height 3 #define width 3 #define height 3 float convolution_2D_OnHost(float * N,float * M,int i,int j); int main() { float * input; float * Mask; float * output; input=(float *)malloc(sizeof(float)*width*height); Mask=(float *)malloc(sizeof(float)*Mask_width*Mask_height); output=(float *)malloc(sizeof(float)*width*height); for(int i=0;i<width*height;i++) { input[i]=1.0; } for(int i=0;i<Mask_width*Mask_height;i++) { Mask[i]=1.0; } printf("\nInput Array:\n"); for(int i=0;i<width*height;i++) { if(i>0 && (i%width==0)) printf("\n"); printf("%0.2f ",input[i]); }printf("\n"); printf("\nMask:\n"); for(int i=0;i<Mask_width*Mask_height;i++) { if(i>0 && (i%Mask_width==0)) printf("\n"); printf("%0.2f ",Mask[i]); }printf("\n"); for(int i=0;i<width;i++) { for(int j=0;j<height;j++) { output[(i*width)+j]=convolution_2D_OnHost(input,Mask,i,j); } } printf("\nOutput:\n"); for(int i=0;i<width*height;i++) { if(i>0 && (i%width==0)) printf("\n"); printf("%d = %0.2f \t",i,*(output+i)); } free(input); free(Mask); free(output); return 0; } float convolution_2D_OnHost(float * N,float * M,int i,int j) { float Pvalue=0.0; int N_start_point_i=i-(Mask_width/2); int N_start_point_j=j-(Mask_height/2); for(int k=0;k<Mask_width;k++) { for(int l=0;l<Mask_height;l++) { if(((N_start_point_i+k)>=0) && ((N_start_point_i+k)<width)&&((N_start_point_j+l)>=0)&&((N_start_point_j+l)<height)) { Pvalue+=N[(N_start_point_i+k)*width+(N_start_point_j+l)]*M[(k*Mask_width)+l]; } } } return(Pvalue); }
/* Two Dimensional (2D) Image Convolution : A Basic Approch Image Convolution is a very basic operation in the field of Image Processing. It is required in many algorithms in Image Processing. Also it is very compute intensive task as it involves operation with pixels. Its a transformation which involves a Mask and an Image on which that mask will be performing the operation. General steps of Convolution: The center point of Mask is first placed on to the Image Pixel. Each pixel of Mask is multiplied to corresponding pixel of Image. A complete sum (Cumulative sum) of all multiplications performed between Mask and Image pixels are then put in the related Image pixel value as a result of Convolution. Following is the sample code of Image convolution. The Mask_width, Mask_height are set to 3 as its a 3 X 3 2D array with all values set to 1. Also width and height are also set to 3 as I considered image of size 3 X 3 only for the sake of the example. You may change the values as per your need. */ #include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> #define Mask_width 3 #define Mask_height 3 #define width 3 #define height 3 float convolution_2D_OnHost(float * N,float * M,int i,int j); int main() { float * input; float * Mask; float * output; input=(float *)malloc(sizeof(float)*width*height); Mask=(float *)malloc(sizeof(float)*Mask_width*Mask_height); output=(float *)malloc(sizeof(float)*width*height); for(int i=0;i<width*height;i++) { input[i]=1.0; } for(int i=0;i<Mask_width*Mask_height;i++) { Mask[i]=1.0; } printf("\nInput Array:\n"); for(int i=0;i<width*height;i++) { if(i>0 && (i%width==0)) printf("\n"); printf("%0.2f ",input[i]); }printf("\n"); printf("\nMask:\n"); for(int i=0;i<Mask_width*Mask_height;i++) { if(i>0 && (i%Mask_width==0)) printf("\n"); printf("%0.2f ",Mask[i]); }printf("\n"); for(int i=0;i<width;i++) { for(int j=0;j<height;j++) { output[(i*width)+j]=convolution_2D_OnHost(input,Mask,i,j); } } printf("\nOutput:\n"); for(int i=0;i<width*height;i++) { if(i>0 && (i%width==0)) printf("\n"); printf("%d = %0.2f \t",i,*(output+i)); } free(input); free(Mask); free(output); return 0; } float convolution_2D_OnHost(float * N,float * M,int i,int j) { float Pvalue=0.0; int N_start_point_i=i-(Mask_width/2); int N_start_point_j=j-(Mask_height/2); for(int k=0;k<Mask_width;k++) { for(int l=0;l<Mask_height;l++) { if(((N_start_point_i+k)>=0) && ((N_start_point_i+k)<width)&&((N_start_point_j+l)>=0)&&((N_start_point_j+l)<height)) { Pvalue+=N[(N_start_point_i+k)*width+(N_start_point_j+l)]*M[(k*Mask_width)+l]; } } } return(Pvalue); }
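The sample above runs entirely on the host. Since the walkthrough spells out the mask-centering and boundary test, a device-side sketch of the same operation may help; this is a hypothetical addition (convolution_2D_OnDevice is our name), and it uses Mask_height for the row offset where the host code reuses Mask_width, which is equivalent only while both equal 3:

__global__ void convolution_2D_OnDevice(const float *N, const float *M, float *P)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    if (i >= height || j >= width) return;
    float Pvalue = 0.0f;
    int si = i - (Mask_height / 2);  // top-left corner of the mask footprint
    int sj = j - (Mask_width / 2);
    for (int k = 0; k < Mask_height; k++)
        for (int l = 0; l < Mask_width; l++)
            if ((si + k) >= 0 && (si + k) < height && (sj + l) >= 0 && (sj + l) < width)
                Pvalue += N[(si + k) * width + (sj + l)] * M[(k * Mask_width) + l];
    P[(i * width) + j] = Pvalue;  // one thread produces one output pixel
}

Launched over a grid covering width x height (a single block suffices for the 3 x 3 example), it yields the same output as convolution_2D_OnHost.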
Convert the following CUDA code to AMD GPU code: cuda #include<stdio.h> #include<stdlib.h> __global__ void matadd(int *d_a,int *d_b,int *d_c, int n){ int idx=threadIdx.x; if(idx<n) d_c[idx]=d_a[idx]+d_b[idx]; } int main(){ int n; scanf("%d",&n); cudaEvent_t start,stop; float escap_time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); int *h_a,*h_b,*h_c; cudaHostAlloc((void**)&h_a,20*n*sizeof(int),cudaHostAllocDefault); cudaHostAlloc((void**)&h_b,20*n*sizeof(int),cudaHostAllocDefault); cudaHostAlloc((void**)&h_c,20*n*sizeof(int),cudaHostAllocDefault); return 0; }
#include <hip/hip_runtime.h> #include<stdio.h> #include<stdlib.h> __global__ void matadd(int *d_a,int *d_b,int *d_c, int n){ int idx=threadIdx.x; if(idx<n) d_c[idx]=d_a[idx]+d_b[idx]; } int main(){ int n; scanf("%d",&n); hipEvent_t start,stop; float escap_time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); int *h_a,*h_b,*h_c; hipHostMalloc((void**)&h_a,20*n*sizeof(int),hipHostMallocDefault); hipHostMalloc((void**)&h_b,20*n*sizeof(int),hipHostMallocDefault); hipHostMalloc((void**)&h_c,20*n*sizeof(int),hipHostMallocDefault); return 0; }
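Note on the pair above: current hipify maps cudaHostAlloc to hipHostMalloc (hipHostAlloc survives only as a deprecated alias), with cudaHostAllocDefault becoming hipHostMallocDefault, as in the answer above. Pinned buffers should also be released with hipHostFree rather than free(); a minimal allocate/release sketch:

int *h_a = nullptr;
hipHostMalloc((void**)&h_a, 20 * n * sizeof(int), hipHostMallocDefault);  // pinned host memory
/* ... use h_a ... */
hipHostFree(h_a);  // pair with hipHostFree, not free()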
Convert the following CUDA code to AMD GPU code: cuda #include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<iostream> #include<stdlib.h> #include<getopt.h> #include <assert.h> #include <cuda.h> #include <chrono> #define RAND_RANGE_MIN -10.0 #define RAND_RANGE_MAX 10.0 #define SEED 123 #define JACOBI_DEBUG 0 enum ERROR_TYPE { MEMCPY, CMALLOC, ADDK, DEVSYNC }; void init_const(float* v, float x, int dim) { for (int i = 0; i < dim; i++) v[i] = x; return; } void init_matrix(float **A, int matrix_order) { for (int i = 0; i < matrix_order; i++) { A[i] = new float[matrix_order]; if (A[i] == NULL) { std::cerr << "Error while allocating resources." << std::endl; exit(-1); } } } /** Generates a random number in a specific range. @param fMin The lower bound of the range. @param fMax The upper bound of the range. @return The generated number. */ float generate_random_number(float fMin, float fMax) { float f = (float)rand() / RAND_MAX; return fMin + f * (fMax - fMin); } /** Generates a random square matrix. @param A The matrix. */ void generate_random_matrix(float **A, int matrix_order) { for (int i = 0; i < matrix_order; i++) { float sum = 0.0; for (int j = 0; j < matrix_order; j++) if (j != i) { float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX); sum += abs(val); A[i][j] = val; } /* Change back A[i][i] to be > then sum(A[i][j]) */ A[i][i] = sum + generate_random_number(1.0, RAND_RANGE_MAX); } } /** Generates a random vector. @param v . */ void generate_random_vector(float *v, int matrix_order) { /* generate vector v */ for (int j = 0; j < matrix_order; j++) { float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX); v[j] = val; } } /** Generate a random number in a specific range. @param A The square matrix. @param v The vector. @param start . @param end . */ void matrix_vector_multiplication(float *x, float **A, float *v, int matrix_order) { for (int i = 0; i < matrix_order; i++) { x[i] = 0; for (int j = 0; j < matrix_order; j++) x[i] += A[i][j] * v[j]; } return; } void error_on_computation(float* x, float ** A, float *b, int matrix_order, float *err) { float error = 0.0, sum = 0.0; for (size_t i = 0; i < matrix_order; i++) { sum = 0.0; for (size_t j = 0; j < matrix_order; j++) { sum = sum + A[i][j] * x[j]; } error = error + abs(sum - b[i]); } *err = error / matrix_order; return; } std::chrono::duration<double> delta_time(std::chrono::time_point<std::chrono::system_clock> start, std::chrono::time_point<std::chrono::system_clock> end) { return end - start; } cudaError_t error_check(cudaError_t cudaStatus, ERROR_TYPE msgtype, float*dev_a, float*dev_x_solution, float*dev_b, float*dev_prec_values){ if (cudaStatus != cudaSuccess) { switch(msgtype) { case (CMALLOC):{ std::cerr << "cudaMalloc failed!" << std::endl; } case (MEMCPY):{ std::cerr << "cudaMemcpy failed!" << std::endl; } case (ADDK):{ std::cerr << "addKernel launch failed:" << cudaGetErrorString(cudaStatus) << std::endl; } case(DEVSYNC):{ std::cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching jacobi!" 
<< std::endl; } } cudaFree(dev_a); cudaFree(dev_x_solution); cudaFree(dev_prec_values); cudaFree(dev_b); return cudaStatus; } } __global__ void iteration(float * a, float * x_solution, float * b, float * prec_values, unsigned int matrix_order) { unsigned int j, i; float sigma = 0.0, newValue = 0.0; int bx = blockIdx.x, tx = threadIdx.x; i = tx + bx*blockDim.x; if (i >= matrix_order) return; if (i < matrix_order){ sigma = b[i]; int idx_Ai = i*matrix_order; for (j = 0; j < matrix_order; j++) { if (i != j) { sigma = sigma - a[idx_Ai + j] * x_solution[j]; } } newValue = sigma / a[idx_Ai + i]; prec_values[i] = (x_solution[i] - newValue)*(x_solution[i] - newValue); x_solution[i] = newValue; __syncthreads(); } } cudaError_t cuda_jacobi_solve(float * a, float * x_solution, float * b, float eps, unsigned int matrix_order, int * max_iter, float *prec) { unsigned int i, j; int k = 0, nTiles; float *dev_a = 0, *dev_x_solution = 0, *dev_b = 0, *dev_prec_values = 0; float accur = 1.0, sum = 0.0; float *prec_values = new float[matrix_order]; init_const(prec_values, 0.0, matrix_order); size_t matrix_size = matrix_order*matrix_order*sizeof(float); size_t vector_size = matrix_order*sizeof(float); cudaError_t cudaStatus; cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); cudaStatus = cudaMalloc((void**)&dev_a, matrix_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = cudaMalloc((void**)&dev_x_solution, vector_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = cudaMalloc((void**)&dev_b, vector_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = cudaMalloc((void**)&dev_prec_values, vector_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = cudaMemcpy(dev_a, a, matrix_size, cudaMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = cudaMemcpy(dev_x_solution, x_solution, vector_size, cudaMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = cudaMemcpy(dev_b, b, vector_size, cudaMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = cudaMemcpy(dev_prec_values, prec_values, vector_size, cudaMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); int tileSize = 16; nTiles = matrix_order/tileSize + (matrix_order%tileSize == 0?0:1); for (i = 0; i < *max_iter; i++) { iteration <<<nTiles,tileSize>>> (dev_a, dev_x_solution, dev_b, dev_prec_values, matrix_order); k++; // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); error_check(cudaStatus, ADDK, dev_a, dev_x_solution, dev_b, dev_prec_values); // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. 
cudaStatus = cudaDeviceSynchronize(); error_check(cudaStatus, DEVSYNC, dev_a, dev_x_solution, dev_b, dev_prec_values); // Retreive the dev_prec_values vector with all the precision values cudaStatus = cudaMemcpy(prec_values, dev_prec_values, vector_size, cudaMemcpyDeviceToHost); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); // Computes the precision sum = 0.0; for (j = 0; j < matrix_order; j++) { sum = sum + fabs(prec_values[j]); } accur = sqrt(sum); if (accur <= eps) break; } *max_iter = k; *prec = accur; cudaStatus = cudaMemcpy(x_solution, dev_x_solution, vector_size, cudaMemcpyDeviceToHost); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaFree(dev_a); cudaFree(dev_x_solution); cudaFree(dev_prec_values); cudaFree(dev_b); } int main(int argc, char *argv[]){ const int matrix_order = atoi(argv[1]); // order of the matrix int max_iter = atoi(argv[2]); // number of max_iterations const float epsilon = atof(argv[3]); // precision int iterations = max_iter; std::chrono::time_point<std::chrono::system_clock> start_time, end_time; float *x_solution_h, *b_h, **A_h, *rand_x_h; float *extended_a = 0; // Allocate memory for CPU. A_h = new float *[matrix_order]; b_h = new float[matrix_order]; x_solution_h = new float[matrix_order]; rand_x_h = new float[matrix_order]; if (A_h == NULL || b_h == NULL || rand_x_h == NULL || x_solution_h == NULL) { std::cerr << "Error while allocating resources." << std::endl; exit(-1); } init_matrix(A_h, matrix_order); srand(SEED); generate_random_matrix(A_h, matrix_order); extended_a = (float*)malloc(matrix_order*matrix_order*sizeof(float)); for (int i = 0; i < matrix_order; i++) { for (int j = 0; j < matrix_order; j++) { extended_a[i*matrix_order + j] = A_h[i][j]; } } generate_random_vector(rand_x_h, matrix_order); int repetitions = 20; float precision = 1.0, err = 0.0; matrix_vector_multiplication(b_h, A_h, rand_x_h, matrix_order); for (int m=0; m<repetitions; m++) { init_const(x_solution_h, 0.0, matrix_order); iterations = max_iter; err=0.0; start_time = std::chrono::system_clock::now(); cuda_jacobi_solve(extended_a, x_solution_h, b_h, epsilon, matrix_order, &iterations, &precision); end_time = std::chrono::system_clock::now(); std::cout << delta_time(start_time, end_time).count() << "\t" ; error_on_computation(x_solution_h, A_h, b_h, matrix_order, &err); } std::cout << "\t" << iterations << "\t" << precision << "\t" << err << std::endl; // Release resources for (int i = 0; i < matrix_order; i++) delete[] A_h[i]; delete[] A_h; delete[] b_h; delete[] rand_x_h; delete[] x_solution_h; free(extended_a); return 0; }
#include "hip/hip_runtime.h" #include<stdio.h> #include<iostream> #include<stdlib.h> #include<getopt.h> #include <assert.h> #include <chrono> #define RAND_RANGE_MIN -10.0 #define RAND_RANGE_MAX 10.0 #define SEED 123 #define JACOBI_DEBUG 0 enum ERROR_TYPE { MEMCPY, CMALLOC, ADDK, DEVSYNC }; void init_const(float* v, float x, int dim) { for (int i = 0; i < dim; i++) v[i] = x; return; } void init_matrix(float **A, int matrix_order) { for (int i = 0; i < matrix_order; i++) { A[i] = new float[matrix_order]; if (A[i] == NULL) { std::cerr << "Error while allocating resources." << std::endl; exit(-1); } } } /** Generates a random number in a specific range. @param fMin The lower bound of the range. @param fMax The upper bound of the range. @return The generated number. */ float generate_random_number(float fMin, float fMax) { float f = (float)rand() / RAND_MAX; return fMin + f * (fMax - fMin); } /** Generates a random square matrix. @param A The matrix. */ void generate_random_matrix(float **A, int matrix_order) { for (int i = 0; i < matrix_order; i++) { float sum = 0.0; for (int j = 0; j < matrix_order; j++) if (j != i) { float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX); sum += abs(val); A[i][j] = val; } /* Change back A[i][i] to be > then sum(A[i][j]) */ A[i][i] = sum + generate_random_number(1.0, RAND_RANGE_MAX); } } /** Generates a random vector. @param v . */ void generate_random_vector(float *v, int matrix_order) { /* generate vector v */ for (int j = 0; j < matrix_order; j++) { float val = generate_random_number(RAND_RANGE_MIN, RAND_RANGE_MAX); v[j] = val; } } /** Generate a random number in a specific range. @param A The square matrix. @param v The vector. @param start . @param end . */ void matrix_vector_multiplication(float *x, float **A, float *v, int matrix_order) { for (int i = 0; i < matrix_order; i++) { x[i] = 0; for (int j = 0; j < matrix_order; j++) x[i] += A[i][j] * v[j]; } return; } void error_on_computation(float* x, float ** A, float *b, int matrix_order, float *err) { float error = 0.0, sum = 0.0; for (size_t i = 0; i < matrix_order; i++) { sum = 0.0; for (size_t j = 0; j < matrix_order; j++) { sum = sum + A[i][j] * x[j]; } error = error + abs(sum - b[i]); } *err = error / matrix_order; return; } std::chrono::duration<double> delta_time(std::chrono::time_point<std::chrono::system_clock> start, std::chrono::time_point<std::chrono::system_clock> end) { return end - start; } hipError_t error_check(hipError_t cudaStatus, ERROR_TYPE msgtype, float*dev_a, float*dev_x_solution, float*dev_b, float*dev_prec_values){ if (cudaStatus != hipSuccess) { switch(msgtype) { case (CMALLOC):{ std::cerr << "hipMalloc failed!" << std::endl; } case (MEMCPY):{ std::cerr << "hipMemcpy failed!" << std::endl; } case (ADDK):{ std::cerr << "addKernel launch failed:" << hipGetErrorString(cudaStatus) << std::endl; } case(DEVSYNC):{ std::cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching jacobi!" 
<< std::endl; } } hipFree(dev_a); hipFree(dev_x_solution); hipFree(dev_prec_values); hipFree(dev_b); return cudaStatus; } } __global__ void iteration(float * a, float * x_solution, float * b, float * prec_values, unsigned int matrix_order) { unsigned int j, i; float sigma = 0.0, newValue = 0.0; int bx = blockIdx.x, tx = threadIdx.x; i = tx + bx*blockDim.x; if (i >= matrix_order) return; if (i < matrix_order){ sigma = b[i]; int idx_Ai = i*matrix_order; for (j = 0; j < matrix_order; j++) { if (i != j) { sigma = sigma - a[idx_Ai + j] * x_solution[j]; } } newValue = sigma / a[idx_Ai + i]; prec_values[i] = (x_solution[i] - newValue)*(x_solution[i] - newValue); x_solution[i] = newValue; __syncthreads(); } } hipError_t cuda_jacobi_solve(float * a, float * x_solution, float * b, float eps, unsigned int matrix_order, int * max_iter, float *prec) { unsigned int i, j; int k = 0, nTiles; float *dev_a = 0, *dev_x_solution = 0, *dev_b = 0, *dev_prec_values = 0; float accur = 1.0, sum = 0.0; float *prec_values = new float[matrix_order]; init_const(prec_values, 0.0, matrix_order); size_t matrix_size = matrix_order*matrix_order*sizeof(float); size_t vector_size = matrix_order*sizeof(float); hipError_t cudaStatus; hipDeviceSetCacheConfig(hipFuncCachePreferShared); cudaStatus = hipMalloc((void**)&dev_a, matrix_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = hipMalloc((void**)&dev_x_solution, vector_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = hipMalloc((void**)&dev_b, vector_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = hipMalloc((void**)&dev_prec_values, vector_size); error_check(cudaStatus, CMALLOC, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = hipMemcpy(dev_a, a, matrix_size, hipMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = hipMemcpy(dev_x_solution, x_solution, vector_size, hipMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = hipMemcpy(dev_b, b, vector_size, hipMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); cudaStatus = hipMemcpy(dev_prec_values, prec_values, vector_size, hipMemcpyHostToDevice); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); int tileSize = 16; nTiles = matrix_order/tileSize + (matrix_order%tileSize == 0?0:1); for (i = 0; i < *max_iter; i++) { iteration <<<nTiles,tileSize>>> (dev_a, dev_x_solution, dev_b, dev_prec_values, matrix_order); k++; // Check for any errors launching the kernel cudaStatus = hipGetLastError(); error_check(cudaStatus, ADDK, dev_a, dev_x_solution, dev_b, dev_prec_values); // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. 
cudaStatus = hipDeviceSynchronize(); error_check(cudaStatus, DEVSYNC, dev_a, dev_x_solution, dev_b, dev_prec_values); // Retreive the dev_prec_values vector with all the precision values cudaStatus = hipMemcpy(prec_values, dev_prec_values, vector_size, hipMemcpyDeviceToHost); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); // Computes the precision sum = 0.0; for (j = 0; j < matrix_order; j++) { sum = sum + fabs(prec_values[j]); } accur = sqrt(sum); if (accur <= eps) break; } *max_iter = k; *prec = accur; cudaStatus = hipMemcpy(x_solution, dev_x_solution, vector_size, hipMemcpyDeviceToHost); error_check(cudaStatus, MEMCPY, dev_a, dev_x_solution, dev_b, dev_prec_values); hipFree(dev_a); hipFree(dev_x_solution); hipFree(dev_prec_values); hipFree(dev_b); } int main(int argc, char *argv[]){ const int matrix_order = atoi(argv[1]); // order of the matrix int max_iter = atoi(argv[2]); // number of max_iterations const float epsilon = atof(argv[3]); // precision int iterations = max_iter; std::chrono::time_point<std::chrono::system_clock> start_time, end_time; float *x_solution_h, *b_h, **A_h, *rand_x_h; float *extended_a = 0; // Allocate memory for CPU. A_h = new float *[matrix_order]; b_h = new float[matrix_order]; x_solution_h = new float[matrix_order]; rand_x_h = new float[matrix_order]; if (A_h == NULL || b_h == NULL || rand_x_h == NULL || x_solution_h == NULL) { std::cerr << "Error while allocating resources." << std::endl; exit(-1); } init_matrix(A_h, matrix_order); srand(SEED); generate_random_matrix(A_h, matrix_order); extended_a = (float*)malloc(matrix_order*matrix_order*sizeof(float)); for (int i = 0; i < matrix_order; i++) { for (int j = 0; j < matrix_order; j++) { extended_a[i*matrix_order + j] = A_h[i][j]; } } generate_random_vector(rand_x_h, matrix_order); int repetitions = 20; float precision = 1.0, err = 0.0; matrix_vector_multiplication(b_h, A_h, rand_x_h, matrix_order); for (int m=0; m<repetitions; m++) { init_const(x_solution_h, 0.0, matrix_order); iterations = max_iter; err=0.0; start_time = std::chrono::system_clock::now(); cuda_jacobi_solve(extended_a, x_solution_h, b_h, epsilon, matrix_order, &iterations, &precision); end_time = std::chrono::system_clock::now(); std::cout << delta_time(start_time, end_time).count() << "\t" ; error_on_computation(x_solution_h, A_h, b_h, matrix_order, &err); } std::cout << "\t" << iterations << "\t" << precision << "\t" << err << std::endl; // Release resources for (int i = 0; i < matrix_order; i++) delete[] A_h[i]; delete[] A_h; delete[] b_h; delete[] rand_x_h; delete[] x_solution_h; free(extended_a); return 0; }
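Note on the pair above: two defects are inherited from the CUDA source rather than introduced by the port. The switch in error_check has no break statements, so every case below the matching one also prints, and both error_check and cuda_jacobi_solve can reach the end of a non-void function without returning a value, which is undefined behaviour. A fixed sketch of the switch:

switch (msgtype) {
    case CMALLOC: std::cerr << "hipMalloc failed!" << std::endl; break;
    case MEMCPY:  std::cerr << "hipMemcpy failed!" << std::endl; break;
    case ADDK:    std::cerr << "kernel launch failed: " << hipGetErrorString(cudaStatus) << std::endl; break;
    case DEVSYNC: std::cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching jacobi!" << std::endl; break;
}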
Convert the following CUDA code to AMD GPU code: cuda #include<iostream> #include<cuda.h> #include<cuda_runtime.h> #define N 10 using namespace std; __global__ void mul(int* a_d, int n){ // printf("%d %d %d\n", blockIdx.x,blockDim.x,threadIdx.x); int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < n){ a_d[index] *= 5; } } int main(){ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int *a, *a_d; int size = N * sizeof(int); a = (int*) malloc(size); cout << "Enter " << N << " numbers: "; for(int i=0; i<N; i++){ cin>>a[i]; } cudaMalloc(&a_d, size); cudaMemcpy(a_d, a, size, cudaMemcpyHostToDevice); cudaEventRecord(start); mul<<<1,10>>>(a_d,N); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaMemcpy(a, a_d, size, cudaMemcpyDeviceToHost); cout<<"Matrix After Multiplying:\n"; for(int i=0; i<N; i++){ cout<<a[i]<<" "; } float millis = 0; cudaEventElapsedTime(&millis, start, stop); cout << "\nElasped Time: " << millis << endl; return 0; }
#include<iostream> #include<hip/hip_runtime.h> #define N 10 using namespace std; __global__ void mul(int* a_d, int n){ // printf("%d %d %d\n", blockIdx.x,blockDim.x,threadIdx.x); int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < n){ a_d[index] *= 5; } } int main(){ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int *a, *a_d; int size = N * sizeof(int); a = (int*) malloc(size); cout << "Enter " << N << " numbers: "; for(int i=0; i<N; i++){ cin>>a[i]; } hipMalloc(&a_d, size); hipMemcpy(a_d, a, size, hipMemcpyHostToDevice); hipEventRecord(start); mul<<<1,10>>>(a_d,N); hipDeviceSynchronize(); hipEventRecord(stop); hipMemcpy(a, a_d, size, hipMemcpyDeviceToHost); cout<<"Matrix After Multiplying:\n"; for(int i=0; i<N; i++){ cout<<a[i]<<" "; } float millis = 0; hipEventElapsedTime(&millis, start, stop); cout << "\nElasped Time: " << millis << endl; return 0; }
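Note on the pair above: hipEventElapsedTime is queried without synchronizing on the stop event first. It happens to work here because the blocking hipMemcpy back to the host drains the null stream after the stop record, but the robust ordering is:

hipEventRecord(stop);
hipEventSynchronize(stop);  // wait until the stop event has actually completed
float millis = 0;
hipEventElapsedTime(&millis, start, stop);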
Convert the following CUDA code to AMD GPU code: cuda /* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ static __global__ void example_cuda_kernel(int& r, int x, int y) { r = x * y + (x * 4 - (y / 2)); } int static_launch_kernelB(int x, int y) { int r; example_cuda_kernel<<<1, 1>>>(r, x, y); return r; }
#include <hip/hip_runtime.h> /* * Copyright (c) 2021-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ static __global__ void example_cuda_kernel(int& r, int x, int y) { r = x * y + (x * 4 - (y / 2)); } int static_launch_kernelB(int x, int y) { int r; example_cuda_kernel<<<1, 1>>>(r, x, y); return r; }
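Note on the pair above: this reads like a conversion-tool test case rather than working code, since the kernel binds int& r to a host stack variable, so the device writes through a host address, which is invalid on both CUDA and HIP, and r is returned without ever being written. A working variant would route the result through device memory (a hypothetical rewrite; the names are ours):

__global__ void example_kernel_ptr(int *r, int x, int y)
{
    *r = x * y + (x * 4 - (y / 2));
}

int static_launch_ptrB(int x, int y)
{
    int r = 0, *d_r = nullptr;
    hipMalloc(&d_r, sizeof(int));
    example_kernel_ptr<<<1, 1>>>(d_r, x, y);
    hipMemcpy(&r, d_r, sizeof(int), hipMemcpyDeviceToHost);  // copy the result back
    hipFree(d_r);
    return r;
}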
Convert the following CUDA code to AMD GPU code: cuda #include <algorithm> #include <iostream> #include <vector> #include <time.h> typedef unsigned long long data_t; static inline void check(cudaError_t err, const char* context) { if (err != cudaSuccess) { std::cerr << "CUDA error: " << context << ": " << cudaGetErrorString(err) << std::endl; std::exit(EXIT_FAILURE); } } #define CHECK(x) check(x, #x) const int MAX_CONSTANT = 16*1024; __constant__ unsigned int const_sum[MAX_CONSTANT]; template <class T> void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) { CHECK(cudaMemcpy(target, source, num * sizeof(T), direction)); } static inline int divup(int a, int b) { return (a + b - 1)/b; } // get the 0 bit of each number by bit_shift // example: number : 10001, bit_shit: 1, One: 1, // // it means check if the second bit is 1 or not. __global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; data_t bit = 0; data_t one=1; data_t shift=one<<bit_shift; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ bit=d_in[i]&shift; bit = (bit > 0) ? 1 : 0; d_out[i] = (One ? bit : 1 - bit); } } __global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n, unsigned int total_pre) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for (unsigned int i=start; i<end && i<n; i++){ d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre; if(d_index[i]>=n){ printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]); } // if(d_mask[i]==1){ // d_index[i]=total_pre+d_sum[i]; // } } } __global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ d_out[d_index[i]]=d_in[i]; } } // // pay attention that blockDim.x must be power of 2 // __global__ void blellochScan(unsigned int *out, unsigned int *in, // unsigned int *sum, unsigned int inputSize) { // __shared__ unsigned int temp[2 * 256]; // unsigned int start = blockIdx.x * blockDim.x << 1; // unsigned int tx = threadIdx.x; // unsigned int index = 0; // temp[tx] = (start + tx < inputSize)? in[start+tx]:0; // temp[tx+blockDim.x] = (start + tx + blockDim.x < inputSize)? in[start + tx + blockDim.x] : 0; // // Blelloch Scan // __syncthreads(); // // reduction step // unsigned int stride = 1; // while (stride <= blockDim.x) { // index = (tx + 1) * (stride << 1) - 1; // if (index < (blockDim.x << 1)) { // temp[index] += temp[index - stride]; // } // stride <<= 1; // __syncthreads(); // } // // first store the reduction sum in sum array // // make it zero since it is exclusive scan // if (tx == 0) { // // sum array contains the prefix sum of each // // 2*blockDim blocks of element. 
// if (sum != NULL) { // sum[blockIdx.x] = temp[(blockDim.x << 1) - 1]; // } // temp[(blockDim.x << 1) - 1] = 0; // } // // wait for thread zero to write // __syncthreads(); // // post scan step // stride = blockDim.x; // index = 0; // unsigned int var = 0; // while (stride > 0) { // index = ((stride << 1) * (tx + 1)) - 1; // if (index < (blockDim.x << 1)) { // var = temp[index]; // temp[index] += temp[index - stride]; // temp[index - stride] = var; // } // stride >>= 1; // __syncthreads(); // } // // now write the temp array to output // if (start + tx < inputSize) { // out[start + tx] = temp[tx]; // } // if (start + tx + blockDim.x < inputSize) { // out[start + tx + blockDim.x] = temp[tx + blockDim.x]; // } // } // /* // sum out the blocks' accumulated sums to each element // */ // __global__ void mergeScanBlocks(unsigned int *sum, unsigned int *output, // unsigned int opSize) { // unsigned int index = (blockDim.x * blockIdx.x << 1) + threadIdx.x; // if (index < opSize) { // // output[index] += sum[blockIdx.x]; // output[index] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x]; // // output[index] += tex1Dfetch(tex_sum, blockIdx.x); // } // if (index + blockDim.x < opSize) { // // output[index + blockDim.x] += sum[blockIdx.x]; // output[index + blockDim.x] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x]; // // output[index + blockDim.x] += tex1Dfetch(tex_sum, blockIdx.x); // } // } // /* // api for exclusiveScan // */ // void exclusiveScan(unsigned int *out, unsigned int *in, unsigned int in_size, unsigned int block_size) { // unsigned int numBlocks1 = in_size / block_size; // if (in_size % block_size) numBlocks1++; // unsigned int numBlocks2 = numBlocks1 / 2; // if (numBlocks1 % 2) numBlocks2++; // dim3 dimThreadBlock; // dimThreadBlock.x = block_size; // dimThreadBlock.y = 1; // dimThreadBlock.z = 1; // dim3 dimGrid; // dimGrid.x = numBlocks2; // dimGrid.y = 1; // dimGrid.z = 1; // unsigned int *d_sumArr = NULL; // if (in_size > (2 * block_size)) { // // we need the sum auxilarry array only if nuFmblocks2 > 1 // CHECK(cudaMalloc((void **)&d_sumArr, numBlocks2 * sizeof(unsigned int))); // } // blellochScan<<<dimGrid, dimThreadBlock>>>(out, in, d_sumArr, in_size); // if (in_size <= (2 * block_size)) { // // out has proper exclusive scan. 
just return // CHECK(cudaDeviceSynchronize()); // return; // } else { // // now we need to perform exclusive scan on the auxilliary sum array // unsigned int *d_sumArr_scan; // CHECK(cudaMalloc((void **)&d_sumArr_scan, numBlocks2 * sizeof(unsigned int))); // exclusiveScan(d_sumArr_scan, d_sumArr, numBlocks2, block_size); // // d_sumArr_scan now contains the exclusive scan op of individual blocks // // now just do a one-one addition of blocks // // cudaBindTexture(0, tex_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int)); // if(numBlocks2 <= MAX_CONSTANT) { // CHECK(cudaMemcpyToSymbol(const_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice)); // } // mergeScanBlocks<<<dimGrid, dimThreadBlock>>>(d_sumArr_scan, out, in_size); // // cudaUnbindTexture(tex_sum); // cudaFree(d_sumArr); // cudaFree(d_sumArr_scan); // } // } #define MAX_BLOCK_SZ 128 #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 //#define ZERO_BANK_CONFLICTS #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS) #endif __global__ void gpu_add_block_sums(unsigned int* const d_out, const unsigned int* const d_in, unsigned int* const d_block_sums, const size_t numElems) { unsigned int d_block_sum_val = d_block_sums[blockIdx.x]; // Simple implementation's performance is not significantly (if at all) // better than previous verbose implementation unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x; if (cpy_idx < numElems) { d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val; if (cpy_idx + blockDim.x < numElems) d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val; } } __global__ void gpu_prescan(unsigned int* const d_out, const unsigned int* const d_in, unsigned int* const d_block_sums, const unsigned int len, const unsigned int shmem_sz, const unsigned int max_elems_per_block) { // Allocated on invocation extern __shared__ unsigned int s_out[]; int thid = threadIdx.x; int ai = thid; int bi = thid + blockDim.x; // Zero out the shared memory // Helpful especially when input size is not power of two s_out[thid] = 0; s_out[thid + blockDim.x] = 0; // If CONFLICT_FREE_OFFSET is used, shared memory size // must be a 2 * blockDim.x + blockDim.x/num_banks s_out[thid + blockDim.x + (blockDim.x >> LOG_NUM_BANKS)] = 0; __syncthreads(); // Copy d_in to shared memory // Note that d_in's elements are scattered into shared memory // in light of avoiding bank conflicts unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x; if (cpy_idx < len) { s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx]; if (cpy_idx + blockDim.x < len) s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x]; } // For both upsweep and downsweep: // Sequential indices with conflict free padding // Amount of padding = target index / num banks // This "shifts" the target indices by one every multiple // of the num banks // offset controls the stride and starting index of // target elems at every iteration // d just controls which threads are active // Sweeps are pivoted on the last element of shared memory // Upsweep/Reduce step int offset = 1; for (int d = max_elems_per_block >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset * ((thid << 1) + 1) - 1; int bi = offset * ((thid << 1) + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_out[bi] += s_out[ai]; } offset <<= 1; } // Save the total sum on the global block sums array // 
Then clear the last element on the shared memory if (thid == 0) { d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1 + CONFLICT_FREE_OFFSET(max_elems_per_block - 1)]; s_out[max_elems_per_block - 1 + CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0; } // Downsweep step for (int d = 1; d < max_elems_per_block; d <<= 1) { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * ((thid << 1) + 1) - 1; int bi = offset * ((thid << 1) + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int temp = s_out[ai]; s_out[ai] = s_out[bi]; s_out[bi] += temp; } } __syncthreads(); // Copy contents of shared memory to global memory if (cpy_idx < len) { d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)]; if (cpy_idx + blockDim.x < len) d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)]; } } void sum_scan_blelloch(unsigned int* const d_out, const unsigned int* const d_in, const size_t numElems) { // Zero out d_out CHECK(cudaMemset(d_out, 0, numElems * sizeof(unsigned int))); // Set up number of threads and blocks unsigned int block_sz = MAX_BLOCK_SZ / 2; unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm // If input size is not power of two, the remainder will still need a whole block // Thus, number of blocks must be the ceiling of input size / max elems that a block can handle //unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block); // UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically // add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity unsigned int grid_sz = numElems / max_elems_per_block; // Take advantage of the fact that integer division drops the decimals if (numElems % max_elems_per_block != 0) grid_sz += 1; // Conflict free padding requires that shared memory be more than 2 * block_sz unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block) >> LOG_NUM_BANKS); // Allocate memory for array of total sums produced by each block // Array length must be the same as number of blocks unsigned int* d_block_sums; CHECK(cudaMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz)); CHECK(cudaMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz)); // Sum scan data allocated to each block //gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems); gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_out, d_in, d_block_sums, numElems, shmem_sz, max_elems_per_block); // Sum scan total sums produced by each block // Use basic implementation if number of total sums is <= 2 * block_sz // (This requires only one block to do the scan) if (grid_sz <= max_elems_per_block) { unsigned int* d_dummy_blocks_sums; CHECK(cudaMalloc(&d_dummy_blocks_sums, sizeof(unsigned int))); CHECK(cudaMemset(d_dummy_blocks_sums, 0, sizeof(unsigned int))); //gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz); gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz, shmem_sz, max_elems_per_block); CHECK(cudaFree(d_dummy_blocks_sums)); } // Else, recurse on this same function as you'll need the full-blown scan // for the block sums else { unsigned int* d_in_block_sums; CHECK(cudaMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz)); 
CHECK(cudaMemcpy(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToDevice)); sum_scan_blelloch(d_block_sums, d_in_block_sums, grid_sz); CHECK(cudaFree(d_in_block_sums)); } // Add each block's total sum to its scan output // in order to get the final, global scanned array gpu_add_block_sums<<<grid_sz, block_sz>>>(d_out, d_out, d_block_sums, numElems); CHECK(cudaFree(d_block_sums)); } // idea to do exclusive prefix is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs // I will use y,z,s to specify which step I am in. // in particular, I split the whole array into multiple smaller array. each small array has [len] numbers // Thread level y: each thread will do addition sequentially. threads are working independently, dealing with [len] numbers. // Thread level z: each threads in the same block will do sequentially. threads are working independently, dealing with one block. // Thread level s: each thread will add the result from its previous thread. threads are working independently, dealing with [len] numbers. // Block level y: this will get prefix sum in block level. // Block level z: only one block and one thread are used here, do addition sequentially. // Block level s: each threads will add the result from its previous block. __global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int start=index*len+1;//exclusive if (start>n) return; //exclusive, could equal to n int end=start+step; output[start]=mask[start-1]; for(unsigned int i=start+1;i<end&&i<n;i++){ output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1] } } __global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int offset=2*step; unsigned int start=step*blockDim.x*index+offset; unsigned int end=step*blockDim.x*(index+1)+1; for(unsigned int i=start;i<end && i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){ if (threadIdx.x==0) return; unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int start=index*step+1;//exclusive unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum unsigned int base=sum[start-1]; for(unsigned int i=start; i<end && i<n; i++){ sum[i]+=base; } } // void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ // int step=len*block_size;//each block has step number // int start=2*step; // for(unsigned int i=start; i<n; i+=step){ // sum[i]+=sum[i-step]; // } // } __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ //only one block and one thread int step=len*block_size;//each block has step number int start=2*step; for(unsigned int i=start; i<n; i+=step){ sum[i]+=sum[i-step]; } } // __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ // unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; // if (index==0) return; //the first block is not needed to merge // int step=len*blockDim.x; // int start=index*step+1; //exclusive // int end=start+step-1;// -1 is important, this position has been added in serial sum // int base=sum[start-1];//last element at last block // for(int i=start; i<end && i<n; i++){ // sum[i]+=base; // } // } __global__ 
void mergeblock(unsigned int* sum,const int len, const unsigned int n){ if (blockIdx.x==0) return;//the first block is not needed to merge unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int base_index=blockIdx.x*step*blockDim.x; unsigned int base=sum[base_index]; int start=index*step; //only the first thread in block should excluded the first element int end=start+step; start=(start==base_index)?start+1:start; // int base=sum[start-1];//last element at last block for(int i=start; i<end && i<n; i++){ sum[i]+=base; } } void psort(int n, data_t *data) { if(n<=0) return; // FIXME: Implement a more efficient parallel sorting algorithm for the GPU. const int block_size=256;//64 threads per block; const int len=2000; // add 1000 prefix sum per thread; data_t *d_temp; data_t *d_in=NULL; CHECK(cudaMalloc((void**)&d_in,n*sizeof(data_t))); data_t *d_out_long=NULL; CHECK(cudaMalloc((void**)&d_out_long,n*sizeof(data_t))); unsigned int *d_out=NULL; CHECK(cudaMalloc((void**)&d_out,n*sizeof(unsigned int))); unsigned int *d_sum=NULL; CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int))); unsigned int *d_index=NULL; CHECK(cudaMalloc((void**)&d_index,n*sizeof(unsigned int))); // std::vector<unsigned int> inter_sum(n); // unsigned int inter_sum[n]; cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice); data_t bits=sizeof(data_t)*8; // unsigned int out[n]; // unsigned int sum[n]; unsigned int total_zeros, mask_last; //one pass here clock_t test = clock(); for(data_t i=0; i<bits; i++){ CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int))); getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0); CHECK(cudaGetLastError()); // CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"out "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<out[j]<<" "; // } // std::cout<<std::endl; // //inclusive prefix sum // prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n); // CHECK(cudaGetLastError()); // serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size); // CHECK(cudaGetLastError()); // // CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // // serialsum_accrossblock(inter_sum.data(), len, n, block_size); // // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // // CHECK(cudaGetLastError()); // mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); clock_t start = clock(); sum_scan_blelloch(d_sum, d_out, n); std::cout<<"time: "<<double(clock()-start)/CLOCKS_PER_SEC<<std::endl; // exclusiveScan(d_sum, d_out, n, block_size); // CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"sum "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<sum[j]<<" "; // } // std::cout<<std::endl; CHECK(cudaMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost)); total_zeros+=(mask_last==1)?1:0; getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros); // std::cout<<"index "<<std::endl; // CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for(int j=0;j<n;j++){ // 
std::cout<<sum[j]<<" "; // } // std::cout<<std::endl; CHECK(cudaGetLastError()); // // get mask for 1 and store in d_out // getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 1); // CHECK(cudaGetLastError()); // //inclusive prefix sum // CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int))); // prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n); // CHECK(cudaGetLastError()); // serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // // CHECK(cudaMemcpy(inter_sum.data() , d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // // serialsum_accrossblock(inter_sum.data(), len, n, block_size); // // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size); // CHECK(cudaGetLastError()); // mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros); // CHECK(cudaGetLastError()); scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n); CHECK(cudaGetLastError()); //must swap pointers d_temp = d_in; d_in = d_out_long; d_out_long = d_temp; } std::cout<<"total: "<<double(clock()-test)/CLOCKS_PER_SEC<<std::endl; cuda_memcpy(data, d_in, n, cudaMemcpyDeviceToHost); CHECK(cudaFree(d_in)); CHECK(cudaFree(d_out_long)); CHECK(cudaFree(d_out)); CHECK(cudaFree(d_sum)); CHECK(cudaFree(d_index)); // std::sort(data, data + n); }
#include <hip/hip_runtime.h> #include <algorithm> #include <iostream> #include <vector> #include <time.h> typedef unsigned long long data_t; static inline void check(hipError_t err, const char* context) { if (err != hipSuccess) { std::cerr << "HIP error: " << context << ": " << hipGetErrorString(err) << std::endl; std::exit(EXIT_FAILURE); } } #define CHECK(x) check(x, #x) const int MAX_CONSTANT = 16*1024; __constant__ unsigned int const_sum[MAX_CONSTANT]; template <class T> void cuda_memcpy(T* target, const T* source, std::size_t num, hipMemcpyKind direction) { CHECK(hipMemcpy(target, source, num * sizeof(T), direction)); } static inline int divup(int a, int b) { return (a + b - 1)/b; } // get the bit of each number selected by bit_shift // example: number : 10001, bit_shift: 1, One: 1, // // it means check if the second bit is 1 or not. __global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; data_t bit = 0; data_t one=1; data_t shift=one<<bit_shift; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ bit=d_in[i]&shift; bit = (bit > 0) ? 1 : 0; d_out[i] = (One ? bit : 1 - bit); } } __global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n, unsigned int total_pre) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for (unsigned int i=start; i<end && i<n; i++){ d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre; if(d_index[i]>=n){ printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]); } // if(d_mask[i]==1){ // d_index[i]=total_pre+d_sum[i]; // } } } __global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) { unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; unsigned int start=index*len; if (start>=n) return; unsigned int end=start+len; for(unsigned int i=start;i<end && i<n; i++ ){ d_out[d_index[i]]=d_in[i]; } } // // pay attention that blockDim.x must be power of 2 // __global__ void blellochScan(unsigned int *out, unsigned int *in, // unsigned int *sum, unsigned int inputSize) { // __shared__ unsigned int temp[2 * 256]; // unsigned int start = blockIdx.x * blockDim.x << 1; // unsigned int tx = threadIdx.x; // unsigned int index = 0; // temp[tx] = (start + tx < inputSize)? in[start+tx]:0; // temp[tx+blockDim.x] = (start + tx + blockDim.x < inputSize)? in[start + tx + blockDim.x] : 0; // // Blelloch Scan // __syncthreads(); // // reduction step // unsigned int stride = 1; // while (stride <= blockDim.x) { // index = (tx + 1) * (stride << 1) - 1; // if (index < (blockDim.x << 1)) { // temp[index] += temp[index - stride]; // } // stride <<= 1; // __syncthreads(); // } // // first store the reduction sum in sum array // // make it zero since it is exclusive scan // if (tx == 0) { // // sum array contains the prefix sum of each // // 2*blockDim blocks of element.
// if (sum != NULL) { // sum[blockIdx.x] = temp[(blockDim.x << 1) - 1]; // } // temp[(blockDim.x << 1) - 1] = 0; // } // // wait for thread zero to write // __syncthreads(); // // post scan step // stride = blockDim.x; // index = 0; // unsigned int var = 0; // while (stride > 0) { // index = ((stride << 1) * (tx + 1)) - 1; // if (index < (blockDim.x << 1)) { // var = temp[index]; // temp[index] += temp[index - stride]; // temp[index - stride] = var; // } // stride >>= 1; // __syncthreads(); // } // // now write the temp array to output // if (start + tx < inputSize) { // out[start + tx] = temp[tx]; // } // if (start + tx + blockDim.x < inputSize) { // out[start + tx + blockDim.x] = temp[tx + blockDim.x]; // } // } // /* // sum out the blocks' accumulated sums to each element // */ // __global__ void mergeScanBlocks(unsigned int *sum, unsigned int *output, // unsigned int opSize) { // unsigned int index = (blockDim.x * blockIdx.x << 1) + threadIdx.x; // if (index < opSize) { // // output[index] += sum[blockIdx.x]; // output[index] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x]; // // output[index] += tex1Dfetch(tex_sum, blockIdx.x); // } // if (index + blockDim.x < opSize) { // // output[index + blockDim.x] += sum[blockIdx.x]; // output[index + blockDim.x] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x]; // // output[index + blockDim.x] += tex1Dfetch(tex_sum, blockIdx.x); // } // } // /* // api for exclusiveScan // */ // void exclusiveScan(unsigned int *out, unsigned int *in, unsigned int in_size, unsigned int block_size) { // unsigned int numBlocks1 = in_size / block_size; // if (in_size % block_size) numBlocks1++; // unsigned int numBlocks2 = numBlocks1 / 2; // if (numBlocks1 % 2) numBlocks2++; // dim3 dimThreadBlock; // dimThreadBlock.x = block_size; // dimThreadBlock.y = 1; // dimThreadBlock.z = 1; // dim3 dimGrid; // dimGrid.x = numBlocks2; // dimGrid.y = 1; // dimGrid.z = 1; // unsigned int *d_sumArr = NULL; // if (in_size > (2 * block_size)) { // // we need the sum auxilarry array only if nuFmblocks2 > 1 // CHECK(cudaMalloc((void **)&d_sumArr, numBlocks2 * sizeof(unsigned int))); // } // blellochScan<<<dimGrid, dimThreadBlock>>>(out, in, d_sumArr, in_size); // if (in_size <= (2 * block_size)) { // // out has proper exclusive scan. 
just return // CHECK(cudaDeviceSynchronize()); // return; // } else { // // now we need to perform exclusive scan on the auxilliary sum array // unsigned int *d_sumArr_scan; // CHECK(cudaMalloc((void **)&d_sumArr_scan, numBlocks2 * sizeof(unsigned int))); // exclusiveScan(d_sumArr_scan, d_sumArr, numBlocks2, block_size); // // d_sumArr_scan now contains the exclusive scan op of individual blocks // // now just do a one-one addition of blocks // // cudaBindTexture(0, tex_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int)); // if(numBlocks2 <= MAX_CONSTANT) { // CHECK(cudaMemcpyToSymbol(const_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice)); // } // mergeScanBlocks<<<dimGrid, dimThreadBlock>>>(d_sumArr_scan, out, in_size); // // cudaUnbindTexture(tex_sum); // cudaFree(d_sumArr); // cudaFree(d_sumArr_scan); // } // } #define MAX_BLOCK_SZ 128 #define NUM_BANKS 32 #define LOG_NUM_BANKS 5 //#define ZERO_BANK_CONFLICTS #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(n) \ ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS) #endif __global__ void gpu_add_block_sums(unsigned int* const d_out, const unsigned int* const d_in, unsigned int* const d_block_sums, const size_t numElems) { unsigned int d_block_sum_val = d_block_sums[blockIdx.x]; // Simple implementation's performance is not significantly (if at all) // better than previous verbose implementation unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x; if (cpy_idx < numElems) { d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val; if (cpy_idx + blockDim.x < numElems) d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val; } } __global__ void gpu_prescan(unsigned int* const d_out, const unsigned int* const d_in, unsigned int* const d_block_sums, const unsigned int len, const unsigned int shmem_sz, const unsigned int max_elems_per_block) { // Allocated on invocation extern __shared__ unsigned int s_out[]; int thid = threadIdx.x; int ai = thid; int bi = thid + blockDim.x; // Zero out the shared memory // Helpful especially when input size is not power of two s_out[thid] = 0; s_out[thid + blockDim.x] = 0; // If CONFLICT_FREE_OFFSET is used, shared memory size // must be a 2 * blockDim.x + blockDim.x/num_banks s_out[thid + blockDim.x + (blockDim.x >> LOG_NUM_BANKS)] = 0; __syncthreads(); // Copy d_in to shared memory // Note that d_in's elements are scattered into shared memory // in light of avoiding bank conflicts unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x; if (cpy_idx < len) { s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx]; if (cpy_idx + blockDim.x < len) s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x]; } // For both upsweep and downsweep: // Sequential indices with conflict free padding // Amount of padding = target index / num banks // This "shifts" the target indices by one every multiple // of the num banks // offset controls the stride and starting index of // target elems at every iteration // d just controls which threads are active // Sweeps are pivoted on the last element of shared memory // Upsweep/Reduce step int offset = 1; for (int d = max_elems_per_block >> 1; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = offset * ((thid << 1) + 1) - 1; int bi = offset * ((thid << 1) + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_out[bi] += s_out[ai]; } offset <<= 1; } // Save the total sum on the global block sums array // 
Then clear the last element on the shared memory if (thid == 0) { d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1 + CONFLICT_FREE_OFFSET(max_elems_per_block - 1)]; s_out[max_elems_per_block - 1 + CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0; } // Downsweep step for (int d = 1; d < max_elems_per_block; d <<= 1) { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * ((thid << 1) + 1) - 1; int bi = offset * ((thid << 1) + 2) - 1; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); unsigned int temp = s_out[ai]; s_out[ai] = s_out[bi]; s_out[bi] += temp; } } __syncthreads(); // Copy contents of shared memory to global memory if (cpy_idx < len) { d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)]; if (cpy_idx + blockDim.x < len) d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)]; } } void sum_scan_blelloch(unsigned int* const d_out, const unsigned int* const d_in, const size_t numElems) { // Zero out d_out CHECK(hipMemset(d_out, 0, numElems * sizeof(unsigned int))); // Set up number of threads and blocks unsigned int block_sz = MAX_BLOCK_SZ / 2; unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm // If input size is not power of two, the remainder will still need a whole block // Thus, number of blocks must be the ceiling of input size / max elems that a block can handle //unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block); // UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically // add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity unsigned int grid_sz = numElems / max_elems_per_block; // Take advantage of the fact that integer division drops the decimals if (numElems % max_elems_per_block != 0) grid_sz += 1; // Conflict free padding requires that shared memory be more than 2 * block_sz unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block) >> LOG_NUM_BANKS); // Allocate memory for array of total sums produced by each block // Array length must be the same as number of blocks unsigned int* d_block_sums; CHECK(hipMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz)); CHECK(hipMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz)); // Sum scan data allocated to each block //gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems); gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_out, d_in, d_block_sums, numElems, shmem_sz, max_elems_per_block); // Sum scan total sums produced by each block // Use basic implementation if number of total sums is <= 2 * block_sz // (This requires only one block to do the scan) if (grid_sz <= max_elems_per_block) { unsigned int* d_dummy_blocks_sums; CHECK(hipMalloc(&d_dummy_blocks_sums, sizeof(unsigned int))); CHECK(hipMemset(d_dummy_blocks_sums, 0, sizeof(unsigned int))); //gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz); gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz, shmem_sz, max_elems_per_block); CHECK(hipFree(d_dummy_blocks_sums)); } // Else, recurse on this same function as you'll need the full-blown scan // for the block sums else { unsigned int* d_in_block_sums; CHECK(hipMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz)); CHECK(hipMemcpy(d_in_block_sums, 
d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToDevice)); sum_scan_blelloch(d_block_sums, d_in_block_sums, grid_sz); CHECK(hipFree(d_in_block_sums)); } // Add each block's total sum to its scan output // in order to get the final, global scanned array gpu_add_block_sums<<<grid_sz, block_sz>>>(d_out, d_out, d_block_sums, numElems); CHECK(hipFree(d_block_sums)); } // idea to do exclusive prefix is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs // I will use y,z,s to specify which step I am in. // in particular, I split the whole array into multiple smaller array. each small array has [len] numbers // Thread level y: each thread will do addition sequentially. threads are working independently, dealing with [len] numbers. // Thread level z: each threads in the same block will do sequentially. threads are working independently, dealing with one block. // Thread level s: each thread will add the result from its previous thread. threads are working independently, dealing with [len] numbers. // Block level y: this will get prefix sum in block level. // Block level z: only one block and one thread are used here, do addition sequentially. // Block level s: each threads will add the result from its previous block. __global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int start=index*len+1;//exclusive if (start>n) return; //exclusive, could equal to n int end=start+step; output[start]=mask[start-1]; for(unsigned int i=start+1;i<end&&i<n;i++){ output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1] } } __global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){ unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; int offset=2*step; unsigned int start=step*blockDim.x*index+offset; unsigned int end=step*blockDim.x*(index+1)+1; for(unsigned int i=start;i<end && i<n; i+=step){ sum[i]+=sum[i-step]; } } __global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){ if (threadIdx.x==0) return; unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int start=index*step+1;//exclusive unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum unsigned int base=sum[start-1]; for(unsigned int i=start; i<end && i<n; i++){ sum[i]+=base; } } // void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ // int step=len*block_size;//each block has step number // int start=2*step; // for(unsigned int i=start; i<n; i+=step){ // sum[i]+=sum[i-step]; // } // } __global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){ //only one block and one thread int step=len*block_size;//each block has step number int start=2*step; for(unsigned int i=start; i<n; i+=step){ sum[i]+=sum[i-step]; } } // __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){ // unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; // if (index==0) return; //the first block is not needed to merge // int step=len*blockDim.x; // int start=index*step+1; //exclusive // int end=start+step-1;// -1 is important, this position has been added in serial sum // int base=sum[start-1];//last element at last block // for(int i=start; i<end && i<n; i++){ // sum[i]+=base; // } // } __global__ void mergeblock(unsigned int* 
sum,const int len, const unsigned int n){ if (blockIdx.x==0) return;//the first block is not needed to merge unsigned int index = threadIdx.x + blockDim.x * blockIdx.x; int step=len; unsigned int base_index=blockIdx.x*step*blockDim.x; unsigned int base=sum[base_index]; int start=index*step; //only the first thread in block should excluded the first element int end=start+step; start=(start==base_index)?start+1:start; // int base=sum[start-1];//last element at last block for(int i=start; i<end && i<n; i++){ sum[i]+=base; } } void psort(int n, data_t *data) { if(n<=0) return; // FIXME: Implement a more efficient parallel sorting algorithm for the GPU. const int block_size=256;//64 threads per block; const int len=2000; // add 1000 prefix sum per thread; data_t *d_temp; data_t *d_in=NULL; CHECK(hipMalloc((void**)&d_in,n*sizeof(data_t))); data_t *d_out_long=NULL; CHECK(hipMalloc((void**)&d_out_long,n*sizeof(data_t))); unsigned int *d_out=NULL; CHECK(hipMalloc((void**)&d_out,n*sizeof(unsigned int))); unsigned int *d_sum=NULL; CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int))); unsigned int *d_index=NULL; CHECK(hipMalloc((void**)&d_index,n*sizeof(unsigned int))); // std::vector<unsigned int> inter_sum(n); // unsigned int inter_sum[n]; cuda_memcpy(d_in,data,n,hipMemcpyHostToDevice); data_t bits=sizeof(data_t)*8; // unsigned int out[n]; // unsigned int sum[n]; unsigned int total_zeros, mask_last; //one pass here clock_t test = clock(); for(data_t i=0; i<bits; i++){ CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int))); getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0); CHECK(hipGetLastError()); // CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"out "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<out[j]<<" "; // } // std::cout<<std::endl; // //inclusive prefix sum // prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n); // CHECK(cudaGetLastError()); // serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size); // CHECK(cudaGetLastError()); // // CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // // serialsum_accrossblock(inter_sum.data(), len, n, block_size); // // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // // CHECK(cudaGetLastError()); // mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); clock_t start = clock(); sum_scan_blelloch(d_sum, d_out, n); std::cout<<"time: "<<double(clock()-start)/CLOCKS_PER_SEC<<std::endl; // exclusiveScan(d_sum, d_out, n, block_size); // CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // std::cout<<"sum "<<std::endl; // for(int j=0;j<n;j++){ // std::cout<<sum[j]<<" "; // } // std::cout<<std::endl; CHECK(hipMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost)); CHECK(hipMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost)); total_zeros+=(mask_last==1)?1:0; getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros); // std::cout<<"index "<<std::endl; // CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // for(int j=0;j<n;j++){ // std::cout<<sum[j]<<" "; // } // 
std::cout<<std::endl; CHECK(hipGetLastError()); // // get mask for 1 and store in d_out // getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 1); // CHECK(cudaGetLastError()); // //inclusive prefix sum // CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int))); // prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n); // CHECK(cudaGetLastError()); // serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // // CHECK(cudaMemcpy(inter_sum.data() , d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // // serialsum_accrossblock(inter_sum.data(), len, n, block_size); // // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice)); // serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size); // CHECK(cudaGetLastError()); // mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n); // CHECK(cudaGetLastError()); // getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros); // CHECK(cudaGetLastError()); scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n); CHECK(hipGetLastError()); //must swap pointers d_temp = d_in; d_in = d_out_long; d_out_long = d_temp; } std::cout<<"total: "<<double(clock()-test)/CLOCKS_PER_SEC<<std::endl; cuda_memcpy(data, d_in, n, hipMemcpyDeviceToHost); CHECK(hipFree(d_in)); CHECK(hipFree(d_out_long)); CHECK(hipFree(d_out)); CHECK(hipFree(d_sum)); CHECK(hipFree(d_index)); // std::sort(data, data + n); }
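A quick way to sanity-check a conversion of this size is a small host-side harness. The sketch below is not part of the dataset entry; it assumes the converted file above is compiled together with it under hipcc, and the file names are hypothetical (e.g. hipcc radix_sort.hip.cpp test_psort.cpp -o test_psort).

#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <vector>

typedef unsigned long long data_t;
void psort(int n, data_t *data); // provided by the converted file above

int main() {
    const int n = 1 << 20;
    std::vector<data_t> keys(n);
    for (auto &k : keys)  // build 64-bit keys from two 32-bit draws
        k = ((data_t)rand() << 32) ^ (data_t)rand();
    psort(n, keys.data());
    std::cout << (std::is_sorted(keys.begin(), keys.end()) ? "sorted" : "NOT sorted")
              << std::endl;
    return 0;
}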
Convert the following CUDA code to AMD GPU code: cuda __global__ void CuKnlSetField( double xCells, double yCells, double* energy0, double* energy1) { const int gid = threadIdx.x+blockIdx.x*blockDim.x; energy1[gid] = energy0[gid]; }
#include <hip/hip_runtime.h> __global__ void CuKnlSetField( double xCells, double yCells, double* energy0, double* energy1) { const int gid = threadIdx.x+blockIdx.x*blockDim.x; energy1[gid] = energy0[gid]; }
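CuKnlSetField has no bounds check on gid, so a correct launch must cover exactly the number of cells. A hypothetical launch wrapper (the wrapper name and block size are assumptions, not part of the entry) could look like this; hipcc accepts the triple-chevron syntax directly.

#include <hip/hip_runtime.h>

// Assumes xCells * yCells is a multiple of the block size, since the kernel
// writes energy1[gid] unconditionally.
void launch_set_field(double *d_energy0, double *d_energy1, int xCells, int yCells) {
    const int n = xCells * yCells;
    const int block = 256;
    CuKnlSetField<<<n / block, block>>>((double)xCells, (double)yCells,
                                        d_energy0, d_energy1);
    hipDeviceSynchronize();
}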
Convert the following CUDA code to AMD GPU code: cuda #include "probe.cuh" Probe::Probe(ProbeConfig cfg) : chan_indices_(cfg.n_active()), site_labels(cfg.n_active()), chan_grps(cfg.n_active()), x_coords(cfg.n_active()), y_coords(cfg.n_active()), is_active_(cfg.n_total), site_dists(cfg.n_active()) { n_total_ = cfg.n_total; if (n_total_ < cfg.n_active()) { throw std::domain_error( "Number of active channels cannot exceed total number of channels."); } if (cfg.srate_hz <= 0.0) { throw std::domain_error("Sample rate must be positive."); } srate_hz_ = cfg.srate_hz; unsigned k = 0; // nested for loop go brr for (const auto &channel_group : cfg.channel_groups) { const ChannelGroup grp = channel_group.second; for (auto j = 0; j < grp.n_channels(); ++j) { chan_indices_.at(k) = grp.channels.at(j); site_labels.at(k) = grp.site_labels.at(j); x_coords.at(k) = grp.x_coords.at(j); y_coords.at(k) = grp.y_coords.at(j); chan_grps[k++] = channel_group.first; } } SortChannels(); EnsureUnique(); FindInactive(); } /** * @brief Create the matrix of distances between channels on the probe. */ void Probe::MakeDistanceMatrix() { if (dist_mat_complete || site_dists.n_cols() != n_active()) return; for (unsigned i = 0; i < n_active(); ++i) { for (unsigned j = i + 1; j < n_active(); ++j) { auto dx = x_coords.at(i) - x_coords.at(j), dy = y_coords.at(i) - y_coords.at(j); site_dists.set_at(i, j, (float) std::hypot(dx, dy)); } } dist_mat_complete = true; } /** * @brief * @param site_idx * @param n_neighbors * @return Site indices of nearest neighbors to `site_idx` (including * `site_idx`). */ std::vector<uint32_t> Probe::NearestNeighbors(uint32_t site_idx, uint32_t n_neighbors) { MakeDistanceMatrix(); return site_dists.closest(site_idx, n_neighbors); } /** * @brief Get the channel index value of the site at `site_idx`. * @param site_idx Index of the site. * @return The channel index value of the site at `site_idx`. */ unsigned Probe::chan_index(unsigned site_idx) const { if (site_idx >= n_active()) { throw std::length_error("Index exceeds array dimensions."); } return chan_indices_.at(site_idx); } /** * @brief Get the site index value of the channel at `chan_idx`. * @param chan_idx Index of the channel. * @return The site index value of the channel at `chan_idx`. */ unsigned Probe::site_index(uint32_t chan_idx) const { auto idx = 0; for (auto it = is_active_.begin(); it < is_active_.begin() + chan_idx; ++it) { idx += *it; } return idx; } /** * @brief Get the label of the ith site. * @param site_idx Index of the site. * @return The ith site label. */ unsigned Probe::label_at(unsigned site_idx) const { if (site_idx >= n_active()) { throw std::length_error("Index exceeds array dimensions."); } return site_labels.at(site_idx); } /** * @brief Get the channel group label of the ith site. * @param site_idx Index of the site. * @return The channel group label of the ith site. */ unsigned Probe::group_at(unsigned site_idx) const { if (site_idx >= n_active()) { throw std::length_error("Index exceeds array dimensions."); } return chan_grps.at(site_idx); } /** * @brief Returns the x coordinate of the ith site. * @param site_idx Index of the site. * @return The x coordinate of the ith site. */ double Probe::x_at(unsigned site_idx) const { if (site_idx > n_active()) { throw std::length_error("Index exceeds array dimensions."); } return x_coords.at(site_idx); } /** * @brief Returns the y coordinate of the ith site. * @param site_idx Index of the site. * @return The y coordinate of the ith site. 
*/ double Probe::y_at(unsigned site_idx) const { if (site_idx > n_active()) { throw std::length_error("Index exceeds array dimensions."); } return y_coords.at(site_idx); } /** * @brief Get the distance between the ith and jth sites. * @param left Site index. * @param right Site index. * @return Distance between left and right. */ float Probe::dist_between(uint32_t left, uint32_t right) { MakeDistanceMatrix(); return site_dists.at(left, right); } /** * @brief Sort site-indexed values by corresponding channel in ascending order. */ void Probe::SortChannels() { if (n_active() == 0) return; // get indices that would sort chan_indices_ auto as = utilities::argsort(chan_indices_); if (std::is_sorted(as.begin(), as.end())) { // nothing to do! return; } std::vector<unsigned> tmp_buf_s(n_active()); std::vector<double> tmp_buf_d(n_active()); // reorder chan_indices_, x_coords for (auto i = 0; i < as.size(); ++i) { tmp_buf_s.at(i) = chan_indices_.at(as.at(i)); tmp_buf_d.at(i) = x_coords.at(as.at(i)); } chan_indices_.assign(tmp_buf_s.begin(), tmp_buf_s.end()); x_coords.assign(tmp_buf_d.begin(), tmp_buf_d.end()); // reorder site_labels, y_coords for (auto i = 0; i < as.size(); ++i) { tmp_buf_s.at(i) = site_labels[as.at(i)]; tmp_buf_d.at(i) = y_coords[as.at(i)]; } site_labels.assign(tmp_buf_s.begin(), tmp_buf_s.end()); y_coords.assign(tmp_buf_d.begin(), tmp_buf_d.end()); // reorder chan_grps for (auto i = 0; i < as.size(); ++i) { tmp_buf_s.at(i) = chan_grps[as.at(i)]; } chan_grps.assign(tmp_buf_s.begin(), tmp_buf_s.end()); } /** * @brief Check that channel indices and site labels are unique, throwing an * error if this is not the case. */ void Probe::EnsureUnique() { // ensure all channel indices are unique unsigned ct; for (auto it = chan_indices_.begin(); it != chan_indices_.end(); ++it) { ct = std::count(it, chan_indices_.end(), *(it)); if (ct > 1) { throw std::domain_error("Channel indices are not unique."); } } for (auto it = site_labels.begin(); it != site_labels.end(); ++it) { ct = std::count(it, site_labels.end(), *(it)); if (ct > 1) { throw std::domain_error("Site labels are not unique."); } } } /** * @brief Find inactive channels and set their bits to 0 in is_active_. * * Assumes that chan_indices_ is sorted. */ void Probe::FindInactive() { for (auto i = 0; i < n_total_; ++i) { is_active_.at(i) = std::binary_search(chan_indices_.begin(), chan_indices_.end(), i); } } /** * @brief Determine whether a channel represents an active site. * @param i Channel index. * @return True iff the channel is an active site. */ bool Probe::is_active(unsigned int i) const { return is_active_.at(i); }
#include <hip/hip_runtime.h> #include "probe.cuh" Probe::Probe(ProbeConfig cfg) : chan_indices_(cfg.n_active()), site_labels(cfg.n_active()), chan_grps(cfg.n_active()), x_coords(cfg.n_active()), y_coords(cfg.n_active()), is_active_(cfg.n_total), site_dists(cfg.n_active()) { n_total_ = cfg.n_total; if (n_total_ < cfg.n_active()) { throw std::domain_error( "Number of active channels cannot exceed total number of channels."); } if (cfg.srate_hz <= 0.0) { throw std::domain_error("Sample rate must be positive."); } srate_hz_ = cfg.srate_hz; unsigned k = 0; // nested for loop go brr for (const auto &channel_group : cfg.channel_groups) { const ChannelGroup grp = channel_group.second; for (auto j = 0; j < grp.n_channels(); ++j) { chan_indices_.at(k) = grp.channels.at(j); site_labels.at(k) = grp.site_labels.at(j); x_coords.at(k) = grp.x_coords.at(j); y_coords.at(k) = grp.y_coords.at(j); chan_grps[k++] = channel_group.first; } } SortChannels(); EnsureUnique(); FindInactive(); } /** * @brief Create the matrix of distances between channels on the probe. */ void Probe::MakeDistanceMatrix() { if (dist_mat_complete || site_dists.n_cols() != n_active()) return; for (unsigned i = 0; i < n_active(); ++i) { for (unsigned j = i + 1; j < n_active(); ++j) { auto dx = x_coords.at(i) - x_coords.at(j), dy = y_coords.at(i) - y_coords.at(j); site_dists.set_at(i, j, (float) std::hypot(dx, dy)); } } dist_mat_complete = true; } /** * @brief * @param site_idx * @param n_neighbors * @return Site indices of nearest neighbors to `site_idx` (including * `site_idx`). */ std::vector<uint32_t> Probe::NearestNeighbors(uint32_t site_idx, uint32_t n_neighbors) { MakeDistanceMatrix(); return site_dists.closest(site_idx, n_neighbors); } /** * @brief Get the channel index value of the site at `site_idx`. * @param site_idx Index of the site. * @return The channel index value of the site at `site_idx`. */ unsigned Probe::chan_index(unsigned site_idx) const { if (site_idx >= n_active()) { throw std::length_error("Index exceeds array dimensions."); } return chan_indices_.at(site_idx); } /** * @brief Get the site index value of the channel at `chan_idx`. * @param chan_idx Index of the channel. * @return The site index value of the channel at `chan_idx`. */ unsigned Probe::site_index(uint32_t chan_idx) const { auto idx = 0; for (auto it = is_active_.begin(); it < is_active_.begin() + chan_idx; ++it) { idx += *it; } return idx; } /** * @brief Get the label of the ith site. * @param site_idx Index of the site. * @return The ith site label. */ unsigned Probe::label_at(unsigned site_idx) const { if (site_idx >= n_active()) { throw std::length_error("Index exceeds array dimensions."); } return site_labels.at(site_idx); } /** * @brief Get the channel group label of the ith site. * @param site_idx Index of the site. * @return The channel group label of the ith site. */ unsigned Probe::group_at(unsigned site_idx) const { if (site_idx >= n_active()) { throw std::length_error("Index exceeds array dimensions."); } return chan_grps.at(site_idx); } /** * @brief Returns the x coordinate of the ith site. * @param site_idx Index of the site. * @return The x coordinate of the ith site. */ double Probe::x_at(unsigned site_idx) const { if (site_idx > n_active()) { throw std::length_error("Index exceeds array dimensions."); } return x_coords.at(site_idx); } /** * @brief Returns the y coordinate of the ith site. * @param site_idx Index of the site. * @return The y coordinate of the ith site. 
*/ double Probe::y_at(unsigned site_idx) const { if (site_idx > n_active()) { throw std::length_error("Index exceeds array dimensions."); } return y_coords.at(site_idx); } /** * @brief Get the distance between the ith and jth sites. * @param left Site index. * @param right Site index. * @return Distance between left and right. */ float Probe::dist_between(uint32_t left, uint32_t right) { MakeDistanceMatrix(); return site_dists.at(left, right); } /** * @brief Sort site-indexed values by corresponding channel in ascending order. */ void Probe::SortChannels() { if (n_active() == 0) return; // get indices that would sort chan_indices_ auto as = utilities::argsort(chan_indices_); if (std::is_sorted(as.begin(), as.end())) { // nothing to do! return; } std::vector<unsigned> tmp_buf_s(n_active()); std::vector<double> tmp_buf_d(n_active()); // reorder chan_indices_, x_coords for (auto i = 0; i < as.size(); ++i) { tmp_buf_s.at(i) = chan_indices_.at(as.at(i)); tmp_buf_d.at(i) = x_coords.at(as.at(i)); } chan_indices_.assign(tmp_buf_s.begin(), tmp_buf_s.end()); x_coords.assign(tmp_buf_d.begin(), tmp_buf_d.end()); // reorder site_labels, y_coords for (auto i = 0; i < as.size(); ++i) { tmp_buf_s.at(i) = site_labels[as.at(i)]; tmp_buf_d.at(i) = y_coords[as.at(i)]; } site_labels.assign(tmp_buf_s.begin(), tmp_buf_s.end()); y_coords.assign(tmp_buf_d.begin(), tmp_buf_d.end()); // reorder chan_grps for (auto i = 0; i < as.size(); ++i) { tmp_buf_s.at(i) = chan_grps[as.at(i)]; } chan_grps.assign(tmp_buf_s.begin(), tmp_buf_s.end()); } /** * @brief Check that channel indices and site labels are unique, throwing an * error if this is not the case. */ void Probe::EnsureUnique() { // ensure all channel indices are unique unsigned ct; for (auto it = chan_indices_.begin(); it != chan_indices_.end(); ++it) { ct = std::count(it, chan_indices_.end(), *(it)); if (ct > 1) { throw std::domain_error("Channel indices are not unique."); } } for (auto it = site_labels.begin(); it != site_labels.end(); ++it) { ct = std::count(it, site_labels.end(), *(it)); if (ct > 1) { throw std::domain_error("Site labels are not unique."); } } } /** * @brief Find inactive channels and set their bits to 0 in is_active_. * * Assumes that chan_indices_ is sorted. */ void Probe::FindInactive() { for (auto i = 0; i < n_total_; ++i) { is_active_.at(i) = std::binary_search(chan_indices_.begin(), chan_indices_.end(), i); } } /** * @brief Determine whether a channel represents an active site. * @param i Channel index. * @return True iff the channel is an active site. */ bool Probe::is_active(unsigned int i) const { return is_active_.at(i); }
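This entry contains no device code, so the conversion reduces to including the HIP runtime header. A hypothetical usage sketch follows; ProbeConfig and ChannelGroup are defined in probe.cuh, which is not shown, so every field access below is an assumption inferred from the constructor above.

// Hypothetical sketch: assumes ProbeConfig/ChannelGroup expose the members
// the constructor reads and that channel_groups is map-like.
ProbeConfig cfg;
cfg.n_total = 4;
cfg.srate_hz = 30000.0;

ChannelGroup grp;
grp.channels    = {2, 0, 3};             // deliberately unsorted
grp.site_labels = {10, 11, 12};
grp.x_coords    = {0.0, 0.0, 0.0};
grp.y_coords    = {0.0, 20.0, 40.0};
cfg.channel_groups[0] = grp;

Probe probe(cfg);                        // sorts by channel, validates uniqueness
auto nn = probe.NearestNeighbors(0, 2);  // two nearest sites to site 0 (incl. itself)
bool active = probe.is_active(1);        // channel 1 is unused -> false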
Convert the following CUDA code to AMD GPU code: cuda #include "includes.h" __global__ void cudaComputeSignature(double* hyperplanes, double* v, int* dimensions, bool* sig, long* hyperp_length) { long tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < *hyperp_length) { int d_dimensions = *dimensions; long pos = tid * d_dimensions; double sum = 0.0; for (int i = 0; i < d_dimensions; i++) sum += hyperplanes[i+pos] * v[i]; sig[tid] = (sum>=0); } }
#include <hip/hip_runtime.h> #include "includes.h" __global__ void cudaComputeSignature(double* hyperplanes, double* v, int* dimensions, bool* sig, long* hyperp_length) { long tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid < *hyperp_length) { int d_dimensions = *dimensions; long pos = tid * d_dimensions; double sum = 0.0; for (int i = 0; i < d_dimensions; i++) sum += hyperplanes[i+pos] * v[i]; sig[tid] = (sum>=0); } }
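Because the kernel takes its sizes through device pointers (*dimensions, *hyperp_length), a caller has to stage those scalars with hipMemcpy. The driver below is a hypothetical sketch, not part of the entry; it assumes row-major hyperplanes of length len * dims and that sizeof(bool) == 1 for the result copy.

#include <hip/hip_runtime.h>
#include <vector>

std::vector<char> signature(const std::vector<double> &planes, // len * dims, row-major
                            const std::vector<double> &v, int dims, long len) {
    double *d_p, *d_v; int *d_dims; long *d_len; bool *d_sig;
    hipMalloc(&d_p, planes.size() * sizeof(double));
    hipMalloc(&d_v, v.size() * sizeof(double));
    hipMalloc(&d_dims, sizeof(int));
    hipMalloc(&d_len, sizeof(long));
    hipMalloc(&d_sig, len * sizeof(bool));
    hipMemcpy(d_p, planes.data(), planes.size() * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_v, v.data(), v.size() * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_dims, &dims, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_len, &len, sizeof(long), hipMemcpyHostToDevice);
    const int block = 256;
    cudaComputeSignature<<<(len + block - 1) / block, block>>>(d_p, d_v, d_dims, d_sig, d_len);
    std::vector<char> sig(len);
    hipMemcpy(sig.data(), d_sig, len * sizeof(bool), hipMemcpyDeviceToHost); // byte-wise copy
    hipFree(d_p); hipFree(d_v); hipFree(d_dims); hipFree(d_len); hipFree(d_sig);
    return sig;
}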
Convert the following CUDA code to AMD GPU code: cuda #include "stdio.h" // printf() is only supported // for devices of compute capability 2.0 and higher #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #define printf(f, ...) ((void)(f, __VA_ARGS__),0) #endif __global__ void helloCUDA(float f) { printf("Hello thread %d, f=%f\n", threadIdx.x, f); } int main() { helloCUDA<<<1, 5>>>(1.2345f); cudaDeviceSynchronize(); return 0; }
#include <hip/hip_runtime.h> #include "stdio.h" // printf() is only supported // for devices of compute capability 2.0 and higher #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #define printf(f, ...) ((void)(f, __VA_ARGS__),0) #endif __global__ void helloCUDA(float f) { printf("Hello thread %d, f=%f\n", threadIdx.x, f); } int main() { helloCUDA<<<1, 5>>>(1.2345f); hipDeviceSynchronize(); return 0; }
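As in the other entries, the triple-chevron launch syntax is kept, since hipcc accepts it directly; a build along the lines of hipcc hello.cpp -o hello should print one greeting per thread. Note that the __CUDA_ARCH__ guard is simply never taken when compiling for AMD targets, where device-side printf is supported, so it is harmless to leave in place.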