Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
__global__ void calc_pi(double *dev, double step) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double x = (i + 0.5) * step;
dev[i] = 4.0 /(1.0 + x * x);
}
int main() {
static long num_steps = 1000000000;
static int gpu_threads = 1024;
double step;
double pi, sum = 0.0;
step = 1.0 / (double) num_steps;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
thrust::device_vector<double> dev(num_steps);
calc_pi<<<ceil((double) num_steps/gpu_threads), gpu_threads>>>(thrust::raw_pointer_cast(dev.data()), step);
sum = thrust::reduce(dev.begin(), dev.end(), (double) 0, thrust::plus<double>());
pi = step * sum;
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
printf("O valor de pi calculado com %ld passos levou \n", num_steps);
printf("%.2f milisegundo(s) e chegou no valor: \n", msecTotal);
printf("%.17f\n", pi);
}
|
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
__global__ void calc_pi(double *dev, double step) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double x = (i + 0.5) * step;
dev[i] = 4.0 /(1.0 + x * x);
}
int main() {
static long num_steps = 1000000000;
static int gpu_threads = 1024;
double step;
double pi, sum = 0.0;
step = 1.0 / (double) num_steps;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
thrust::device_vector<double> dev(num_steps);
calc_pi<<<ceil((double) num_steps/gpu_threads), gpu_threads>>>(thrust::raw_pointer_cast(dev.data()), step);
sum = thrust::reduce(dev.begin(), dev.end(), (double) 0, thrust::plus<double>());
pi = step * sum;
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
printf("O valor de pi calculado com %ld passos levou \n", num_steps);
printf("%.2f milisegundo(s) e chegou no valor: \n", msecTotal);
printf("%.17f\n", pi);
}
|
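The triple-chevron launch above compiles with hipcc, and the thrust:: headers come from rocThrust on ROCm. For reference, a minimal sketch of the same launch written with the hipLaunchKernelGGL macro (blocks is a new helper variable, not from the original):
// Sketch only: the calc_pi launch from above, expressed without the <<<>>> extension.
int blocks = (int)ceil((double)num_steps / gpu_threads);
hipLaunchKernelGGL(calc_pi, dim3(blocks), dim3(gpu_threads), 0, 0,
                   thrust::raw_pointer_cast(dev.data()), step);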
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdlib.h>
#include <stdio.h>
// Kernel adding up the adjacent entries (radius of 3) of a 1D array
//
// initial approach
// * 7 kernels, each adding one element to the sum
// * data always read from main memory
__global__ void kernel_add(int n, int offset, int *a, int *b)
{
int i = blockDim.x*blockIdx.x+threadIdx.x;
int j = i + offset;
if( j>-1 && j<n ){
b[i]+=a[j];
}
}
int main() {
int n=2000000;
int memSize = n*sizeof(int);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int *a, *d_a;
a = (int*) malloc (n*sizeof(*a));
cudaMalloc( (void**) &d_a, memSize);
int *b, *d_b;
b = (int*) malloc (n*sizeof(*b));
cudaMalloc( (void**) &d_b, memSize);
for(int j=0; j<n; j++){
a[j] = j;
b[j] = 0;
}
cudaMemcpy( d_a, a, memSize, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, b, memSize, cudaMemcpyHostToDevice);
cudaEventRecord(start);
dim3 blocksize(256);
dim3 gridsize((n+blocksize.x-1)/(blocksize.x));
kernel_add<<<gridsize, blocksize>>>(n, -3, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, -2, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, -1, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 0, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 1, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 2, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 3, d_a, d_b);
cudaEventRecord(stop);
cudaMemcpy( b, d_b, memSize, cudaMemcpyDeviceToHost);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("runtime [s]: %f\n", milliseconds/1000.0);
for(int j=0; j<10; j++)
printf("%d\n",b[j]);
cudaFree(d_a);
free(a);
cudaFree(d_b);
free(b);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
// Kernel adding up the adjacent entries (radius of 3) of a 1D array
//
// initial approach
// * 7 kernels, each adding one element to the sum
// * data always read from main memory
__global__ void kernel_add(int n, int offset, int *a, int *b)
{
int i = blockDim.x*blockIdx.x+threadIdx.x;
int j = i + offset;
if( j>-1 && j<n ){
b[i]+=a[j];
}
}
int main() {
int n=2000000;
int memSize = n*sizeof(int);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int *a, *d_a;
a = (int*) malloc (n*sizeof(*a));
hipMalloc( (void**) &d_a, memSize);
int *b, *d_b;
b = (int*) malloc (n*sizeof(*b));
hipMalloc( (void**) &d_b, memSize);
for(int j=0; j<n; j++){
a[j] = j;
b[j] = 0;
}
hipMemcpy( d_a, a, memSize, hipMemcpyHostToDevice);
hipMemcpy( d_b, b, memSize, hipMemcpyHostToDevice);
hipEventRecord(start);
dim3 blocksize(256);
dim3 gridsize((n+blocksize.x-1)/(blocksize.x));
kernel_add<<<gridsize, blocksize>>>(n, -3, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, -2, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, -1, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 0, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 1, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 2, d_a, d_b);
kernel_add<<<gridsize, blocksize>>>(n, 3, d_a, d_b);
hipEventRecord(stop);
hipMemcpy( b, d_b, memSize, hipMemcpyDeviceToHost);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("runtime [s]: %f\n", milliseconds/1000.0);
for(int j=0; j<10; j++)
printf("%d\n",b[j]);
hipFree(d_a);
free(a);
hipFree(d_b);
free(b);
return 0;
}
|
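In both versions the stop event only completes as a side effect of the blocking hipMemcpy that follows it. A small sketch of the more explicit ordering, reusing the same variables:
hipEventRecord(stop);
hipEventSynchronize(stop);   // wait for the stop event before querying it
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);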
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__device__ float do_fraction(float numer, float denom) {
float result = 0.f;
if((numer == denom) && (numer != 0.f))
result = 1.f;
else if(denom != 0.f)
result = numer / denom;
return result;
}
__global__ void get_bin_scores(int nbins, int order, int nknots, float * knots, int nsamples, int nx, float * x, int pitch_x, float * bins, int pitch_bins)
{
int
col_x = blockDim.x * blockIdx.x + threadIdx.x;
if(col_x >= nx)
return;
float
ld, rd, z,
term1, term2,
* in_col = x + col_x * pitch_x,
* bin_col = bins + col_x * pitch_bins;
int i0;
for(int k = 0; k < nsamples; k++, bin_col += nbins) {
z = in_col[k];
i0 = (int)floorf(z) + order - 1;
if(i0 >= nbins)
i0 = nbins - 1;
bin_col[i0] = 1.f;
for(int i = 2; i <= order; i++) {
for(int j = i0 - i + 1; j <= i0; j++) {
rd = do_fraction(knots[j + i] - z, knots[j + i] - knots[j + 1]);
if((j < 0) || (j >= nbins) || (j >= nknots) || (j + i - 1 < 0) || (j > nknots))
term1 = 0.f;
else {
ld = do_fraction(z - knots[j],
knots[j + i - 1] - knots[j]);
term1 = ld * bin_col[j];
}
if((j + 1 < 0) || (j + 1 >= nbins) || (j + 1 >= nknots) || (j + i < 0) || (j + i >= nknots))
term2 = 0.f;
else {
rd = do_fraction(knots[j + i] - z,
knots[j + i] - knots[j + 1]);
term2 = rd * bin_col[j + 1];
}
bin_col[j] = term1 + term2;
}
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__device__ float do_fraction(float numer, float denom) {
float result = 0.f;
if((numer == denom) && (numer != 0.f))
result = 1.f;
else if(denom != 0.f)
result = numer / denom;
return result;
}
__global__ void get_bin_scores(int nbins, int order, int nknots, float * knots, int nsamples, int nx, float * x, int pitch_x, float * bins, int pitch_bins)
{
int
col_x = blockDim.x * blockIdx.x + threadIdx.x;
if(col_x >= nx)
return;
float
ld, rd, z,
term1, term2,
* in_col = x + col_x * pitch_x,
* bin_col = bins + col_x * pitch_bins;
int i0;
for(int k = 0; k < nsamples; k++, bin_col += nbins) {
z = in_col[k];
i0 = (int)floorf(z) + order - 1;
if(i0 >= nbins)
i0 = nbins - 1;
bin_col[i0] = 1.f;
for(int i = 2; i <= order; i++) {
for(int j = i0 - i + 1; j <= i0; j++) {
rd = do_fraction(knots[j + i] - z, knots[j + i] - knots[j + 1]);
if((j < 0) || (j >= nbins) || (j >= nknots) || (j + i - 1 < 0) || (j > nknots))
term1 = 0.f;
else {
ld = do_fraction(z - knots[j],
knots[j + i - 1] - knots[j]);
term1 = ld * bin_col[j];
}
if((j + 1 < 0) || (j + 1 >= nbins) || (j + 1 >= nknots) || (j + i < 0) || (j + i >= nknots))
term2 = 0.f;
else {
rd = do_fraction(knots[j + i] - z,
knots[j + i] - knots[j + 1]);
term2 = rd * bin_col[j + 1];
}
bin_col[j] = term1 + term2;
}
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/**
Research 4 Fun
metaCuda.cu
Purpose: Calculates the n-th Fibonacci number and the Factorial of a number
from CUDA + Template Meta-Programming
@author O. A. Riveros
@version 1.0 28 May 2014 Santiago Chile.
*/
#include <iostream>
#include <ctime>
using namespace std;
// Begin CUDA
///////////////
// Fibonacci //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFibonacci() {
return cuMetaFibonacci<N - 1>() + cuMetaFibonacci<N - 2>();
}
template<>
__device__ unsigned long cuMetaFibonacci<0>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<1>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<2>() {
return 1;
}
template<unsigned long N>
__global__ void cuFibonacci(unsigned long *out) {
*out = cuMetaFibonacci<N>();
}
///////////////
// Factorial //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFactorial() {
return N * cuMetaFactorial<N - 1>();
}
template<>
__device__ unsigned long cuMetaFactorial<1>() {
return 1;
}
template<unsigned long N>
__global__ void cuFactorial(unsigned long *out) {
*out = cuMetaFactorial<N>();
}
// End CUDA
int main() {
///////////////
// Fibonacci //
///////////////
size_t size = sizeof(unsigned long);
unsigned long h_out[] = { 0 };
unsigned long *d_out;
cudaMalloc((void **) &d_out, size);
cudaMemcpy(d_out, h_out, size, cudaMemcpyHostToDevice);
clock_t startTime = clock();
cuFibonacci<20> <<<1, 1>>>(d_out);
clock_t endTime = clock();
clock_t clockTicksTaken = endTime - startTime;
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
cout << h_out[0] << endl;
cudaFree(d_out);
double timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
///////////////
// Factorial //
///////////////
cudaMalloc((void **) &d_out, size);
cudaMemcpy(d_out, h_out, size, cudaMemcpyHostToDevice);
startTime = clock();
cuFactorial<20> <<<1, 1>>>(d_out);
endTime = clock();
clockTicksTaken = endTime - startTime;
cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
cout << h_out[0] << endl;
cudaFree(d_out);
timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
}
// Original Output
// 11:56:05 Build Finished (took 16s.185ms)
// 6765
// 4.2e-05
// 2432902008176640000
// 9e-06
|
/**
Research 4 Fun
metaCuda.cu
Purpose: Calculates the n-th Fibonacci number and the Factorial of a number
from CUDA + Template Meta-Programming
@author O. A. Riveros
@version 1.0 28 May 2014 Santiago Chile.
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <ctime>
using namespace std;
// Begin CUDA
///////////////
// Fibonacci //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFibonacci() {
return cuMetaFibonacci<N - 1>() + cuMetaFibonacci<N - 2>();
}
template<>
__device__ unsigned long cuMetaFibonacci<0>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<1>() {
return 1;
}
template<>
__device__ unsigned long cuMetaFibonacci<2>() {
return 1;
}
template<unsigned long N>
__global__ void cuFibonacci(unsigned long *out) {
*out = cuMetaFibonacci<N>();
}
///////////////
// Factorial //
///////////////
template<unsigned long N>
__device__ unsigned long cuMetaFactorial() {
return N * cuMetaFactorial<N - 1>();
}
template<>
__device__ unsigned long cuMetaFactorial<1>() {
return 1;
}
template<unsigned long N>
__global__ void cuFactorial(unsigned long *out) {
*out = cuMetaFactorial<N>();
}
// End CUDA
int main() {
///////////////
// Fibonacci //
///////////////
size_t size = sizeof(unsigned long);
unsigned long h_out[] = { 0 };
unsigned long *d_out;
hipMalloc((void **) &d_out, size);
hipMemcpy(d_out, h_out, size, hipMemcpyHostToDevice);
clock_t startTime = clock();
cuFibonacci<20> <<<1, 1>>>(d_out);
clock_t endTime = clock();
clock_t clockTicksTaken = endTime - startTime;
hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost);
cout << h_out[0] << endl;
hipFree(d_out);
double timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
///////////////
// Factorial //
///////////////
hipMalloc((void **) &d_out, size);
hipMemcpy(d_out, h_out, size, hipMemcpyHostToDevice);
startTime = clock();
cuFactorial<20> <<<1, 1>>>(d_out);
endTime = clock();
clockTicksTaken = endTime - startTime;
hipMemcpy(h_out, d_out, size, hipMemcpyDeviceToHost);
cout << h_out[0] << endl;
hipFree(d_out);
timeInSeconds = clockTicksTaken / (double) CLOCKS_PER_SEC;
cout << timeInSeconds << endl;
}
// Original Output
// 11:56:05 Build Finished (took 16s.185ms)
// 6765
// 4.2e-05
// 2432902008176640000
// 9e-06
|
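Kernel launches are asynchronous, so the clock() pair in both versions mostly measures launch overhead (consistent with the 4.2e-05 s in the original output). A sketch of the timing with an explicit wait, assuming hipcc:
clock_t startTime = clock();
cuFibonacci<20> <<<1, 1>>>(d_out);
hipDeviceSynchronize();   // wait for the kernel before reading the end time
clock_t endTime = clock();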
Convert the following CUDA code to AMD GPU code:
cuda
/* infix to postfix expression conversion code
by Codingstreet.com */
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
int push(char *stack,char val,int *top,int *size);
int pop(char *stack,int *top);
int isstack_empty(int *top);
int isstack_full(int *top,int *size);
int isstack_empty(int *top){
if((*top)==0) return 1;
return 0;
}
int isstack_full(int *top,int *size){
if((*top)==(*size)-1) return 1;
return 0;
}
int push(char *stack,char val,int *top,int *size){
if(isstack_full(top,size)){
return 0;
}
stack[(*top)++]=val;
return 1;
}
int pop(char *stack,int *top){
if(isstack_empty(top)){
return -1;
}
else return stack[--(*top)];
}
int get_precedence(char c){
switch(c){
case '+':
case '-':
return 1;
case '*':
case '/':
case '%':
return 2;
case '^':
return 0;
case '(':
return -1;
default :
return -2;
}
}
void infix_to_postfix(char *instring,char *outstring){
int i=0,top,size,pred1,pred2,n=0;
char c,c2;
int len=strlen(instring);
if(instring==NULL) return;
char *stack=(char*)malloc(sizeof(char)*(len-1));
top=0;size=len-1;
while(instring[i]!='\0'){
c=instring[i];
if(c==' ') {i++;continue; }
else if(c=='('){
push(stack,c,&top,&size);
}
else if(c=='+' || c=='-' || c=='*' || c=='/' || c=='%'||c=='^'){
if(isstack_empty(&top)) {
push(stack,c,&top,&size);
}
else {
pred1=get_precedence(stack[top-1]);
pred2=get_precedence(c);
while(pred2<=pred1 && !isstack_empty(&top)){
c2=pop(stack,&top);
outstring[n]=c2;
n++;
pred2=get_precedence(stack[top-1]);
pred1=get_precedence(c);
}
push(stack,c,&top,&size);
}
}
else if(c==')'){
while(stack[top-1]!='('){
c2=pop(stack,&top);
outstring[n]=c2;
n++;
}
pop(stack,&top);
}
else {
outstring[n]=c;
n++;
}
i++;
}
while(!isstack_empty(&top)){
c=pop(stack,&top);
outstring[n]=c;
n++;
}
outstring[n]='\0';
}
/*int main(){
char str[]="((a+t)*((b+(a+c))^(c+d)))";
char out[100];
infix_to_postfix(str,out);
printf("\nInput string :%s \nOutput: %s ",str,out);
return 0;
}*/
|
/* infix to postfix expression conversion code
by Codingstreet.com */
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
int push(char *stack,char val,int *top,int *size);
int pop(char *stack,int *top);
int isstack_empty(int *top);
int isstack_full(int *top,int *size);
int isstack_empty(int *top){
if((*top)==0) return 1;
return 0;
}
int isstack_full(int *top,int *size){
if((*top)==(*size)-1) return 1;
return 0;
}
int push(char *stack,char val,int *top,int *size){
if(isstack_full(top,size)){
return 0;
}
stack[(*top)++]=val;
return 1;
}
int pop(char *stack,int *top){
if(isstack_empty(top)){
return -1;
}
else return stack[--(*top)];
}
int get_precedence(char c){
switch(c){
case '+':
case '-':
return 1;
case '*':
case '/':
case '%':
return 2;
case '^':
return 0;
case '(':
return -1;
default :
return -2;
}
}
void infix_to_postfix(char *instring,char *outstring){
int i=0,top,size,pred1,pred2,n=0;
char c,c2;
int len=strlen(instring);
if(instring==NULL) return;
char *stack=(char*)malloc(sizeof(char)*(len-1));
top=0;size=len-1;
while(instring[i]!='\0'){
c=instring[i];
if(c==' ') {i++;continue; }
else if(c=='('){
push(stack,c,&top,&size);
}
else if(c=='+' || c=='-' || c=='*' || c=='/' || c=='%'||c=='^'){
if(isstack_empty(&top)) {
push(stack,c,&top,&size);
}
else {
pred1=get_precedence(stack[top-1]);
pred2=get_precedence(c);
while(pred2<=pred1 && !isstack_empty(&top)){
c2=pop(stack,&top);
outstring[n]=c2;
n++;
pred2=get_precedence(stack[top-1]);
pred1=get_precedence(c);
}
push(stack,c,&top,&size);
}
}
else if(c==')'){
while(stack[top-1]!='('){
c2=pop(stack,&top);
outstring[n]=c2;
n++;
}
pop(stack,&top);
}
else {
outstring[n]=c;
n++;
}
i++;
}
while(!isstack_empty(&top)){
c=pop(stack,&top);
outstring[n]=c;
n++;
}
outstring[n]='\0';
}
/*int main(){
char str[]="((a+t)*((b+(a+c))^(c+d)))";
char out[100];
infix_to_postfix(str,out);
printf("\nInput string :%s \nOutput: %s ",str,out);
return 0;
}*/
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void matrixAddKernel3(float* ans, float* M, float* N, int size) {
int col = blockIdx.x*blockDim.x + threadIdx.x;
if(col < size) {
for(int i = 0; i < size; ++i)
ans[i*size + col] = M[i*size + col] + N[i*size + col];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void matrixAddKernel3(float* ans, float* M, float* N, int size) {
int col = blockIdx.x*blockDim.x + threadIdx.x;
if(col < size) {
for(int i = 0; i < size; ++i)
ans[i*size + col] = M[i*size + col] + N[i*size + col];
}
}
|
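For context, a hypothetical host-side launch for this kernel; the pointers d_ans, d_M, d_N and the block size are assumptions, not part of the original:
// One thread per column of a size-by-size matrix.
dim3 block(256);
dim3 grid((size + block.x - 1) / block.x);
matrixAddKernel3<<<grid, block>>>(d_ans, d_M, d_N, size);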
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
__global__ void image_conversion(unsigned char *colorImage, unsigned char *grayImage, long long imageWidth, long long imageHeight)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x*blockDim.y + y;
if(idx<imageWidth*imageHeight)
{
int r, g, b;
r = colorImage[3*idx];
g = colorImage[3*idx + 1];
b = colorImage[3*idx + 2];
grayImage[idx] = (unsigned char)((21*r + 71*g + 7*b)/100);
//grayImage[idx] = (pixel);
//grayImage[3*idx+1] = grayImage[3*idx+2] = grayImage[3*idx] = pixel;
}
}
int main( int argc, char* argv[] )
{
FILE *fptr = fopen("parallel_image_conversion.txt", "w");
//fprintf(fptr, "imageHeight x imageWidth \t Time(milli) \n", );
int i;
for (i = 7; i < 28; i++)
{
unsigned char *colorImage_cpu;
unsigned char *grayImage_cpu;
char header[100];
long long imageWidth, imageHeight, ccv;
char filename[50];
snprintf(filename, sizeof(filename), "Lenna_%d.ppm", i);
FILE *color = fopen(filename, "rb");
fscanf(color, "%s\n%lld %lld\n%lld\n", header, &imageWidth, &imageHeight, &ccv);
size_t bytes = imageWidth*imageHeight*sizeof(unsigned char);
colorImage_cpu = (unsigned char*)malloc(bytes*3);
grayImage_cpu = (unsigned char*)malloc(bytes);
FILE *gray = fopen("gray.ppm", "wb");
fprintf(gray, "P5\n%d %d\n255\n", imageWidth, imageHeight);
fread(colorImage_cpu, sizeof(unsigned char), imageWidth*imageHeight*3, color);
fclose(color);
//for(int j=0; j<imageWidth*imageHeight*3; j++)
// printf("%d ", colorImage_cpu[j]);
unsigned char *colorImage_gpu;
unsigned char *grayImage_gpu;
cudaMalloc(&colorImage_gpu, bytes*3);
cudaMalloc(&grayImage_gpu, bytes);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(colorImage_gpu, colorImage_cpu, bytes*3, cudaMemcpyHostToDevice);
cudaMemcpy(grayImage_gpu, grayImage_cpu, bytes, cudaMemcpyHostToDevice);
dim3 blocksize(32, 32);
dim3 gridsize((int)ceil((float)imageHeight/32.0),(int)ceil((float)imageWidth/32.0));
//printf("%d %d\n",(int)ceil((float)imageHeight/32),(int)ceil((float)imageWidth/32));
cudaEventRecord(start);
image_conversion<<<gridsize, blocksize>>>(colorImage_gpu, grayImage_gpu, imageWidth, imageHeight);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy( grayImage_cpu, grayImage_gpu, bytes, cudaMemcpyDeviceToHost );
cudaFree(colorImage_gpu);
cudaFree(grayImage_gpu);
fprintf(fptr, "%d %ldx%ld %lf\n", i, imageHeight, imageWidth, milliseconds);
//for(int j=0; j<imageWidth*imageHeight; j++)
// printf("%hhu ", grayImage_cpu[j]);
fwrite(grayImage_cpu, sizeof(unsigned char), imageWidth*imageHeight, gray);
fclose(gray);
free(colorImage_cpu);
free(grayImage_cpu);
}
fclose(fptr);
return 0;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
__global__ void image_conversion(unsigned char *colorImage, unsigned char *grayImage, long long imageWidth, long long imageHeight)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int idx = x*blockDim.y + y;
if(idx<imageWidth*imageHeight)
{
int r, g, b;
r = colorImage[3*idx];
g = colorImage[3*idx + 1];
b = colorImage[3*idx + 2];
grayImage[idx] = (unsigned char)((21*r + 71*g + 7*b)/100);
//grayImage[idx] = (pixel);
//grayImage[3*idx+1] = grayImage[3*idx+2] = grayImage[3*idx] = pixel;
}
}
int main( int argc, char* argv[] )
{
FILE *fptr = fopen("parallel_image_conversion.txt", "w");
//fprintf(fptr, "imageHeight x imageWidth \t Time(milli) \n", );
int i;
for (i = 7; i < 28; i++)
{
unsigned char *colorImage_cpu;
unsigned char *grayImage_cpu;
char header[100];
long long imageWidth, imageHeight, ccv;
char filename[50];
snprintf(filename, sizeof(filename), "Lenna_%d.ppm", i);
FILE *color = fopen(filename, "rb");
fscanf(color, "%s\n%lld %lld\n%lld\n", header, &imageWidth, &imageHeight, &ccv);
size_t bytes = imageWidth*imageHeight*sizeof(unsigned char);
colorImage_cpu = (unsigned char*)malloc(bytes*3);
grayImage_cpu = (unsigned char*)malloc(bytes);
FILE *gray = fopen("gray.ppm", "wb");
fprintf(gray, "P5\n%d %d\n255\n", imageWidth, imageHeight);
fread(colorImage_cpu, sizeof(unsigned char), imageWidth*imageHeight*3, color);
fclose(color);
//for(int j=0; j<imageWidth*imageHeight*3; j++)
// printf("%d ", colorImage_cpu[j]);
unsigned char *colorImage_gpu;
unsigned char *grayImage_gpu;
hipMalloc(&colorImage_gpu, bytes*3);
hipMalloc(&grayImage_gpu, bytes);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMemcpy(colorImage_gpu, colorImage_cpu, bytes*3, hipMemcpyHostToDevice);
hipMemcpy(grayImage_gpu, grayImage_cpu, bytes, hipMemcpyHostToDevice);
dim3 blocksize(32, 32);
dim3 gridsize((int)ceil((float)imageHeight/32.0),(int)ceil((float)imageWidth/32.0));
//printf("%d %d\n",(int)ceil((float)imageHeight/32),(int)ceil((float)imageWidth/32));
hipEventRecord(start);
image_conversion<<<gridsize, blocksize>>>(colorImage_gpu, grayImage_gpu, imageWidth, imageHeight);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy( grayImage_cpu, grayImage_gpu, bytes, hipMemcpyDeviceToHost );
hipFree(colorImage_gpu);
hipFree(grayImage_gpu);
fprintf(fptr, "%d %ldx%ld %lf\n", i, imageHeight, imageWidth, milliseconds);
//for(int j=0; j<imageWidth*imageHeight; j++)
// printf("%hhu ", grayImage_cpu[j]);
fwrite(grayImage_cpu, sizeof(unsigned char), imageWidth*imageHeight, gray);
fclose(gray);
free(colorImage_cpu);
free(grayImage_cpu);
}
fclose(fptr);
return 0;
}
|
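Both versions create a fresh pair of events on every loop iteration and never release them. A small sketch of the cleanup that could close each iteration, using the existing handles:
hipEventDestroy(start);
hipEventDestroy(stop);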
Convert the following CUDA code to AMD GPU code:
cuda
#include <thrust/device_vector.h>
//#include<iostream>
//using std::cout;
//using std::endl;
//functor
struct Smooth {
//starting points of x, y, and m
thrust::device_vector<float>::iterator dmIt;
thrust::device_vector<float>::iterator dxIt;
thrust::device_vector<float>::iterator dyIt;
int n;
float h;
//will be the arrays for x, y, and m after converted
//from the iterators
float *x, *y, *m;
//constructor for Smooth
Smooth(thrust::device_vector<float>::iterator dm,
thrust::device_vector<float>::iterator dx,
thrust::device_vector<float>::iterator dy,
int _n, float _h):
dmIt(dm), dxIt(dx), dyIt(dy), n(_n), h(_h) {
//convert iterators to arrays so we can use brackets []
m = thrust::raw_pointer_cast(&dm[0]);
x = thrust::raw_pointer_cast(&dx[0]);
y = thrust::raw_pointer_cast(&dy[0]);
};
__device__ float myAbs(float value) {
return (value < 0) ? (value * -1) : value;
} //end abs function
//overloaded operator function called implicitly from the for_each call
__device__
void operator() (const int me) {
float xi = x[me];
float sum = 0;
float count = 0;
for(int j=0; j<n; j++){
//now iterate through the j values
float xj = x[j]; //not needed - used for better visibility
if(myAbs(xj-xi) < h){
sum += y[j];
count++;
} //end abs if condition
} //end j for loop
//store the average of me in m
m[me] = sum / count;
}
};
void smootht(float *x, float *y, float *m, int n, float h){
//copy data to the device
thrust::device_vector<float> dx(x,x+n);
thrust::device_vector<float> dy(y,y+n);
//setup output vector
thrust::device_vector<float> dm(n);
//sequence iterators to go through for_each
thrust::counting_iterator<float> seqb(0);
thrust::counting_iterator<float> seqe = seqb + n;
//loop through x, y, and m and find averages on device
thrust::for_each(seqb, seqe, Smooth(dm.begin(), dx.begin(), dy.begin(), n, h));
//copy averages from device to m
thrust::copy(dm.begin(), dm.end(), m);
}
/*
int main(void) {
int n = 600000;
float x[n];
float y[n];
float h = 0.1;
float xcount = 15000.0;
//float ycount = 15.0;
for(int i = 0; i < n; i++) {
x[i] = y[i] = xcount;
xcount -= 0.0001;
}
//return array
float m[n];
smootht(x, y, m, n, h);
for(int i = 0; i < n; i++) {
cout<<m[i]<<" ";
} cout<<endl;
}
*/
|
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
//#include<iostream>
//using std::cout;
//using std::endl;
//functor
struct Smooth {
//starting points of x, y, and m
thrust::device_vector<float>::iterator dmIt;
thrust::device_vector<float>::iterator dxIt;
thrust::device_vector<float>::iterator dyIt;
int n;
float h;
//will be the arrays for x, y, and m after converted
//from the iterators
float *x, *y, *m;
//constructor for Smooth
Smooth(thrust::device_vector<float>::iterator dm,
thrust::device_vector<float>::iterator dx,
thrust::device_vector<float>::iterator dy,
int _n, float _h):
dmIt(dm), dxIt(dx), dyIt(dy), n(_n), h(_h) {
//convert iterators to arrays so we can use brackets []
m = thrust::raw_pointer_cast(&dm[0]);
x = thrust::raw_pointer_cast(&dx[0]);
y = thrust::raw_pointer_cast(&dy[0]);
};
__device__ float myAbs(float value) {
return (value < 0) ? (value * -1) : value;
} //end abs function
//overloaded operator function called implicitly from the for_each call
__device__
void operator() (const int me) {
float xi = x[me];
float sum = 0;
float count = 0;
for(int j=0; j<n; j++){
//now iterate through the j values
float xj = x[j]; //not needed - used for better visibility
if(myAbs(xj-xi) < h){
sum += y[j];
count++;
} //end abs if condition
} //end j for loop
//store the average of me in m
m[me] = sum / count;
}
};
void smootht(float *x, float *y, float *m, int n, float h){
//copy data to the device
thrust::device_vector<float> dx(x,x+n);
thrust::device_vector<float> dy(y,y+n);
//setup output vector
thrust::device_vector<float> dm(n);
//sequence iterators to go through for_each
thrust::counting_iterator<float> seqb(0);
thrust::counting_iterator<float> seqe = seqb + n;
//loop through x, y, and m and find averages on device
thrust::for_each(seqb, seqe, Smooth(dm.begin(), dx.begin(), dy.begin(), n, h));
//copy averages from device to m
thrust::copy(dm.begin(), dm.end(), m);
}
/*
int main(void) {
int n = 600000;
float x[n];
float y[n];
float h = 0.1;
float xcount = 15000.0;
//float ycount = 15.0;
for(int i = 0; i < n; i++) {
x[i] = y[i] = xcount;
xcount -= 0.0001;
}
//return array
float m[n];
smootht(x, y, m, n, h);
for(int i = 0; i < n; i++) {
cout<<m[i]<<" ";
} cout<<endl;
}
*/
|
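The thrust code converts without renaming because rocThrust ships the same thrust:: headers on ROCm. If an explicit execution policy is preferred (with <thrust/execution_policy.h> included), the same call can be written as the sketch below:
thrust::for_each(thrust::device, seqb, seqe,
                 Smooth(dm.begin(), dx.begin(), dy.begin(), n, h));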
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <stdio.h>
#include <cstdlib>
#include <ctime>
/**
* Given a device array of integers, compute the index of first nonzero
* entry in the array, from left to right.
*
* For example, operating on the array
*
* 0 1 2 3 4 5 6
* [0, 0, 0, -1, 0, 0, 2]
*
* gets the index 3. The result is stored into deg_ptr (initial value is n).
*
*/
template <int N_THD>
__global__ void degree_ker(const int *X, int n, int* deg_ptr) {
int tid = blockIdx.x * N_THD + threadIdx.x;
if ((tid < n) && (X[tid] != 0)) {
atomicMin(deg_ptr, tid);
}
}
using namespace std;
int main(int argc, char** argv) {
int n = 30;
if (argc > 1) n = atoi(argv[1]);
int *X = new int[n+1]();
srand(time(NULL));
int r = rand() % n + 1;
for (int i = 0; i < n; ++i) { X[i] = i / r; }
X[n] = n;
//for (int i = 0; i <= n; ++i) printf("%2d ", i);
//printf("\n");
//for (int i = 0; i <= n; ++i) printf("%2d ", X[i]);
//printf("\n");
int *X_d;
cudaMalloc((void **)&X_d, sizeof(int)*(n+1));
cudaMemcpy(X_d, X, sizeof(int)*(n+1), cudaMemcpyHostToDevice);
const int nthd = 16;
int nb = (n / nthd) + ((n % nthd) ? 1 : 0);
int *deg_dev = X_d + n;
degree_ker<nthd><<<nb, nthd>>>(X_d, n, deg_dev);
int deg;
cudaMemcpy(&deg, deg_dev, sizeof(int), cudaMemcpyDeviceToHost);
printf("r = %d, index = %d\n", r, deg);
delete [] X;
cudaFree(X_d);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <cstdlib>
#include <ctime>
/**
* Given a device array of integers, compute the index of first nonzero
* entry in the array, from left to right.
*
* For example, operating on the array
*
* 0 1 2 3 4 5 6
* [0, 0, 0, -1, 0, 0, 2]
*
* gets the index 3. The result is stored into deg_ptr (initial value is n).
*
*/
template <int N_THD>
__global__ void degree_ker(const int *X, int n, int* deg_ptr) {
int tid = blockIdx.x * N_THD + threadIdx.x;
if ((tid < n) && (X[tid] != 0)) {
atomicMin(deg_ptr, tid);
}
}
using namespace std;
int main(int argc, char** argv) {
int n = 30;
if (argc > 1) n = atoi(argv[1]);
int *X = new int[n+1]();
srand(time(NULL));
int r = rand() % n + 1;
for (int i = 0; i < n; ++i) { X[i] = i / r; }
X[n] = n;
//for (int i = 0; i <= n; ++i) printf("%2d ", i);
//printf("\n");
//for (int i = 0; i <= n; ++i) printf("%2d ", X[i]);
//printf("\n");
int *X_d;
hipMalloc((void **)&X_d, sizeof(int)*(n+1));
hipMemcpy(X_d, X, sizeof(int)*(n+1), hipMemcpyHostToDevice);
const int nthd = 16;
int nb = (n / nthd) + ((n % nthd) ? 1 : 0);
int *deg_dev = X_d + n;
degree_ker<nthd><<<nb, nthd>>>(X_d, n, deg_dev);
int deg;
hipMemcpy(&deg, deg_dev, sizeof(int), hipMemcpyDeviceToHost);
printf("r = %d, index = %d\n", r, deg);
delete [] X;
hipFree(X_d);
return 0;
}
|
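atomicMin on a global int behaves identically under HIP, so the kernel body is unchanged. The result slot is seeded by X[n] = n on the host; an equivalent, more explicit reset before the launch would be (a sketch, not in the original):
hipMemcpy(deg_dev, &n, sizeof(int), hipMemcpyHostToDevice);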
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void CalculateDiffSample( float *cur, float *pre, const int wts, const int hts ){
const int yts = blockIdx.y * blockDim.y + threadIdx.y;
const int xts = blockIdx.x * blockDim.x + threadIdx.x;
const int curst = wts * yts + xts;
if (yts < hts && xts < wts){
cur[curst*3+0] -= pre[curst*3+0];
cur[curst*3+1] -= pre[curst*3+1];
cur[curst*3+2] -= pre[curst*3+2];
pre[curst*3+0] = 0;
pre[curst*3+1] = 0;
pre[curst*3+2] = 0;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void CalculateDiffSample( float *cur, float *pre, const int wts, const int hts ){
const int yts = blockIdx.y * blockDim.y + threadIdx.y;
const int xts = blockIdx.x * blockDim.x + threadIdx.x;
const int curst = wts * yts + xts;
if (yts < hts && xts < wts){
cur[curst*3+0] -= pre[curst*3+0];
cur[curst*3+1] -= pre[curst*3+1];
cur[curst*3+2] -= pre[curst*3+2];
pre[curst*3+0] = 0;
pre[curst*3+1] = 0;
pre[curst*3+2] = 0;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
__global__ void kernel(int *out, int *in, const int n)
{
unsigned int i = threadIdx.x;
if (i < n) {
out[i] = in[i] * 2;
}
}
int main(){
thrust::host_vector<int> hVectorIn(1024);
for(int i=0;i<1024;i++){
hVectorIn[i]=i;
}
thrust::device_vector<int> dVectorIn(1024);
thrust::device_vector<int> dVectorOut(1024);
dVectorIn=hVectorIn;
kernel<<<1,1024>>>(
thrust::raw_pointer_cast(dVectorOut.data()),
thrust::raw_pointer_cast(dVectorIn.data()),
1024);
thrust::host_vector<int> hVectorOut = dVectorOut;
for(int i=0;i<1024;i++){
std::cout << i << " " << hVectorOut[i] << " " << hVectorIn[i] << std::endl;
}
}
|
#include <hip/hip_runtime.h>
#include "thrust/host_vector.h"
#include "thrust/device_vector.h"
__global__ void kernel(int *out, int *in, const int n)
{
unsigned int i = threadIdx.x;
if (i < n) {
out[i] = in[i] * 2;
}
}
int main(){
thrust::host_vector<int> hVectorIn(1024);
for(int i=0;i<1024;i++){
hVectorIn[i]=i;
}
thrust::device_vector<int> dVectorIn(1024);
thrust::device_vector<int> dVectorOut(1024);
dVectorIn=hVectorIn;
kernel<<<1,1024>>>(
thrust::raw_pointer_cast(dVectorOut.data()),
thrust::raw_pointer_cast(dVectorIn.data()),
1024);
thrust::host_vector<int> hVectorOut = dVectorOut;
for(int i=0;i<1024;i++){
std::cout << i << " " << hVectorOut[i] << " " << hVectorIn[i] << std::endl;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void box_iou_cuda_kernel(float *box_iou, float4 *box1, float4 *box2, long M, long N, int idxJump) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset;
float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2;
float x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou;
for (long i = idx; i < M * N; i += idxJump){
b1_idx = i / N;
b2_idx = i % N;
b1_row_offset = b1_idx;
b2_row_offset = b2_idx;
xmin1 = box1[b1_row_offset].x;
ymin1 = box1[b1_row_offset].y;
xmax1 = box1[b1_row_offset].z;
ymax1 = box1[b1_row_offset].w;
xmin2 = box2[b2_row_offset].x;
ymin2 = box2[b2_row_offset].y;
xmax2 = box2[b2_row_offset].z;
ymax2 = box2[b2_row_offset].w;
x_tl = fmaxf(xmin1, xmin2);
y_tl = fmaxf(ymin1, ymin2);
x_br = fminf(xmax1, xmax2);
y_br = fminf(ymax1, ymax2);
w = (x_br - x_tl + 1) < 0 ? 0.0f : (x_br - x_tl + 1);
h = (y_br - y_tl + 1) < 0 ? 0.0f : (y_br - y_tl + 1);
inter = w * h;
area1 = (xmax1 - xmin1 + 1) * (ymax1 - ymin1 + 1);
area2 = (xmax2 - xmin2 + 1) * (ymax2 - ymin2 + 1);
iou = inter / (area1 + area2 - inter);
box_iou[b1_idx * N + b2_idx] = iou;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void box_iou_cuda_kernel(float *box_iou, float4 *box1, float4 *box2, long M, long N, int idxJump) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
size_t b1_idx, b2_idx, b1_row_offset, b2_row_offset;
float xmin1, xmin2, xmax1, xmax2, ymin1, ymin2, ymax1, ymax2;
float x_tl, y_tl, x_br, y_br, w, h, inter, area1, area2, iou;
for (long i = idx; i < M * N; i += idxJump){
b1_idx = i / N;
b2_idx = i % N;
b1_row_offset = b1_idx;
b2_row_offset = b2_idx;
xmin1 = box1[b1_row_offset].x;
ymin1 = box1[b1_row_offset].y;
xmax1 = box1[b1_row_offset].z;
ymax1 = box1[b1_row_offset].w;
xmin2 = box2[b2_row_offset].x;
ymin2 = box2[b2_row_offset].y;
xmax2 = box2[b2_row_offset].z;
ymax2 = box2[b2_row_offset].w;
x_tl = fmaxf(xmin1, xmin2);
y_tl = fmaxf(ymin1, ymin2);
x_br = fminf(xmax1, xmax2);
y_br = fminf(ymax1, ymax2);
w = (x_br - x_tl + 1) < 0 ? 0.0f : (x_br - x_tl + 1);
h = (y_br - y_tl + 1) < 0 ? 0.0f : (y_br - y_tl + 1);
inter = w * h;
area1 = (xmax1 - xmin1 + 1) * (ymax1 - ymin1 + 1);
area2 = (xmax2 - xmin2 + 1) * (ymax2 - ymin2 + 1);
iou = inter / (area1 + area2 - inter);
box_iou[b1_idx * N + b2_idx] = iou;
}
}
|
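idxJump is used as the grid stride, so a typical launch passes the total thread count for it. A hypothetical example; d_iou, d_box1, d_box2 and the launch shape are assumptions:
int threads = 256;
int blocks = 80;   // or derive from M * N
box_iou_cuda_kernel<<<blocks, threads>>>(d_iou, d_box1, d_box2, M, N, blocks * threads);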
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <ctime>
#include <unistd.h>
#include <algorithm>
//workers computing square of rands
__global__ void kerSquare(int *randsDev,int* resDev){
int myId = blockIdx.x * blockDim.x + threadIdx.x;
//std::cout << myId << ", ";
resDev[myId] = randsDev[myId] * randsDev[myId];
}
int main(){
std::srand(std::time(NULL));
int n = 20000;
int arraySize=2000;
int size = arraySize*sizeof(int);
int *randsDev, *resDev, tmp[arraySize], finalRes[n], offs = 0;
int count = 0;
int random_variable;
unsigned int microseconds = 400;
if(microseconds<=400){
cudaMalloc(&resDev, size);
dim3 grid(10,1);
dim3 block(200,1);
for(int i=0;i<n;i+=1){
//emitter
random_variable = std::rand()%100;
tmp[count]=random_variable;
count+=1;
usleep(microseconds);
if(count==arraySize){
cudaMalloc(&randsDev, size);
std::cout << "copying randoms to device mem"<< std::endl;
cudaMemcpy(randsDev, tmp, size, cudaMemcpyHostToDevice);
//worker
std::cout << "calling ker function"<< std::endl;
kerSquare<<<grid,block>>>(randsDev, resDev);
//collector
cudaMemcpy(tmp, resDev, size, cudaMemcpyDeviceToHost);
std::cout << std::endl << "copying back results"<< std::endl;
offs+=count;
std::copy(tmp, tmp+arraySize-1, finalRes+offs);
count=0;
}
}
}
else{
//Execute on CPU?
}
for(int i=0;i<n;i+=1){
std::cout<<"\t"<<finalRes[i];
}
std::cout<<std::endl;
//free mem
cudaFree(randsDev);
cudaFree(resDev);
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <ctime>
#include <unistd.h>
#include <algorithm>
//workers computing square of rands
__global__ void kerSquare(int *randsDev,int* resDev){
int myId = blockIdx.x * blockDim.x + threadIdx.x;
//std::cout << myId << ", ";
resDev[myId] = randsDev[myId] * randsDev[myId];
}
int main(){
std::srand(std::time(NULL));
int n = 20000;
int arraySize=2000;
int size = arraySize*sizeof(int);
int *randsDev, *resDev, tmp[arraySize], finalRes[n], offs = 0;
int count = 0;
int random_variable;
unsigned int microseconds = 400;
if(microseconds<=400){
hipMalloc(&resDev, size);
dim3 grid(10,1);
dim3 block(200,1);
for(int i=0;i<n;i+=1){
//emitter
random_variable = std::rand()%100;
tmp[count]=random_variable;
count+=1;
usleep(microseconds);
if(count==arraySize){
hipMalloc(&randsDev, size);
std::cout << "copying randoms to device mem"<< std::endl;
hipMemcpy(randsDev, tmp, size, hipMemcpyHostToDevice);
//worker
std::cout << "calling ker function"<< std::endl;
kerSquare<<<grid,block>>>(randsDev, resDev);
//collector
hipMemcpy(tmp, resDev, size, hipMemcpyDeviceToHost);
std::cout << std::endl << "copying back results"<< std::endl;
offs+=count;
std::copy(tmp, tmp+arraySize-1, finalRes+offs);
count=0;
}
}
}
else{
//Execute on CPU?
}
for(int i=0;i<n;i+=1){
std::cout<<"\t"<<finalRes[i];
}
std::cout<<std::endl;
//free mem
hipFree(randsDev);
hipFree(resDev);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include<time.h>
#define PerThread 1024*4*8 // how many i values each thread computes
#define N 64*256*1024*4 // total number of terms the pi integral is split into
#define BlockNum 32 // number of blocks
#define ThreadNum 64 // number of threads per block
__global__ void Gpu_calPI(double* Gpu_list)
{ // kernel function
int tid=blockIdx.x*blockDim.x*blockDim.y+threadIdx.x;// compute the thread id
int begin=tid*PerThread;
int end=begin+PerThread-1;// work range of this thread
double temp=0;
for(int i=begin;i<end;i++){
temp+=4.0/(1+((i+0.5)/(N))*((i+0.5)/(N)));
}
Gpu_list[tid]=temp;// store the result
}
int main(void)
{
double * cpu_list;
double * Gpu_list;
double outcome=0;
cpu_list=(double*)malloc(sizeof(double)*BlockNum*ThreadNum);
cudaMalloc((void**)&Gpu_list,sizeof(double)*BlockNum*ThreadNum);
// dim3 blocksize=dim3(1,ThreadNum);
// dim3 gridsize=dim3(1,BlockNum);
double begin=clock();
Gpu_calPI<<<BlockNum,ThreadNum>>>(Gpu_list);
cudaMemcpy(cpu_list,Gpu_list,sizeof(double)*BlockNum*ThreadNum,cudaMemcpyDeviceToHost);
for(int i=0;i<BlockNum*ThreadNum;i++){
outcome+=cpu_list[i];
}
outcome=outcome/(N);
double end=clock();
printf("Cu1: N=%d, outcome=%.10f, time =%.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC));
// printf("block x=%d,y=%d\n",blocksize.x,blocksize.y);
// printf("grid x=%d,y=%d\n",gridsize.x,gridsize.y);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include<time.h>
#define PerThread 1024*4*8 // how many i values each thread computes
#define N 64*256*1024*4 // total number of terms the pi integral is split into
#define BlockNum 32 // number of blocks
#define ThreadNum 64 // number of threads per block
__global__ void Gpu_calPI(double* Gpu_list)
{ // kernel function
int tid=blockIdx.x*blockDim.x*blockDim.y+threadIdx.x;// compute the thread id
int begin=tid*PerThread;
int end=begin+PerThread-1;// work range of this thread
double temp=0;
for(int i=begin;i<end;i++){
temp+=4.0/(1+((i+0.5)/(N))*((i+0.5)/(N)));
}
Gpu_list[tid]=temp;// store the result
}
int main(void)
{
double * cpu_list;
double * Gpu_list;
double outcome=0;
cpu_list=(double*)malloc(sizeof(double)*BlockNum*ThreadNum);
hipMalloc((void**)&Gpu_list,sizeof(double)*BlockNum*ThreadNum);
// dim3 blocksize=dim3(1,ThreadNum);
// dim3 gridsize=dim3(1,BlockNum);
double begin=clock();
Gpu_calPI<<<BlockNum,ThreadNum>>>(Gpu_list);
hipMemcpy(cpu_list,Gpu_list,sizeof(double)*BlockNum*ThreadNum,hipMemcpyDeviceToHost);
for(int i=0;i<BlockNum*ThreadNum;i++){
outcome+=cpu_list[i];
}
outcome=outcome/(N);
double end=clock();
printf("Cu1: N=%d, outcome=%.10f, time =%.10f\n",N,outcome,(end-begin)/(CLOCKS_PER_SEC));
// printf("block x=%d,y=%d\n",blocksize.x,blocksize.y);
// printf("grid x=%d,y=%d\n",gridsize.x,gridsize.y);
}
|
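clock() here brackets the launch, the blocking hipMemcpy (which is what forces the kernel to finish) and the host-side reduction. A sketch of the same measurement with HIP events, which isolates the kernel itself (t0, t1 and ms are new names):
hipEvent_t t0, t1;
hipEventCreate(&t0); hipEventCreate(&t1);
hipEventRecord(t0, 0);
Gpu_calPI<<<BlockNum,ThreadNum>>>(Gpu_list);
hipEventRecord(t1, 0);
hipEventSynchronize(t1);
float ms = 0.0f;
hipEventElapsedTime(&ms, t0, t1);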
Convert the following CUDA code to AMD GPU code:
cuda
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
typedef struct
{
long time;
double open;
double high;
double low;
double close;
double volume;
} Minute;
typedef struct
{
int nbrMinutes;
Minute *minutes;
} Data;
typedef struct
{
long seed;
long res;
} Worker;
__global__ void bake(Data data, Worker *workers)
{
int workerNbr = threadIdx.x + blockIdx.x * blockDim.x;
workers[workerNbr].res = (workerNbr * 5 / 2 + 50 / 3 * 5) % 52 == 0 ? 1 : 0;
// for (int i = 0; i < data.nbrMinutes * 0.1; i++)
// {
// if (data.minutes[i].open > workers[workerNbr].seed)
// {
// }
// }
}
Data loadMinutes(char *path)
{
Data data;
int fd = open(path, O_RDONLY);
struct stat buf;
fstat(fd, &buf);
off_t size = buf.st_size;
cudaMallocManaged(&data.minutes, size);
int rd = read(fd, data.minutes, size);
if (rd <= 0)
{
printf("ERROR LOAD FILE\n");
exit(0);
}
data.nbrMinutes = size / sizeof(Minute);
return data;
}
void printMinute(Minute *minute)
{
printf("%ld OPEN: %-10.5lf HIGH: %-10.5lf LOW: %-10.5lf CLOSE: %-10.5lf VOLUME: %-10.5lf\n",
minute->time, minute->open, minute->high, minute->low,
minute->close, minute->volume);
}
void searchPike(Data data)
{
printf("%d\n", data.nbrMinutes);
int founds = 0;
for (int i = 40; i < data.nbrMinutes - 40; i++)
{
double chien = data.minutes[i].open / data.minutes[i + 20].open;
if (chien > 1.025 || chien < 0.975)
{
printf("%lf %d\n", chien, founds);
founds += 1;
}
}
}
int main()
{
Data data = loadMinutes("./data");
// searchPike(data);
int nbrX = 4096 * 8;
int nbrY = 1024;
int nbrThreads = nbrX * nbrY;
// Worker *ramWorkers;
// malloc(ramWorkers, nbrThreads * sizeof(Worker));
Worker *workers;
cudaMalloc(&workers, nbrThreads * sizeof(Worker));
// workers = (Worker *)malloc(nbrThreads * sizeof(Worker));
// cudaMallocManaged(&workers, nbrThreads * sizeof(Worker));
for (long i = 0; 1; i++)
{
bake<<<nbrX, nbrY>>>(data, workers);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
if (i % 10 == 0)
{
printf("DONE %ld - %ld B\n", i, i * nbrX * nbrY / 1000000000);
}
if (i * nbrX * nbrY / 1000000000 >= 100){
// break;
}
}
// for (long i = 0; 1; i++)
// {
// long chien = i * 5 / 2 + 50 / 3 * 5;
// // workers[i % 2 == 0 ? 0 : 1] = chien;
// workers[1].res = chien % 52 == 0 ? 1 : 0;
// if (i % 1000000 == 0)
// {
// printf("DONE %ldM\n", i / 1000000);
// }
// }
// printMinute(&data.minutes[0]);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
typedef struct
{
long time;
double open;
double high;
double low;
double close;
double volume;
} Minute;
typedef struct
{
int nbrMinutes;
Minute *minutes;
} Data;
typedef struct
{
long seed;
long res;
} Worker;
__global__ void bake(Data data, Worker *workers)
{
int workerNbr = threadIdx.x + blockIdx.x * blockDim.x;
workers[workerNbr].res = (workerNbr * 5 / 2 + 50 / 3 * 5) % 52 == 0 ? 1 : 0;
// for (int i = 0; i < data.nbrMinutes * 0.1; i++)
// {
// if (data.minutes[i].open > workers[workerNbr].seed)
// {
// }
// }
}
Data loadMinutes(char *path)
{
Data data;
int fd = open(path, O_RDONLY);
struct stat buf;
fstat(fd, &buf);
off_t size = buf.st_size;
hipMallocManaged(&data.minutes, size);
int rd = read(fd, data.minutes, size);
if (rd <= 0)
{
printf("ERROR LOAD FILE\n");
exit(0);
}
data.nbrMinutes = size / sizeof(Minute);
return data;
}
void printMinute(Minute *minute)
{
printf("%ld OPEN: %-10.5lf HIGH: %-10.5lf LOW: %-10.5lf CLOSE: %-10.5lf VOLUME: %-10.5lf\n",
minute->time, minute->open, minute->high, minute->low,
minute->close, minute->volume);
}
void searchPike(Data data)
{
printf("%d\n", data.nbrMinutes);
int founds = 0;
for (int i = 40; i < data.nbrMinutes - 40; i++)
{
double chien = data.minutes[i].open / data.minutes[i + 20].open;
if (chien > 1.025 || chien < 0.975)
{
printf("%lf %d\n", chien, founds);
founds += 1;
}
}
}
int main()
{
Data data = loadMinutes("./data");
// searchPike(data);
int nbrX = 4096 * 8;
int nbrY = 1024;
int nbrThreads = nbrX * nbrY;
// Worker *ramWorkers;
// malloc(ramWorkers, nbrThreads * sizeof(Worker));
Worker *workers;
hipMalloc(&workers, nbrThreads * sizeof(Worker));
// workers = (Worker *)malloc(nbrThreads * sizeof(Worker));
// cudaMallocManaged(&workers, nbrThreads * sizeof(Worker));
for (long i = 0; 1; i++)
{
bake<<<nbrX, nbrY>>>(data, workers);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
if (i % 10 == 0)
{
printf("DONE %ld - %ld B\n", i, i * nbrX * nbrY / 1000000000);
}
if (i * nbrX * nbrY / 1000000000 >= 100){
// break;
}
}
// for (long i = 0; 1; i++)
// {
// long chien = i * 5 / 2 + 50 / 3 * 5;
// // workers[i % 2 == 0 ? 0 : 1] = chien;
// workers[1].res = chien % 52 == 0 ? 1 : 0;
// if (i % 1000000 == 0)
// {
// printf("DONE %ldM\n", i / 1000000);
// }
// }
// printMinute(&data.minutes[0]);
return 0;
}
|
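hipMallocManaged is the direct counterpart of cudaMallocManaged, but managed memory only works where the device and driver support it. A minimal capability check that could precede loadMinutes, using the standard HIP attribute query:
int managed = 0;
hipDeviceGetAttribute(&managed, hipDeviceAttributeManagedMemory, 0);
if (!managed)
    printf("warning: device 0 does not report managed-memory support\n");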
Convert the following CUDA code to AMD GPU code:
cuda
#include<iostream>
using namespace std;
__global__ void add(int *a,int*b,int *c,int n)
{
int index=blockIdx.x*blockDim.x+threadIdx.x;
if(index<n)
{
c[index]=a[index]+b[index];
}
}
int main()
{
cout<<"Enter size of vector";
int n;
cin>>n;
int a[n],b[n],c[n];
for(int i=0;i<n;i++)
{
cin>>a[i];
b[i]=a[i];
}
int *ad,*bd,*cd;
int size;
size=n*sizeof(int);
cudaMalloc(&ad,size);
cudaMalloc(&bd,size);
cudaMalloc(&cd,size);
cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(bd,b,size,cudaMemcpyHostToDevice);
cudaEvent_t start,end;
dim3 grid(256,1);
dim3 block(32,1);
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
add <<<grid,block>>>(ad,bd,cd,n);
cudaEventRecord(end);
float time=0;
cudaEventElapsedTime(&time,start,end);
cudaMemcpy(c,cd,size,cudaMemcpyDeviceToHost);
for(int i=0;i<n;i++)
{
cout<<c[i]<<endl;
}
cout<<"The time required is"<<time<<endl;
}
|
#include <hip/hip_runtime.h>
#include<iostream>
using namespace std;
__global__ void add(int *a,int*b,int *c,int n)
{
int index=blockIdx.x*blockDim.x+threadIdx.x;
if(index<n)
{
c[index]=a[index]+b[index];
}
}
int main()
{
cout<<"Enter size of vector";
int n;
cin>>n;
int a[n],b[n],c[n];
for(int i=0;i<n;i++)
{
cin>>a[i];
b[i]=a[i];
}
int *ad,*bd,*cd;
int size;
size=n*sizeof(int);
hipMalloc(&ad,size);
hipMalloc(&bd,size);
hipMalloc(&cd,size);
hipMemcpy(ad,a,size,hipMemcpyHostToDevice);
hipMemcpy(bd,b,size,hipMemcpyHostToDevice);
hipEvent_t start,end;
dim3 grid(256,1);
dim3 block(32,1);
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
add <<<grid,block>>>(ad,bd,cd,n);
hipEventRecord(end);
float time=0;
hipEventElapsedTime(&time,start,end);
hipMemcpy(c,cd,size,hipMemcpyDeviceToHost);
for(int i=0;i<n;i++)
{
cout<<c[i]<<endl;
}
cout<<"The time required is"<<time<<endl;
}
|
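In both versions the elapsed time is queried before anything has waited on the end event, so hipEventElapsedTime can report hipErrorNotReady and leave time at 0. A sketch of the corrected ordering with the existing variables:
hipEventRecord(end);
hipEventSynchronize(end);   // make sure the end event has completed
float time=0;
hipEventElapsedTime(&time,start,end);
hipMemcpy(c,cd,size,hipMemcpyDeviceToHost);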
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
#define TILE_WIDTH 16
#define cudaCheckError() { \
cudaError_t e = cudaGetLastError(); \
if (e != cudaSuccess) { \
printf("CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
cudaGetErrorString(e)); \
exit(1); \
} \
}
__global__ void MatrixMulKernel (float* Nd, float* Pd, int width, int height)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
float tempM, tempN;
if ((0*TILE_WIDTH + ty) < height && Row < width)
tempM = Nd[(0*TILE_WIDTH + ty)*width + Col];
else
tempM = 0.0;
if ((0*TILE_WIDTH + tx) < height && Col < width)
tempN = Nd[(0*TILE_WIDTH + tx)*width + Row];
else
tempN = 0.0;
for (int m=1; m <= (TILE_WIDTH + height - 1)/TILE_WIDTH; ++m)
{
Mds[ty][tx] = tempM;
Nds[tx][ty] = tempN;
__syncthreads();
if ((m*TILE_WIDTH + ty) < height && Row < width)
tempM = Nd[(m*TILE_WIDTH + ty)*width + Col];
else
tempM = 0.0;
if ((m*TILE_WIDTH + tx) < height && Col < width)
tempN = Nd[(m*TILE_WIDTH + tx)*width + Row];
else
tempN = 0.0;
for (int k=0; k<TILE_WIDTH; ++k)
Pvalue+=Mds[k][ty] * Nds[k][tx];
__syncthreads();
}
Pd[Row*width + Col] = Pvalue;
}
int main(int argc, char* argv[])
{
float *A_h, *C_h;
float *A_d, *C_d;
int i, width, height, size_A, size_C;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
srand(time(NULL));
if (argc != 3)
{
printf("Provide the problem size.\n");
return -1;
}
height = atoi(argv[1]);
width = atoi(argv[2]);
size_A = width * height * sizeof(float);
size_C = width* width * sizeof(float);
//memory allocation for host matrixes
A_h = (float *)malloc(size_A);
C_h = (float *)malloc(size_C);
if ((A_h == NULL) || (C_h == NULL))
{
printf("Could not allocate memory.\n");
return -2;
}
//initialization of matrixes
for (i = 0; i < width*height; i++) {
A_h[i] = (rand() % 100) / 100.00;
}
//memory allocation of device matrixes
cudaMalloc((void**) &A_d, size_A); cudaCheckError();
cudaMalloc((void**) &C_d, size_C); cudaCheckError();
//copy Host matrixes to Device matrixes
cudaMemcpy(A_d, A_h, size_A, cudaMemcpyHostToDevice); cudaCheckError();
//dimensions of device
dim3 dimGrid(((width-1)/TILE_WIDTH)+1, ((width-1)/TILE_WIDTH)+1, 1);
dim3 dimBLock(TILE_WIDTH,TILE_WIDTH,1);
cudaEventRecord(start);
//calculation of multiplication
MatrixMulKernel<<<dimGrid, dimBLock>>>(A_d, C_d, width, height);
cudaCheckError();
cudaEventRecord(stop);
//copy device results to host
cudaMemcpy(C_h, C_d, size_C, cudaMemcpyDeviceToHost); cudaCheckError();
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Milliseconds: %f\n", milliseconds);
//free device memory
cudaFree(A_d); cudaCheckError();
cudaFree(C_d); cudaCheckError();
//print results
// for (i = 0; i<width*height; i++)
// {
// if(i % width == 0)
// {
// printf("\n");
// }
// printf("%f, ", A_h[i]);
// }
// printf("\n\n");
// printf("\n");
// for (i = 0; i<width*width; i++)
// {
// if(i % width == 0)
// {
// printf("\n");
// }
// printf("%f, ", C_h[i]);
// }
// printf("\n\n");
// printf("\n");
}
|
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define TILE_WIDTH 16
#define cudaCheckError() { \
hipError_t e = hipGetLastError(); \
if (e != hipSuccess) { \
printf("CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
hipGetErrorString(e)); \
exit(1); \
} \
}
__global__ void MatrixMulKernel (float* Nd, float* Pd, int width, int height)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
float tempM, tempN;
if ((0*TILE_WIDTH + ty) < height && Row < width)
tempM = Nd[(0*TILE_WIDTH + ty)*width + Col];
else
tempM = 0.0;
if ((0*TILE_WIDTH + tx) < height && Col < width)
tempN = Nd[(0*TILE_WIDTH + tx)*width + Row];
else
tempN = 0.0;
for (int m=1; m <= (TILE_WIDTH + height - 1)/TILE_WIDTH; ++m)
{
Mds[ty][tx] = tempM;
Nds[tx][ty] = tempN;
__syncthreads();
if ((m*TILE_WIDTH + ty) < height && Row < width)
tempM = Nd[(m*TILE_WIDTH + ty)*width + Col];
else
tempM = 0.0;
if ((m*TILE_WIDTH + tx) < height && Col < width)
tempN = Nd[(m*TILE_WIDTH + tx)*width + Row];
else
tempN = 0.0;
for (int k=0; k<TILE_WIDTH; ++k)
Pvalue+=Mds[k][ty] * Nds[k][tx];
__syncthreads();
}
Pd[Row*width + Col] = Pvalue;
}
int main(int argc, char* argv[])
{
float *A_h, *C_h;
float *A_d, *C_d;
int i, width, height, size_A, size_C;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
srand(time(NULL));
if (argc != 3)
{
printf("Provide the problem size.\n");
return -1;
}
height = atoi(argv[1]);
width = atoi(argv[2]);
size_A = width * height * sizeof(float);
size_C = width* width * sizeof(float);
//memory allocation for host matrixes
A_h = (float *)malloc(size_A);
C_h = (float *)malloc(size_C);
if ((A_h == NULL) || (C_h == NULL))
{
printf("Could not allocate memory.\n");
return -2;
}
//initialization of matrixes
for (i = 0; i < width*height; i++) {
A_h[i] = (rand() % 100) / 100.00;
}
//memory allocation of device matrixes
hipMalloc((void**) &A_d, size_A); cudaCheckError();
hipMalloc((void**) &C_d, size_C); cudaCheckError();
//copy Host matrixes to Device matrixes
hipMemcpy(A_d, A_h, size_A, hipMemcpyHostToDevice); cudaCheckError();
//dimensions of device
dim3 dimGrid(((width-1)/TILE_WIDTH)+1, ((width-1)/TILE_WIDTH)+1, 1);
dim3 dimBLock(TILE_WIDTH,TILE_WIDTH,1);
hipEventRecord(start);
//calculation of multiplication
MatrixMulKernel<<<dimGrid, dimBLock>>>(A_d, C_d, width, height);
cudaCheckError();
hipEventRecord(stop);
//copy device results to host
hipMemcpy(C_h, C_d, size_C, hipMemcpyDeviceToHost); cudaCheckError();
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Milliseconds: %f\n", milliseconds);
//free device memory
hipFree(A_d); cudaCheckError();
hipFree(C_d); cudaCheckError();
//print results
// for (i = 0; i<width*height; i++)
// {
// if(i % width == 0)
// {
// printf("\n");
// }
// printf("%f, ", A_h[i]);
// }
// printf("\n\n");
// printf("\n");
// for (i = 0; i<width*width; i++)
// {
// if(i % width == 0)
// {
// printf("\n");
// }
// printf("%f, ", C_h[i]);
// }
// printf("\n\n");
// printf("\n");
}
|
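The converted macro keeps its cudaCheckError name and only inspects hipGetLastError after the fact. A common companion, sketched here rather than taken from the original, checks the return code of each HIP API call directly:
#define HIP_CHECK(cmd) { \
    hipError_t e = (cmd); \
    if (e != hipSuccess) { \
        printf("HIP error %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(e)); \
        exit(1); \
    } \
}
// e.g. HIP_CHECK(hipMalloc((void**) &A_d, size_A));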
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void add(int *a, int *b, int *c, int *d, int *e, int *f) {
*c = *a + *b;
*d = *a - *b;
*e = *a * *b;
*f = *a / *b;
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void add(int *a, int *b, int *c, int *d, int *e, int *f) {
*c = *a + *b;
*d = *a - *b;
*e = *a * *b;
*f = *a / *b;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
void output_results(int N, double *g_x);
int main(void)
{
curandGenerator_t generator;
curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(generator, 1234);
int N = 100000;
double *g_x; cudaMalloc((void **)&g_x, sizeof(double) * N);
curandGenerateUniformDouble(generator, g_x, N);
double *x = (double*) calloc(N, sizeof(double));
cudaMemcpy(x, g_x, sizeof(double) * N, cudaMemcpyDeviceToHost);
cudaFree(g_x);
output_results(N, x);
free(x);
return 0;
}
void output_results(int N, double *x)
{
FILE *fid = fopen("x1.txt", "w");
for(int n = 0; n < N; n++)
{
fprintf(fid, "%g\n", x[n]);
}
fclose(fid);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <hiprand/hiprand.h>
void output_results(int N, double *g_x);
int main(void)
{
hiprandGenerator_t generator;
hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandSetPseudoRandomGeneratorSeed(generator, 1234);
int N = 100000;
double *g_x; hipMalloc((void **)&g_x, sizeof(double) * N);
hiprandGenerateUniformDouble(generator, g_x, N);
double *x = (double*) calloc(N, sizeof(double));
hipMemcpy(x, g_x, sizeof(double) * N, hipMemcpyDeviceToHost);
hipFree(g_x);
output_results(N, x);
free(x);
return 0;
}
void output_results(int N, double *x)
{
FILE *fid = fopen("x1.txt", "w");
for(int n = 0; n < N; n++)
{
fprintf(fid, "%g\n", x[n]);
}
fclose(fid);
}
|
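Neither version destroys the random-number generator. The cleanup call below is part of the hiprand API, and building with hipcc typically also needs -lhiprand on the link line (a build-setup note, not shown in the original):
hiprandDestroyGenerator(generator);   // release the generator before exiting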
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <assert.h>
#define N 1000000
__global__ void vecadd(int *a, int *b, int *c){
// determine global thread id
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// do vector add, check if index is < N
if(idx<N) {
c[idx]=a[idx]+b[idx];
}
}
int main (int argc, char **argv){
int a_host[N], b_host[N], c_host[N];
int *a_device, *b_device, *c_device;
int i;
int blocksize=256;
dim3 dimBlock(blocksize);
dim3 dimGrid(ceil(N/(float)blocksize));
for (i=0;i<N;i++) a_host[i]=i;
for (i=0;i<N;i++) b_host[i]=i;
// alloc GPU memory
cudaMalloc((void**)&a_device,N*sizeof(int));
cudaMalloc((void**)&b_device,N*sizeof(int));
cudaMalloc((void**)&c_device,N*sizeof(int));
// transfer data
cudaMemcpy(a_device,a_host,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(b_device,b_host,N*sizeof(int),cudaMemcpyHostToDevice);
// invoke kernel
vecadd<<<dimGrid,dimBlock>>>(a_device,b_device,c_device);
// transfer result
cudaMemcpy(c_host,c_device,N*sizeof(int),cudaMemcpyDeviceToHost);
// check for correctness
for (i=0;i<N;i++) assert (c_host[i] == a_host[i] + b_host[i]);
// free GPU memory
cudaFree(a_device);
cudaFree(b_device);
cudaFree(c_device);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 1000000
__global__ void vecadd(int *a, int *b, int *c){
// determine global thread id
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// do vector add, check if index is < N
if(idx<N) {
c[idx]=a[idx]+b[idx];
}
}
int main (int argc, char **argv){
static int a_host[N], b_host[N], c_host[N]; /* static storage: three ~4 MB arrays would overflow a typical stack as automatic variables */
int *a_device, *b_device, *c_device;
int i;
int blocksize=256;
dim3 dimBlock(blocksize);
dim3 dimGrid(ceil(N/(float)blocksize));
for (i=0;i<N;i++) a_host[i]=i;
for (i=0;i<N;i++) b_host[i]=i;
// alloc GPU memory
hipMalloc((void**)&a_device,N*sizeof(int));
hipMalloc((void**)&b_device,N*sizeof(int));
hipMalloc((void**)&c_device,N*sizeof(int));
// transfer data
hipMemcpy(a_device,a_host,N*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(b_device,b_host,N*sizeof(int),hipMemcpyHostToDevice);
// invoke kernel
vecadd<<<dimGrid,dimBlock>>>(a_device,b_device,c_device);
// transfer result
hipMemcpy(c_host,c_device,N*sizeof(int),hipMemcpyDeviceToHost);
// check for correctness
for (i=0;i<N;i++) assert (c_host[i] == a_host[i] + b_host[i]);
// free GPU memory
hipFree(a_device);
hipFree(b_device);
hipFree(c_device);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
//--------------------------------------------------
// Autor: Ricardo Farias
// Data : 29 Out 2011
// Goal : Increment a variable in the graphics card
//--------------------------------------------------
/***************************************************************************************************
Includes
***************************************************************************************************/
#include <cuda.h>
#include <iostream>
#include <iomanip>
/* Descritores herdados pelos processos filhos */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
__global__ void somaUm( int *a ) {
atomicAdd( a, 1 );
}
int main() {
bool *lock;
int pid ;
int teste = 10;
lock = (bool*)malloc(1*sizeof(bool));
lock[0] = true;
int h_a = 0;
int deviceCount = 0;
printf("Creating the son process\n");
pid = fork();
printf("Endereco do Lock %d\n", &lock);
if( pid == -1 ) { /* erro */
perror("impossivel de criar um filho") ;
exit(-1);
} else if( pid == 0 ) { /* filho */
teste = 20;
printf("Endereco do Lock do filho %d\n", &teste);
printf("\tO filho espera o pai chamar o kernel primeiro.\n") ;
sleep( 10 );
while( lock[0] ){
//printf("Valor do Lock no filho %d \n.", lock);
sleep(1);
};
printf("\tO Pai liberou o lock.\nChamando o kernel pelo filho com 5 threads.\n") ;
//-------------------------------------------------
int h_a = 0;
int deviceCount = 0;
cudaGetDeviceCount( &deviceCount );
// This function call returns 0 if there are no CUDA capable devices.
if( deviceCount == 0 ) {
printf("There is no device supporting CUDA\n");
exit( 1 );
}
cudaSetDevice(1);
int *d_a; // Pointer to host & device arrays
cudaMalloc( (void **) &d_a, sizeof( int ) ) ;
// Copy array to device
cudaMemcpy( d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice ) ;
printf( "Valor de a antes = %d\n", h_a );
//------------------------------------------------
lock[0] = true;
somaUm<<< 1, 5 >>>( d_a );
cudaMemcpy( &h_a, d_a, sizeof(int), cudaMemcpyDeviceToHost ) ;
printf( "\tValor de a depois da chamada do filho = %d\n", h_a );
printf("\tO Filho se mata!!!\n") ;
lock[0] = false;
exit(1) ;
} else { /* pai */
printf("Endereco do Lock do pai %d\n", &lock);
lock[0] = true;
printf( "O pid do meu filho e': %d\n", pid );
printf( "O Pai pega o controle para chamar o kernel com 1 thread...\n" );
cudaGetDeviceCount( &deviceCount );
// This function call returns 0 if there are no CUDA capable devices.
if( deviceCount == 0 ) {
printf("There is no device supporting CUDA\n");
exit( 1 );
}
cudaSetDevice(1);
int *d_a; // Pointer to host & device arrays
cudaMalloc( (void **) &d_a, sizeof( int ) );
// Copy array to device
cudaMemcpy( d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice ) ;
printf( "Valor de a antes = %d\n", h_a );
somaUm<<< 1, 1 >>>( d_a );
cudaMemcpy( &h_a, d_a, sizeof(int), cudaMemcpyDeviceToHost ) ;
printf( "Valor de a depois da chamada do Pai = %d\n", h_a );
cudaFree( d_a );
printf( "O Pai libera o lock e espera o filho chamar o kernel...\n" );
lock[0] = false;
teste = 30;
printf(" Lock liberado pelo pai lock = %d\n", &teste);
sleep( 3 );
while( lock[0] );
printf( "O Pai se mata!!!\n" );
}
exit(0);
}
|
//--------------------------------------------------
// Autor: Ricardo Farias
// Data : 29 Out 2011
// Goal : Increment a variable in the graphics card
//--------------------------------------------------
/***************************************************************************************************
Includes
***************************************************************************************************/
#include <hip/hip_runtime.h>
#include <iostream>
#include <iomanip>
/* Descritores herdados pelos processos filhos */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
__global__ void somaUm( int *a ) {
atomicAdd( a, 1 );
}
int main() {
bool *lock;
int pid ;
int teste = 10;
lock = (bool*)malloc(1*sizeof(bool));
lock[0] = true;
int h_a = 0;
int deviceCount = 0;
printf("Creating the son process\n");
pid = fork();
printf("Endereco do Lock %d\n", &lock);
if( pid == -1 ) { /* erro */
perror("impossivel de criar um filho") ;
exit(-1);
} else if( pid == 0 ) { /* filho */
teste = 20;
printf("Endereco do Lock do filho %d\n", &teste);
printf("\tO filho espera o pai chamar o kernel primeiro.\n") ;
sleep( 10 );
while( lock[0] ){
//printf("Valor do Lock no filho %d \n.", lock);
sleep(1);
};
printf("\tO Pai liberou o lock.\nChamando o kernel pelo filho com 5 threads.\n") ;
//-------------------------------------------------
int h_a = 0;
int deviceCount = 0;
hipGetDeviceCount( &deviceCount );
// This function call returns 0 if there are no CUDA capable devices.
if( deviceCount == 0 ) {
printf("There is no device supporting CUDA\n");
exit( 1 );
}
hipSetDevice(1);
int *d_a; // Pointer to host & device arrays
hipMalloc( (void **) &d_a, sizeof( int ) ) ;
// Copy array to device
hipMemcpy( d_a, &h_a, sizeof(int), hipMemcpyHostToDevice ) ;
printf( "Valor de a antes = %d\n", h_a );
//------------------------------------------------
lock[0] = true;
somaUm<<< 1, 5 >>>( d_a );
hipMemcpy( &h_a, d_a, sizeof(int), hipMemcpyDeviceToHost ) ;
printf( "\tValor de a depois da chamada do filho = %d\n", h_a );
printf("\tO Filho se mata!!!\n") ;
lock[0] = false;
exit(1) ;
} else { /* pai */
printf("Endereco do Lock do pai %d\n", &lock);
lock[0] = true;
printf( "O pid do meu filho e': %d\n", pid );
printf( "O Pai pega o controle para chamar o kernel com 1 thread...\n" );
hipGetDeviceCount( &deviceCount );
// This function call returns 0 if there are no CUDA capable devices.
if( deviceCount == 0 ) {
printf("There is no device supporting CUDA\n");
exit( 1 );
}
hipSetDevice(1);
int *d_a; // Pointer to host & device arrays
hipMalloc( (void **) &d_a, sizeof( int ) );
// Copy array to device
hipMemcpy( d_a, &h_a, sizeof(int), hipMemcpyHostToDevice ) ;
printf( "Valor de a antes = %d\n", h_a );
somaUm<<< 1, 1 >>>( d_a );
hipMemcpy( &h_a, d_a, sizeof(int), hipMemcpyDeviceToHost ) ;
printf( "Valor de a depois da chamada do Pai = %d\n", h_a );
hipFree( d_a );
printf( "O Pai libera o lock e espera o filho chamar o kernel...\n" );
lock[0] = false;
teste = 30;
printf(" Lock liberado pelo pai lock = %d\n", &teste);
sleep( 3 );
while( lock[0] );
printf( "O Pai se mata!!!\n" );
}
exit(0);
}
|
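An editorial note on the fork example above (it applies equally to the CUDA and HIP versions): memory obtained with malloc before fork() is copied into the child, not shared, so the child's while(lock[0]) loop never observes the parent's lock[0] = false. The fragment below is a minimal sketch, assuming a POSIX system with anonymous shared mappings, of how a flag that really is shared across the fork could be allocated; it is not part of the original program.
#include <stdbool.h>
#include <sys/mman.h>
// Sketch: place the flag in an anonymous shared mapping so a write by the
// parent after fork() is visible to the child (a malloc'd buffer is not).
static bool *make_shared_flag(void)
{
    void *p = mmap(NULL, sizeof(bool), PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return NULL;
    bool *flag = (bool *)p;
    *flag = true;
    return flag;
}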
Convert the following CUDA code to AMD GPU code:
cuda
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* NOTES:.
* 1) The tmp variables must each have space for length * batchSize * groupSize * sizeof(complexType).
* 2) Templated types must be (cufftReal, cufftComplex) or (cufftDoubleReal, cufftDoubleComplex)
* 3) Length must be even.
* 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT. IDCT(DCT(x)) == x.
*/
#include <stdio.h>
#include <cufft.h>
// Useful to have
#define ROOT2 1.4142135623730951f
// This is quite system dependent. Slower systems would benefit from a smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length,
int batchSize,
int groupSize,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ in,
realType * __restrict__ out) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType Alocal;
realType Ablocal;
int index;
if (element < length / 2) {
index = element * 2;
}
else {
index = length - 2 * (element - length / 2) - 1;
}
if (A != NULL) {
Alocal = A[groupID * length + index];
if (Ab != NULL) {
Ablocal = Ab[groupID * length + index];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
realType val;
if (forward) val = ((realType*)(in))[length * batchID + index];
else val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];
if (A != NULL) {
val *= Alocal;
if (Ab != NULL) {
val += Ablocal;
}
}
if (R2C) {
((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
}
else {
complexType outVal;
outVal.x = val;
outVal.y = 0.f;
((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
}
}
}
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length,
int batchSize,
int groupSize,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ in,
realType * __restrict__ out) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType Alocal;
realType Ablocal;
if (A != NULL) {
Alocal = A[groupID * length + element];
if (Ab != NULL) {
Ablocal = Ab[groupID * length + element];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
complexType val;
if (R2C) {
if (element <= length / 2) {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
val.y = -val.y;
}
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
complexType val2;
complexType ret;
sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
val2.y = -val2.y;
ret.x = val.x * val2.x - val.y * val2.y;
// Normalisation
if (element == 0) {
ret.x *= rsqrt((realType)length);
}
else {
ret.x *= ROOT2 * rsqrt((realType)length);
}
if (A != NULL) {
ret.x *= Alocal;
if (Ab != NULL) {
ret.x += Ablocal;
}
}
((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
}
}
template<typename realType, typename complexType>
__global__ void IDCT_final(int length,
int batchSize,
int groupSize,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ in,
realType * __restrict__ out) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType Alocal;
realType Ablocal;
int index;
if (element < length / 2) {
index = element * 2;
}
else {
index = length - 2 * (element - length / 2) - 1;
}
if (A != NULL) {
Alocal = A[groupID * length + index];
if (Ab != NULL) {
Ablocal = Ab[groupID * length + index];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
// "A" for backward pass
if (A != NULL) {
val.x *= Alocal;
if (Ab != NULL) {
val.x += Ablocal;
}
}
((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
}
}
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length,
int batchSize,
int groupSize,
const realType * __restrict__ D,
const realType * __restrict__ Db,
const realType * __restrict__ in,
realType * __restrict__ out,
realType * __restrict__ deltaMid) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType dlocal;
realType dblocal;
if (D != NULL) {
dlocal = D[groupID * length + element];
if (Db != NULL) {
dblocal = Db[groupID * length + element];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
complexType val;
if (R2C) {
if (element <= length / 2) {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
val.y = -val.y;
}
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
complexType val2;
complexType ret;
sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
val2.y = -val2.y;
ret.x = val.x * val2.x - val.y * val2.y;
// Normalisation
if (element == 0) {
ret.x *= rsqrt((realType)length);
}
else {
ret.x *= ROOT2 * rsqrt((realType)length);
}
realType re_in = ret.x;
if (D != NULL) {
re_in *= dlocal;
if (Db != NULL) {
re_in += dblocal;
}
}
if (deltaMid) {
deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
}
// Un-normalisation
if (element == 0) {
re_in *= rsqrtf((realType)length);
}
else {
re_in *= ROOT2 * rsqrtf((realType)length);
}
sincospi(element / (2.f * length), &(val2.y), &(val2.x));
val.x = re_in * val2.x;
val.y = -re_in * val2.y;
((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
}
}
template<typename realType>
__global__ void updateWeights(int length,
int batchSize,
int groupSize,
const realType * __restrict__ D,
const realType * __restrict__ in,
const realType * __restrict__ gradOutput,
realType * __restrict__ delta_D,
realType * __restrict__ delta_Db) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
D += length * groupID;
delta_D += length * groupID;
delta_Db += length * groupID;
realType recp_localD = 1.f / D[element];
realType localDeltaD = 0.f;
realType localDeltaDb = 0.f;
for (int batchID = 0; batchID < batchSize; batchID++) {
realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
localDeltaD += val * in[length * batchID + element];
localDeltaDb += val;
}
delta_D[element] += localDeltaD;
delta_Db[element] += localDeltaDb;
}
template<typename realType, typename complexType>
int acdc_fp(cudaStream_t stream,
int length, int batchSize, int groupSize,
cufftHandle planR2C, cufftHandle planC2C,
const realType * __restrict__ in,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ D,
const realType * __restrict__ Db,
realType * __restrict__ out,
realType * __restrict__ tmp1,
realType * __restrict__ tmp2) {
if (length & 1) {
printf("acdc_fp: length must be even (%d passed)\n", length);
return 1;
}
cufftSetStream(planR2C, stream);
cufftSetStream(planC2C, stream);
dim3 blockDim;
dim3 gridDim;
gridDim.y = groupSize;
blockDim.x = 128;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = (batchSize + 1) / 2;
// Two DCTs required. Inverse is handled in the custom setup.
// R2C is only faster for longer sequences (launch latency vs bandwidth)
if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, in, tmp1);
cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
}
else {
DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, in, tmp1);
cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
}
cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, tmp2, out);
return 0;
}
// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.
template<typename realType, typename complexType>
int acdc_bp(cudaStream_t stream,
int length,
int batchSize,
int groupSize,
cufftHandle planR2C, cufftHandle planC2C,
realType * __restrict__ delta_in,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ D,
const realType * __restrict__ Db,
const realType * __restrict__ delta_out,
realType * __restrict__ delta_mid,
realType * __restrict__ tmp1,
realType * __restrict__ tmp2) {
if (length & 1) {
printf("acdc_bp: length must be even (%d passed)\n", length);
return 1;
}
cufftSetStream(planR2C, stream);
cufftSetStream(planC2C, stream);
dim3 blockDim;
dim3 gridDim;
gridDim.y = groupSize;
blockDim.x = 128;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = (batchSize + 1) / 2;
// Backward through CD
// R2C is only faster for longer sequences (launch latency vs bandwidth)
if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
DCT_setup<realType, complexType, false, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
}
else {
DCT_setup<realType, complexType, false, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
}
// Backward through CA
cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, NULL, tmp2, delta_in);
return 0;
}
template<typename realType, typename complexType>
int acdc_bp_acc(cudaStream_t stream,
int length,
int batchSize,
int groupSize,
cufftHandle planR2C, cufftHandle planC2C,
realType * __restrict__ delta_in,
realType * __restrict__ delta_mid,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ D,
const realType * __restrict__ inputA,
realType * __restrict__ inputD,
realType * __restrict__ delta_A,
realType * __restrict__ delta_Ab,
realType * __restrict__ delta_D,
realType * __restrict__ delta_Db,
realType * __restrict__ tmp1,
realType * __restrict__ tmp2) {
if (length & 1) {
printf("acdc_bp_acc length must be even (%d passed)\n", length);
return 1;
}
cufftSetStream(planR2C, stream);
cufftSetStream(planC2C, stream);
dim3 blockDim;
dim3 gridDim;
gridDim.y = groupSize;
blockDim.x = 32;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab);
blockDim.x = 128;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = (batchSize + 1) / 2;
// Forward through AC to calculate input going into D
// R2C is only faster for longer sequences (launch latency vs bandwidth)
if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, inputA, tmp1);
cufftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
DCT_final<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
}
else {
DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, inputA, tmp1);
cufftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, CUFFT_FORWARD);
DCT_final<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
}
blockDim.x = 32;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = 1;
updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db);
return 0;
}
|
/* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* NOTES:.
* 1) The tmp variables must each have space for length * batchSize * groupSize * sizeof(complexType).
* 2) Templated types must be (cufftReal, cufftComplex) or (cufftDoubleReal, cufftDoubleComplex)
* 3) Length must be even.
* 4) DCT maps to a type-2 DCT. Inverse DCT maps to a type-3 DCT. IDCT(DCT(x)) == x.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <hipfft/hipfft.h>
// Useful to have
#define ROOT2 1.4142135623730951f
// This is quite system dependent. Slower systems would benefit from a smaller value here.
#define R2C_SWITCH_SIZE (1 << 19)
template<typename realType, typename complexType, bool forward, bool R2C>
__global__ void DCT_setup(int length,
int batchSize,
int groupSize,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ in,
realType * __restrict__ out) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType Alocal;
realType Ablocal;
int index;
if (element < length / 2) {
index = element * 2;
}
else {
index = length - 2 * (element - length / 2) - 1;
}
if (A != NULL) {
Alocal = A[groupID * length + index];
if (Ab != NULL) {
Ablocal = Ab[groupID * length + index];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
realType val;
if (forward) val = ((realType*)(in))[length * batchID + index];
else val = ((realType*)(in))[length * (batchID * groupSize + groupID) + index];
if (A != NULL) {
val *= Alocal;
if (Ab != NULL) {
val += Ablocal;
}
}
if (R2C) {
((realType*)(out))[element + length * (batchID * groupSize + groupID)] = (realType)val;
}
else {
complexType outVal;
outVal.x = val;
outVal.y = 0.f;
((complexType*)(out))[element + length * (batchID * groupSize + groupID)] = outVal;
}
}
}
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final(int length,
int batchSize,
int groupSize,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ in,
realType * __restrict__ out) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType Alocal;
realType Ablocal;
if (A != NULL) {
Alocal = A[groupID * length + element];
if (Ab != NULL) {
Ablocal = Ab[groupID * length + element];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
complexType val;
if (R2C) {
if (element <= length / 2) {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
val.y = -val.y;
}
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
complexType val2;
complexType ret;
sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
val2.y = -val2.y;
ret.x = val.x * val2.x - val.y * val2.y;
// Normalisation
if (element == 0) {
ret.x *= rsqrt((realType)length);
}
else {
ret.x *= ROOT2 * rsqrt((realType)length);
}
if (A != NULL) {
ret.x *= Alocal;
if (Ab != NULL) {
ret.x += Ablocal;
}
}
((realType*)(out))[length * (batchID * groupSize + groupID) + element] = ret.x;
}
}
template<typename realType, typename complexType>
__global__ void IDCT_final(int length,
int batchSize,
int groupSize,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ in,
realType * __restrict__ out) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType Alocal;
realType Ablocal;
int index;
if (element < length / 2) {
index = element * 2;
}
else {
index = length - 2 * (element - length / 2) - 1;
}
if (A != NULL) {
Alocal = A[groupID * length + index];
if (Ab != NULL) {
Ablocal = Ab[groupID * length + index];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
complexType val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
// "A" for backward pass
if (A != NULL) {
val.x *= Alocal;
if (Ab != NULL) {
val.x += Ablocal;
}
}
((realType*)(out))[length * (batchID * groupSize + groupID) + index] = val.x;
}
}
template<typename realType, typename complexType, bool R2C>
__global__ void DCT_final_IDCT_setup(int length,
int batchSize,
int groupSize,
const realType * __restrict__ D,
const realType * __restrict__ Db,
const realType * __restrict__ in,
realType * __restrict__ out,
realType * __restrict__ deltaMid) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
realType dlocal;
realType dblocal;
if (D != NULL) {
dlocal = D[groupID * length + element];
if (Db != NULL) {
dblocal = Db[groupID * length + element];
}
}
for (int batchID = blockIdx.z; batchID < batchSize; batchID += gridDim.z) {
complexType val;
if (R2C) {
if (element <= length / 2) {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + length - element];
val.y = -val.y;
}
}
else {
val = ((complexType*)(in))[length * (batchID * groupSize + groupID) + element];
}
complexType val2;
complexType ret;
sincospi(element / (2.f * (length)), &(val2.y), &(val2.x));
val2.y = -val2.y;
ret.x = val.x * val2.x - val.y * val2.y;
// Normalisation
if (element == 0) {
ret.x *= rsqrt((realType)length);
}
else {
ret.x *= ROOT2 * rsqrt((realType)length);
}
realType re_in = ret.x;
if (D != NULL) {
re_in *= dlocal;
if (Db != NULL) {
re_in += dblocal;
}
}
if (deltaMid) {
deltaMid[element + length * (batchID * groupSize + groupID)] = re_in;
}
// Un-normalisation
if (element == 0) {
re_in *= rsqrtf((realType)length);
}
else {
re_in *= ROOT2 * rsqrtf((realType)length);
}
sincospi(element / (2.f * length), &(val2.y), &(val2.x));
val.x = re_in * val2.x;
val.y = -re_in * val2.y;
((complexType*)(out))[length * (batchID * groupSize + groupID) + element] = val;
}
}
template<typename realType>
__global__ void updateWeights(int length,
int batchSize,
int groupSize,
const realType * __restrict__ D,
const realType * __restrict__ in,
const realType * __restrict__ gradOutput,
realType * __restrict__ delta_D,
realType * __restrict__ delta_Db) {
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element >= length) return;
int groupID = blockIdx.y;
D += length * groupID;
delta_D += length * groupID;
delta_Db += length * groupID;
realType recp_localD = 1.f / D[element];
realType localDeltaD = 0.f;
realType localDeltaDb = 0.f;
for (int batchID = 0; batchID < batchSize; batchID++) {
realType val = gradOutput[length * (batchID * groupSize + groupID) + element] * recp_localD;
localDeltaD += val * in[length * batchID + element];
localDeltaDb += val;
}
delta_D[element] += localDeltaD;
delta_Db[element] += localDeltaDb;
}
template<typename realType, typename complexType>
int acdc_fp(hipStream_t stream,
int length, int batchSize, int groupSize,
hipfftHandle planR2C, hipfftHandle planC2C,
const realType * __restrict__ in,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ D,
const realType * __restrict__ Db,
realType * __restrict__ out,
realType * __restrict__ tmp1,
realType * __restrict__ tmp2) {
if (length & 1) {
printf("acdc_fp: length must be even (%d passed)\n", length);
return 1;
}
hipfftSetStream(planR2C, stream);
hipfftSetStream(planC2C, stream);
dim3 blockDim;
dim3 gridDim;
gridDim.y = groupSize;
blockDim.x = 128;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = (batchSize + 1) / 2;
// Two DCTs required. Inverse is handled in the custom setup.
// R2C is only faster for longer sequences (launch latency vs bandwidth)
if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, in, tmp1);
hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
}
else {
DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, in, tmp1);
hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD);
DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, Db, tmp2, tmp1, NULL);
}
hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD);
IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, tmp2, out);
return 0;
}
// NOTE: For the backward pass "in" is bottom, "out" is top, so we write to in.
template<typename realType, typename complexType>
int acdc_bp(hipStream_t stream,
int length,
int batchSize,
int groupSize,
hipfftHandle planR2C, hipfftHandle planC2C,
realType * __restrict__ delta_in,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ D,
const realType * __restrict__ Db,
const realType * __restrict__ delta_out,
realType * __restrict__ delta_mid,
realType * __restrict__ tmp1,
realType * __restrict__ tmp2) {
if (length & 1) {
printf("acdc_bp: length must be even (%d passed)\n", length);
return 1;
}
hipfftSetStream(planR2C, stream);
hipfftSetStream(planC2C, stream);
dim3 blockDim;
dim3 gridDim;
gridDim.y = groupSize;
blockDim.x = 128;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = (batchSize + 1) / 2;
// Backward through CD
// R2C is only faster for longer sequences (launch latency vs bandwidth)
if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
DCT_setup<realType, complexType, false, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
DCT_final_IDCT_setup<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
}
else {
DCT_setup<realType, complexType, false, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, delta_out, tmp1);
hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD);
DCT_final_IDCT_setup<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, NULL, tmp2, tmp1, delta_mid);
}
// Backward through CA
hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD);
IDCT_final<realType, complexType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, NULL, tmp2, delta_in);
return 0;
}
template<typename realType, typename complexType>
int acdc_bp_acc(hipStream_t stream,
int length,
int batchSize,
int groupSize,
hipfftHandle planR2C, hipfftHandle planC2C,
realType * __restrict__ delta_in,
realType * __restrict__ delta_mid,
const realType * __restrict__ A,
const realType * __restrict__ Ab,
const realType * __restrict__ D,
const realType * __restrict__ inputA,
realType * __restrict__ inputD,
realType * __restrict__ delta_A,
realType * __restrict__ delta_Ab,
realType * __restrict__ delta_D,
realType * __restrict__ delta_Db,
realType * __restrict__ tmp1,
realType * __restrict__ tmp2) {
if (length & 1) {
printf("acdc_bp_acc length must be even (%d passed)\n", length);
return 1;
}
hipfftSetStream(planR2C, stream);
hipfftSetStream(planC2C, stream);
dim3 blockDim;
dim3 gridDim;
gridDim.y = groupSize;
blockDim.x = 32;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, inputA, delta_in, delta_A, delta_Ab);
blockDim.x = 128;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = (batchSize + 1) / 2;
// Forward through AC to calculate input going into D
// R2C is only faster for longer sequences (launch latency vs bandwidth)
if (length * batchSize * groupSize >= R2C_SWITCH_SIZE) {
DCT_setup<realType, complexType, true, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, inputA, tmp1);
hipfftExecR2C(planR2C, (realType*)tmp1, (complexType*)tmp2);
DCT_final<realType, complexType, true> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
}
else {
DCT_setup<realType, complexType, true, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, A, Ab, inputA, tmp1);
hipfftExecC2C(planC2C, (complexType*)tmp1, (complexType*)tmp2, HIPFFT_FORWARD);
DCT_final<realType, complexType, false> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, NULL, NULL, tmp2, inputD);
}
blockDim.x = 32;
gridDim.x = (length + blockDim.x - 1) / blockDim.x;
gridDim.z = 1;
updateWeights<realType> <<< gridDim, blockDim, 0, stream >>> (
length, batchSize, groupSize, D, inputD, delta_mid, delta_D, delta_Db);
return 0;
}
|
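A hedged sketch for the ACDC listing above: acdc_fp, acdc_bp and acdc_bp_acc all receive pre-built planR2C and planC2C handles, but plan creation is not shown in either version. The function below is an assumption, not the authors' code: it builds batched single-precision plans whose complex output stride of length per signal is consistent with how the kernels above index the tmp buffers (for the double specialization, HIPFFT_D2Z / HIPFFT_Z2Z would be the analogous types).
#include <hipfft/hipfft.h>
// Sketch only: one length-point transform per signal, batched over
// batchSize * groupSize signals, spaced by 'length' in both domains.
static int make_acdc_plans(int length, int batchSize, int groupSize,
                           hipfftHandle *planR2C, hipfftHandle *planC2C)
{
    int n[1] = { length };
    int batch = batchSize * groupSize;
    // R2C: packed real input (idist = length); complex output also spaced by
    // length so each signal's spectrum starts at a multiple of length.
    if (hipfftPlanMany(planR2C, 1, n, n, 1, length, n, 1, length,
                       HIPFFT_R2C, batch) != HIPFFT_SUCCESS) return 1;
    // C2C: fully packed, length complex values per signal.
    if (hipfftPlanMany(planC2C, 1, n, NULL, 1, length, NULL, 1, length,
                       HIPFFT_C2C, batch) != HIPFFT_SUCCESS) return 1;
    return 0;
}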
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__device__ int sum = 1; __global__ void degreeCalc (int *array){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i>=1000000){
return;
}
sum+=array[i];
// if (i==999999){
// printf("%d", sum);
// }
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
if (i==n-1){
stop = m;
}
else{
stop = vertexArray[i+1];
}
diff = stop-start;
atomicAdd(&degreeCount[i], diff);
for (int j=start; j<stop; j++){
atomicAdd(&degreeCount[neighbourArray[j]-1], 1);
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__device__ int sum = 1; __global__ void degreeCalc (int *array){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i>=1000000){
return;
}
sum+=array[i];
// if (i==999999){
// printf("%d", sum);
// }
}
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int start = -1, stop = -1;
int diff=0;
start = vertexArray[i];
if (i==n-1){
stop = m;
}
else{
stop = vertexArray[i+1];
}
diff = stop-start;
atomicAdd(&degreeCount[i], diff);
for (int j=start; j<stop; j++){
atomicAdd(&degreeCount[neighbourArray[j]-1], 1);
}
}
|
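A brief aside on the first degreeCalc kernel above (in both listings): the unsynchronized sum += array[i] on the __device__ variable is a data race across threads. The kernel below is an illustrative sketch only (degreeCalcAtomic is an invented name, and sum refers to the __device__ int declared in the file), showing a race-free accumulation.
__global__ void degreeCalcAtomic(int *array) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= 1000000) {
        return;
    }
    // Accumulate into the device-scope counter atomically instead of sum += array[i].
    atomicAdd(&sum, array[i]);
}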
Convert the following CUDA code to AMD GPU code:
cuda
/*****************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Example : cuda-matrix-vector-multiplication.cu
Objective : Write CUDA program to compute Matrix-Vector multiplication.
Input : None
Output : Execution time in seconds , Gflops achieved
Created : August-2013
E-mail : [email protected]
****************************************************************************/
#include<stdio.h>
#include<cuda.h>
#define BLOCKSIZE 16
#define SIZE 1024
#define EPS 1.0e-15
cudaDeviceProp deviceProp;
double *host_Mat,*host_Vect,*host_ResVect,*cpu_ResVect;
double *device_Mat,*device_Vect,*device_ResVect;
int vlength ,matRowSize , matColSize;
int device_Count;
int size = SIZE;
/*mem error*/
void mem_error(char *arrayname, char *benchmark, int len, char *type)
{
printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type);
exit(-1);
}
/*calculate Gflops*/
double calculate_gflops(float &Tsec)
{
float gflops=(1.0e-9 * (( 2.0 * size*size )/Tsec));
return gflops;
}
/*sequential function for mat vect multiplication*/
void CPU_MatVect()
{
cpu_ResVect = (double *)malloc(matRowSize*sizeof(double));
if(cpu_ResVect==NULL)
mem_error("cpu_ResVect","vectmatmul",size,"double");
int i,j;
for(i=0;i<matRowSize;i++)
{cpu_ResVect[i]=0;
for(j=0;j<matColSize;j++)
cpu_ResVect[i]+=host_Mat[i*vlength+j]*host_Vect[j];
}
}
/*Check for safe return of all calls to the device */
void CUDA_SAFE_CALL(cudaError_t call)
{
cudaError_t ret = call;
//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
switch(ret)
{
case cudaSuccess:
// printf("Success\n");
break;
/* case cudaErrorInvalidValue:
{
printf("ERROR: InvalidValue:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidDevicePointer:
{
printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidMemcpyDirection:
{
printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
exit(-1);
break;
} */
default:
{
printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,cudaGetErrorString(ret));
exit(-1);
break;
}
}
}
/*free memory*/
void dfree(double * arr[],int len)
{
for(int i=0;i<len;i++)
CUDA_SAFE_CALL(cudaFree(arr[i]));
printf("mem freed\n");
}
/* function to calculate relative error*/
void relError(double* dRes,double* hRes,int size)
{
double relativeError=0.0,errorNorm=0.0;
int flag=0;
int i;
for( i = 0; i < size; ++i) {
if (fabs(hRes[i]) > fabs(dRes[i]))
relativeError = fabs((hRes[i] - dRes[i]) / hRes[i]);
else
relativeError = fabs((dRes[i] - hRes[i]) / dRes[i]);
if (relativeError > EPS && relativeError != 0.0e+00 )
{
if(errorNorm < relativeError)
{
errorNorm = relativeError;
flag=1;
}
}
}
if( flag == 1)
{
printf(" \n Results verfication : Failed");
printf(" \n Considered machine precision : %e", EPS);
printf(" \n Relative Error : %e\n", errorNorm);
}
else
printf("\n Results verfication : Success\n");
}
/*prints the result in screen*/
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
printf("\n---------------%s----------------\n",program_name);
printf("\tSIZE\t TIME_SEC\t Gflops\n");
if(flag==1)
printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
else
printf("\t%d\t%lf\t%lf\t",size,"---","---");
}
/*function to check blocks per grid and threads per block*/
void check_block_grid_dim(cudaDeviceProp devProp,dim3 blockDim,dim3 gridDim)
{
if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] )
{
printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]);
exit(-1);
}
if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] )
{
printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]);
exit(-1);
}
}
/*Get the number of GPU devices present on the host */
int get_DeviceCount()
{
int count;
cudaGetDeviceCount(&count);
return count;
}
/*Fill in the vector with double precision values */
void fill_dp_vector(double* vec,int size)
{
int ind;
for(ind=0;ind<size;ind++)
vec[ind]=drand48();
}
/////////////////////////////////////////////////////////////////////////////////////////
//
// MatVect : this kernel will perform actual MatrixVector Multiplication
//
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void MatVectMultiplication(double *device_Mat, double *device_Vect,int matRowSize, int vlength,double *device_ResVect)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int tindex=tidx+gridDim.x*BLOCKSIZE*tidy;
if(tindex<matRowSize)
{
int i;int m=tindex*vlength;
device_ResVect[tindex]=0.00;
for(i=0;i<vlength;i++)
device_ResVect[tindex]+=device_Mat[m+i]*device_Vect[i];
}
__syncthreads();
}//end of MatVect device function
/*function to launch kernel*/
void launch_Kernel_MatVectMul()
{
/* threads_per_block, blocks_per_grid */
int max=BLOCKSIZE*BLOCKSIZE;
int BlocksPerGrid=matRowSize/max+1;
dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
if(matRowSize%max==0)BlocksPerGrid--;
dim3 dimGrid(1,BlocksPerGrid);
check_block_grid_dim(deviceProp,dimBlock,dimGrid);
MatVectMultiplication<<<dimGrid,dimBlock>>>(device_Mat,device_Vect,matRowSize,vlength,device_ResVect);
}
/*main function*/
int main()
{
// Vector length , Matrix Row and Col sizes..............
vlength = matColSize = SIZE;
matRowSize = SIZE;
// printf("this programs does computation of square matrix only\n");
float elapsedTime,Tsec;
cudaEvent_t start,stop;
device_Count=get_DeviceCount();
printf("\n\nNUmber of Devices : %d\n\n", device_Count);
// Device Selection, Device 1: Tesla C1060
cudaSetDevice(0);
int device;
// Current Device Detection
cudaGetDevice(&device);
cudaGetDeviceProperties(&deviceProp,device);
printf("Using device %d: %s \n", device, deviceProp.name);
/*allocating the memory for each matrix */
host_Mat =new double[matRowSize*matColSize];
host_Vect = new double[vlength];
host_ResVect = new double[matRowSize];
// ---------------checking host memory for error..............................
if(host_Mat==NULL)
mem_error("host_Mat","vectmatmul",matRowSize*matColSize,"double");
if(host_Vect==NULL)
mem_error("host_Vect","vectmatmul",vlength,"double");
if(host_ResVect==NULL)
mem_error("host_ResVect","vectmatmul",matRowSize,"double");
//--------------Initializing the input arrays..............
fill_dp_vector(host_Mat,matRowSize*matColSize);
fill_dp_vector(host_Vect,vlength);
/* allocate memory for GPU events
start = (cudaEvent_t) malloc (sizeof(cudaEvent_t));
stop = (cudaEvent_t) malloc (sizeof(cudaEvent_t));
if(start==NULL)
mem_error("start","vectvectmul",1,"cudaEvent_t");
if(stop==NULL)
mem_error("stop","vectvectmul",1,"cudaEvent_t");*/
//event creation...
CUDA_SAFE_CALL(cudaEventCreate (&start));
CUDA_SAFE_CALL(cudaEventCreate (&stop));
//allocating memory on GPU
CUDA_SAFE_CALL(cudaMalloc( (void**)&device_Mat, matRowSize*matColSize* sizeof(double)));
CUDA_SAFE_CALL(cudaMalloc( (void**)&device_Vect, vlength* sizeof(double)));
CUDA_SAFE_CALL(cudaMalloc( (void**)&device_ResVect, matRowSize* sizeof(double)));
//moving data from CPU to GPU
CUDA_SAFE_CALL(cudaMemcpy((void*)device_Mat, (void*)host_Mat, matRowSize*matColSize*sizeof(double) ,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy((void*)device_Vect, (void*)host_Vect,vlength*sizeof(double),cudaMemcpyHostToDevice));
// Launching kernel..........
CUDA_SAFE_CALL(cudaEventRecord (start, 0));
launch_Kernel_MatVectMul();
CUDA_SAFE_CALL(cudaEventRecord (stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize (stop));
CUDA_SAFE_CALL(cudaEventElapsedTime ( &elapsedTime, start, stop));
Tsec= 1.0e-3*elapsedTime;
// calling function for measuring Gflops
calculate_gflops(Tsec);
//printing the result on screen
print_on_screen("MAT VECT MULTIPLICATION",Tsec,calculate_gflops(Tsec),size,1);
//retrieving result from device
CUDA_SAFE_CALL(cudaMemcpy((void*)host_ResVect, (void*)device_ResVect,matRowSize*sizeof(double),cudaMemcpyDeviceToHost));
// CPU calculation..and checking error deviation....
CPU_MatVect();
relError(cpu_ResVect,host_ResVect,size);
printf("\n ----------------------------------------------------------------------\n");
/*free the memory from GPU */
double *array[3];
array[0]=device_Mat;
array[1]=device_Vect;
array[2]=device_ResVect;
dfree(array,3);
//free host memory----------
free(host_Mat);
free(host_Vect);
free(host_ResVect);
free(cpu_ResVect);
return 0;
}// end of main
|
/*****************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Example : cuda-matrix-vector-multiplication.cu
Objective : Write CUDA program to compute Matrix-Vector multiplication.
Input : None
Output : Execution time in seconds , Gflops achieved
Created : August-2013
E-mail : [email protected]
****************************************************************************/
#include<stdio.h>
#include<hip/hip_runtime.h>
#define BLOCKSIZE 16
#define SIZE 1024
#define EPS 1.0e-15
hipDeviceProp_t deviceProp;
double *host_Mat,*host_Vect,*host_ResVect,*cpu_ResVect;
double *device_Mat,*device_Vect,*device_ResVect;
int vlength ,matRowSize , matColSize;
int device_Count;
int size = SIZE;
/*mem error*/
void mem_error(char *arrayname, char *benchmark, int len, char *type)
{
printf("\nMemory not sufficient to allocate for array %s\n\tBenchmark : %s \n\tMemory requested = %d number of %s elements\n",arrayname, benchmark, len, type);
exit(-1);
}
/*calculate Gflops*/
double calculate_gflops(float &Tsec)
{
float gflops=(1.0e-9 * (( 2.0 * size*size )/Tsec));
return gflops;
}
/*sequential function for mat vect multiplication*/
void CPU_MatVect()
{
cpu_ResVect = (double *)malloc(matRowSize*sizeof(double));
if(cpu_ResVect==NULL)
mem_error("cpu_ResVect","vectmatmul",size,"double");
int i,j;
for(i=0;i<matRowSize;i++)
{cpu_ResVect[i]=0;
for(j=0;j<matColSize;j++)
cpu_ResVect[i]+=host_Mat[i*vlength+j]*host_Vect[j];
}
}
/*Check for safe return of all calls to the device */
void CUDA_SAFE_CALL(hipError_t call)
{
hipError_t ret = call;
//printf("RETURN FROM THE CUDA CALL:%d\t:",ret);
switch(ret)
{
case hipSuccess:
// printf("Success\n");
break;
/* case cudaErrorInvalidValue:
{
printf("ERROR: InvalidValue:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidDevicePointer:
{
printf("ERROR:Invalid Device pointeri:%i.\n",__LINE__);
exit(-1);
break;
}
case cudaErrorInvalidMemcpyDirection:
{
printf("ERROR:Invalid memcpy direction:%i.\n",__LINE__);
exit(-1);
break;
} */
default:
{
printf(" ERROR at line :%i.%d' ' %s\n",__LINE__,ret,hipGetErrorString(ret));
exit(-1);
break;
}
}
}
/*free memory*/
void dfree(double * arr[],int len)
{
for(int i=0;i<len;i++)
CUDA_SAFE_CALL(hipFree(arr[i]));
printf("mem freed\n");
}
/* function to calculate relative error*/
void relError(double* dRes,double* hRes,int size)
{
double relativeError=0.0,errorNorm=0.0;
int flag=0;
int i;
for( i = 0; i < size; ++i) {
if (fabs(hRes[i]) > fabs(dRes[i]))
relativeError = fabs((hRes[i] - dRes[i]) / hRes[i]);
else
relativeError = fabs((dRes[i] - hRes[i]) / dRes[i]);
if (relativeError > EPS && relativeError != 0.0e+00 )
{
if(errorNorm < relativeError)
{
errorNorm = relativeError;
flag=1;
}
}
}
if( flag == 1)
{
printf(" \n Results verfication : Failed");
printf(" \n Considered machine precision : %e", EPS);
printf(" \n Relative Error : %e\n", errorNorm);
}
else
printf("\n Results verfication : Success\n");
}
/*prints the result in screen*/
void print_on_screen(char * program_name,float tsec,double gflops,int size,int flag)//flag=1 if gflops has been calculated else flag =0
{
printf("\n---------------%s----------------\n",program_name);
printf("\tSIZE\t TIME_SEC\t Gflops\n");
if(flag==1)
printf("\t%d\t%f\t%lf\t",size,tsec,gflops);
else
printf("\t%d\t%lf\t%lf\t",size,"---","---");
}
/*function to check blocks per grid and threads per block*/
void check_block_grid_dim(hipDeviceProp_t devProp,dim3 blockDim,dim3 gridDim)
{
if( blockDim.x >= devProp.maxThreadsDim[0] || blockDim.y >= devProp.maxThreadsDim[1] || blockDim.z >= devProp.maxThreadsDim[2] )
{
printf("\nBlock Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxThreadsDim[0],devProp.maxThreadsDim[1],devProp.maxThreadsDim[2]);
exit(-1);
}
if( gridDim.x >= devProp.maxGridSize[0] || gridDim.y >= devProp.maxGridSize[1] || gridDim.z >= devProp.maxGridSize[2] )
{
printf("\nGrid Dimensions exceed the maximum limits:%d * %d * %d \n",devProp.maxGridSize[0],devProp.maxGridSize[1],devProp.maxGridSize[2]);
exit(-1);
}
}
/*Get the number of GPU devices present on the host */
int get_DeviceCount()
{
int count;
hipGetDeviceCount(&count);
return count;
}
/*Fill in the vector with double precision values */
void fill_dp_vector(double* vec,int size)
{
int ind;
for(ind=0;ind<size;ind++)
vec[ind]=drand48();
}
/////////////////////////////////////////////////////////////////////////////////////////
//
// MatVect : this kernel will perform actual MatrixVector Multiplication
//
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void MatVectMultiplication(double *device_Mat, double *device_Vect,int matRowSize, int vlength,double *device_ResVect)
{
int tidx = blockIdx.x*blockDim.x + threadIdx.x;
int tidy = blockIdx.y*blockDim.y + threadIdx.y;
int tindex=tidx+gridDim.x*BLOCKSIZE*tidy;
if(tindex<matRowSize)
{
int i;int m=tindex*vlength;
device_ResVect[tindex]=0.00;
for(i=0;i<vlength;i++)
device_ResVect[tindex]+=device_Mat[m+i]*device_Vect[i];
}
__syncthreads();
}//end of MatVect device function
/*function to launch kernel*/
void launch_Kernel_MatVectMul()
{
/* threads_per_block, blocks_per_grid */
int max=BLOCKSIZE*BLOCKSIZE;
int BlocksPerGrid=matRowSize/max+1;
dim3 dimBlock(BLOCKSIZE,BLOCKSIZE);
if(matRowSize%max==0)BlocksPerGrid--;
dim3 dimGrid(1,BlocksPerGrid);
check_block_grid_dim(deviceProp,dimBlock,dimGrid);
MatVectMultiplication<<<dimGrid,dimBlock>>>(device_Mat,device_Vect,matRowSize,vlength,device_ResVect);
}
/*main function*/
int main()
{
// Vector length , Matrix Row and Col sizes..............
vlength = matColSize = SIZE;
matRowSize = SIZE;
// printf("this programs does computation of square matrix only\n");
float elapsedTime,Tsec;
hipEvent_t start,stop;
device_Count=get_DeviceCount();
printf("\n\nNUmber of Devices : %d\n\n", device_Count);
// Device Selection, Device 1: Tesla C1060
hipSetDevice(0);
int device;
// Current Device Detection
hipGetDevice(&device);
hipGetDeviceProperties(&deviceProp,device);
printf("Using device %d: %s \n", device, deviceProp.name);
/*allocating the memory for each matrix */
host_Mat =new double[matRowSize*matColSize];
host_Vect = new double[vlength];
host_ResVect = new double[matRowSize];
// ---------------checking host memory for error..............................
if(host_Mat==NULL)
mem_error("host_Mat","vectmatmul",matRowSize*matColSize,"double");
if(host_Vect==NULL)
mem_error("host_Vect","vectmatmul",vlength,"double");
if(host_ResVect==NULL)
mem_error("host_ResVect","vectmatmul",matRowSize,"double");
//--------------Initializing the input arrays..............
fill_dp_vector(host_Mat,matRowSize*matColSize);
fill_dp_vector(host_Vect,vlength);
/* allocate memory for GPU events
start = (cudaEvent_t) malloc (sizeof(cudaEvent_t));
stop = (cudaEvent_t) malloc (sizeof(cudaEvent_t));
if(start==NULL)
mem_error("start","vectvectmul",1,"cudaEvent_t");
if(stop==NULL)
mem_error("stop","vectvectmul",1,"cudaEvent_t");*/
//event creation...
CUDA_SAFE_CALL(hipEventCreate (&start));
CUDA_SAFE_CALL(hipEventCreate (&stop));
//allocating memory on GPU
CUDA_SAFE_CALL(hipMalloc( (void**)&device_Mat, matRowSize*matColSize* sizeof(double)));
CUDA_SAFE_CALL(hipMalloc( (void**)&device_Vect, vlength* sizeof(double)));
CUDA_SAFE_CALL(hipMalloc( (void**)&device_ResVect, matRowSize* sizeof(double)));
//moving data from CPU to GPU
CUDA_SAFE_CALL(hipMemcpy((void*)device_Mat, (void*)host_Mat, matRowSize*matColSize*sizeof(double) ,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy((void*)device_Vect, (void*)host_Vect,vlength*sizeof(double),hipMemcpyHostToDevice));
// Launching kernel..........
CUDA_SAFE_CALL(hipEventRecord (start, 0));
launch_Kernel_MatVectMul();
CUDA_SAFE_CALL(hipEventRecord (stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize (stop));
CUDA_SAFE_CALL(hipEventElapsedTime ( &elapsedTime, start, stop));
Tsec= 1.0e-3*elapsedTime;
// calling function for measuring Gflops
calculate_gflops(Tsec);
//printing the result on screen
print_on_screen("MAT VECT MULTIPLICATION",Tsec,calculate_gflops(Tsec),size,1);
//retrieving result from device
CUDA_SAFE_CALL(hipMemcpy((void*)host_ResVect, (void*)device_ResVect,matRowSize*sizeof(double),hipMemcpyDeviceToHost));
// CPU calculation..and checking error deviation....
CPU_MatVect();
relError(cpu_ResVect,host_ResVect,size);
printf("\n ----------------------------------------------------------------------\n");
/*free the memory from GPU */
double *array[3];
array[0]=device_Mat;
array[1]=device_Vect;
array[2]=device_ResVect;
dfree(array,3);
//free host memory----------
free(host_Mat);
free(host_Vect);
free(host_ResVect);
free(cpu_ResVect);
return 0;
}// end of main
|
Convert the following CUDA code to AMD GPU code:
cuda
/* LA-CC-16080
Copyright © 2016 Priscilla Kelly and Los Alamos National Laboratory. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY Priscilla Kelly and Los Alamos National Laboratory "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Priscilla Kelly <[email protected]>
*/
#include "stdio.h"
#include "stdlib.h"
#define maxThread 32
/*******************************************/
/* Cuda kernel to apply the rules of Life */
/*******************************************/
__global__ void applyRules(int row,int col,int *update, int *hold) {
int threadMax = blockDim.x;
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int linID;
if (blockID == 1) {
linID = threadID;
} else {
linID = blockID*threadMax+threadID;
}
int elements = (row-2)*(col-2);
int i = linID%(col-2);
int j = linID/(row-2);
int loc = col + i*col + j + 1;
if (linID < elements) {
int liveCells = 0;
int n, s, e, w, nw, ne, sw, se; // location in halo
n = loc-col;
nw = n-1;
ne = n+1;
w = loc-1;
e = loc+1;
s = loc+col;
sw = s-1;
se = s+1;
liveCells = hold[nw] + hold[n] + hold[ne]
+ hold[w] + hold[e]
+ hold[sw] + hold[s] + hold[se];
// Apply Rules
if (hold[loc] == 0) {
if (liveCells == 3) {
update[loc] = 1; // reproduction
} else {
update[loc] = 0; // remain dead
}
} else {
if (liveCells < 2){
update[loc] = 0; // under population
} else {
if (liveCells < 4) {
update[loc] = 1; // survivor
} else {
update[loc] = 0; // over population
}
}
}
}
}
/*******************************************/
/* Cuda kernel to upload N/S halo elements */
/*******************************************/
__global__ void add_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
subMat[t] = haloMat[t];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-1)+t;
int haloLoc = (row+col)+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to upload E/W halo elements */
/*******************************************/
__global__ void add_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = col-1+(col)*t;
int haloLoc = col+t;
subMat[subLoc] = haloMat[haloLoc];
}
// add the West portion
if (b == 1) {
int subLoc = (col)*t;
int haloLoc = 2*row+col+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to get N/S halo elements */
/*******************************************/
__global__ void get_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
haloMat[t]=subMat[t+col];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-2)+t;
int haloLoc = (row+col)+t;
haloMat[haloLoc]=subMat[subLoc];
}
}
/*******************************************/
/* Cuda kernel to get E/W halo elements */
/*******************************************/
__global__ void get_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = (col-2)+col*t;
int haloLoc = col+t;
haloMat[haloLoc]=subMat[subLoc];
}
// add the West portion
if (b == 1) {
int subLoc = 1+col*t;
int haloLoc = col+2*row+t;
haloMat[haloLoc]=subMat[subLoc] ;
}
}
/***************************************/
/* External c subroutine for CUDA */
/***************************************/
extern "C" void call_cuda_applyRules(int flag,int rows, int cols,int *halo, int *halo_dev, int *update, int *hold) {
/**************************************************/
/* Get the values to exchange over MPI */
/**************************************************/
if (flag == 0) {
int haloSize = sizeof(int)*2*(rows+cols);
cudaError_t err = cudaSuccess;
// Add the North and South rows to hold
get_NS_Halo<<<2, cols>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// Add the East and West columns to hold
get_EW_Halo<<<2, rows>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
err = cudaMemcpy(halo,halo_dev,haloSize,cudaMemcpyDeviceToHost);
return;
}
/*****************************************************/
/* Update hold with halo, then apply rules to update */
/*****************************************************/
if (flag == 1) {
int haloSize = sizeof(int)*2*(rows+cols);
cudaError_t err = cudaSuccess;
// Copy updated halo to GPU
err = cudaMemcpy(halo_dev,halo,haloSize,cudaMemcpyHostToDevice);
// Add the North and South rows to hold
add_NS_Halo<<<2, cols>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// Add the East and West columns to hold
add_EW_Halo<<<2, rows>>>(rows,cols,halo_dev,hold);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
// Apply the Rules
int N = (rows-2)*(cols-2);
int threadCnt;
int blockCnt;
//threads per block
threadCnt = maxThread;
//blocks
int check = N/maxThread;
if (check == 0) {
blockCnt = 1;
} else {
blockCnt = check + 1;
}
applyRules<<<blockCnt, threadCnt>>>(rows,cols,update,hold);
if (err != cudaSuccess) {
fprintf(stderr, "Failed to copy halo to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
return;
}
}
|
/* LA-CC-16080
Copyright © 2016 Priscilla Kelly and Los Alamos National Laboratory. All Rights Reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY Priscilla Kelly and Los Alamos National Laboratory "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Priscilla Kelly <[email protected]>
*/
#include <hip/hip_runtime.h>
#include "stdio.h"
#include "stdlib.h"
#define maxThread 32
/*******************************************/
/* Cuda kernel to apply the rules of Life */
/*******************************************/
__global__ void applyRules(int row,int col,int *update, int *hold) {
int threadMax = blockDim.x;
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int linID;
if (blockID == 1) {
linID = threadID;
} else {
linID = blockID*threadMax+threadID;
}
int elements = (row-2)*(col-2);
int i = linID%(col-2);
int j = linID/(row-2);
int loc = col + i*col + j + 1;
if (linID < elements) {
int liveCells = 0;
int n, s, e, w, nw, ne, sw, se; // location in halo
n = loc-col;
nw = n-1;
ne = n+1;
w = loc-1;
e = loc+1;
s = loc+col;
sw = s-1;
se = s+1;
liveCells = hold[nw] + hold[n] + hold[ne]
+ hold[w] + hold[e]
+ hold[sw] + hold[s] + hold[se];
// Apply Rules
if (hold[loc] == 0) {
if (liveCells == 3) {
update[loc] = 1; // reproduction
} else {
update[loc] = 0; // remain dead
}
} else {
if (liveCells < 2){
update[loc] = 0; // under population
} else {
if (liveCells < 4) {
update[loc] = 1; // survivor
} else {
update[loc] = 0; // over population
}
}
}
}
}
/*******************************************/
/* Cuda kernel to upload N/S halo elements */
/*******************************************/
__global__ void add_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
subMat[t] = haloMat[t];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-1)+t;
int haloLoc = (row+col)+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to upload E/W halo elements */
/*******************************************/
__global__ void add_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = col-1+(col)*t;
int haloLoc = col+t;
subMat[subLoc] = haloMat[haloLoc];
}
// add the West portion
if (b == 1) {
int subLoc = (col)*t;
int haloLoc = 2*row+col+t;
subMat[subLoc] = haloMat[haloLoc];
}
}
/*******************************************/
/* Cuda kernel to get N/S halo elements */
/*******************************************/
__global__ void get_NS_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add North portion
if (b == 0) {
haloMat[t]=subMat[t+col];
}
// add South portion
if (b == 1) {
int subLoc = col*(row-2)+t;
int haloLoc = (row+col)+t;
haloMat[haloLoc]=subMat[subLoc];
}
}
/*******************************************/
/* Cuda kernel to get E/W halo elements */
/*******************************************/
__global__ void get_EW_Halo(int row,int col,int *haloMat,int *subMat) {
int b = blockIdx.x;
int t = threadIdx.x;
// add East portion
if (b == 0) {
int subLoc = (col-2)+col*t;
int haloLoc = col+t;
haloMat[haloLoc]=subMat[subLoc];
}
// add the West portion
if (b == 1) {
int subLoc = 1+col*t;
int haloLoc = col+2*row+t;
haloMat[haloLoc]=subMat[subLoc] ;
}
}
/***************************************/
/* External c subroutine for CUDA */
/***************************************/
extern "C" void call_cuda_applyRules(int flag,int rows, int cols,int *halo, int *halo_dev, int *update, int *hold) {
/**************************************************/
/* Get the values to exchange over MPI */
/**************************************************/
if (flag == 0) {
int haloSize = sizeof(int)*2*(rows+cols);
hipError_t err = hipSuccess;
// Add the North and South rows to hold
get_NS_Halo<<<2, cols>>>(rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Add the East and West columns to hold
get_EW_Halo<<<2, rows>>>(rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
err = hipMemcpy(halo,halo_dev,haloSize,hipMemcpyDeviceToHost);
return;
}
/*****************************************************/
/* Update hold with halo, then apply rules to update */
/*****************************************************/
if (flag == 1) {
int haloSize = sizeof(int)*2*(rows+cols);
hipError_t err = hipSuccess;
// Copy updated halo to GPU
err = hipMemcpy(halo_dev,halo,haloSize,hipMemcpyHostToDevice);
// Add the North and South rows to hold
add_NS_Halo<<<2, cols>>>(rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Add the East and West columns to hold
add_EW_Halo<<<2, rows>>>(rows,cols,halo_dev,hold);
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
// Apply the Rules
int N = (rows-2)*(cols-2);
int threadCnt;
int blockCnt;
//threads per block
threadCnt = maxThread;
//blocks
int check = N/maxThread;
if (check == 0) {
blockCnt = 1;
} else {
blockCnt = check + 1;
}
applyRules<<<blockCnt, threadCnt>>>(rows,cols,update,hold);
if (err != hipSuccess) {
fprintf(stderr, "Failed to copy halo to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
return;
}
}
|
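The CUDA error-handling calls in the pair above map one-to-one onto HIP (cudaError_t → hipError_t, cudaSuccess → hipSuccess, cudaGetLastError → hipGetLastError, cudaGetErrorString → hipGetErrorString, cudaDeviceSynchronize → hipDeviceSynchronize). A minimal self-contained sketch of the same launch-and-check pattern; the kernel and sizes here are made up purely for illustration:
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void demo_kernel(int *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = i;   // trivial body, only here so there is something to launch
}
int main() {
    const int n = 256;
    int *d_out;
    hipError_t err = hipMalloc(&d_out, n * sizeof(int));
    if (err != hipSuccess) {
        fprintf(stderr, "hipMalloc failed: %s\n", hipGetErrorString(err));
        return 1;
    }
    demo_kernel<<<1, n>>>(d_out, n);   // triple-chevron launches compile unchanged under hipcc
    err = hipGetLastError();           // detects launch-configuration errors
    if (err != hipSuccess) {
        fprintf(stderr, "launch failed: %s\n", hipGetErrorString(err));
        return 1;
    }
    hipDeviceSynchronize();            // wait for completion before freeing
    hipFree(d_out);
    return 0;
}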
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define NUM_NODES 1024
// Declaration of a structure
typedef struct {
int startIndex; // starting index in Adj list
int numberOfNeighbors; // number of neighbors of each vertex
} Node;
__global__ void bfs_optimized(Node *gpu_vertex, int *gpu_neighbors, bool *gpu_frontier, bool *gpu_visited, int *gpu_cost, bool *gpu_done) {
// ThreadID
int threadId = threadIdx.x + blockIdx.x * blockDim.x;
// boundary condition for threadID
if (threadId > NUM_NODES)
*gpu_done = false;
// checking condition for frontier and visited node array
if (gpu_frontier[threadId] == true && gpu_visited[threadId] == false) {
// Init
gpu_frontier[threadId] = false;
gpu_visited[threadId] = true;
// assign values from array
int startPoint = gpu_vertex[threadId].startIndex;
int endPoint = startPoint + gpu_vertex[threadId].numberOfNeighbors;
// traverse to the neighbors for every vertex
for (int i = startPoint; i < endPoint; i++) {
int neighbor = gpu_neighbors[i];
// check visited mark and increase cost
if (gpu_visited[neighbor] == false) {
gpu_cost[neighbor] = gpu_cost[threadId] + 1;
gpu_frontier[neighbor] = true;
*gpu_done = false;
}
}
}
}
// Main method
int main(int argc, char* argv[]) {
// Kernel launch parameters
int numberOfThreads = 1024;
int numberOfBlocks = NUM_NODES/numberOfThreads;
// Initialization of struct and neighbors array
Node vertex[NUM_NODES];
int edges[NUM_NODES];
// populate the graph
for(int i=0;i<NUM_NODES;i++) {
vertex[i].numberOfNeighbors = 1;//(rand() % 5)+1;
}
vertex[0].startIndex = 0;
for(int j=1;j<NUM_NODES;j++) {
vertex[j].startIndex = vertex[j-1].startIndex + vertex[j-1].numberOfNeighbors;
}
for(int k=0;k<NUM_NODES;k++) {
edges[k] = k+1;
}
cudaSetDevice(0);
// Time variable
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Initialization of arrays for frontier, visited nodes, and cost path
bool frontierArray[NUM_NODES] = { false };
bool visitedNodes[NUM_NODES] = { false };
int costOfPath[NUM_NODES] = { 0 };
int source = 0;
frontierArray[source] = true;
// GPU variable declaration
Node* gpu_vertex;
int* gpu_neighbors;
bool* gpu_frontier;
bool* gpu_visited;
int* gpu_cost;
bool* gpu_done;
// GPU memory allocation
cudaMalloc((void**)&gpu_vertex, sizeof(Node)*NUM_NODES);
cudaMalloc((void**)&gpu_neighbors, sizeof(int)*NUM_NODES);
cudaMalloc((void**)&gpu_frontier, sizeof(bool)*NUM_NODES);
cudaMalloc((void**)&gpu_visited, sizeof(bool)*NUM_NODES);
cudaMalloc((void**)&gpu_cost, sizeof(int)*NUM_NODES);
cudaMalloc((void**)&gpu_done, sizeof(bool));
// Transfer of data from CPU to GPU
cudaMemcpy(gpu_vertex, vertex, sizeof(Node)*NUM_NODES, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_neighbors, edges, sizeof(int)*NUM_NODES, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_frontier, frontierArray, sizeof(bool)*NUM_NODES, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_visited, visitedNodes, sizeof(bool)*NUM_NODES, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_cost, costOfPath, sizeof(int)*NUM_NODES, cudaMemcpyHostToDevice);
bool cpu_done;
cudaEventRecord(start, 0);
int Kernel_call_count = 0;
do {
Kernel_call_count++;
cpu_done = true;
cudaMemcpy(gpu_done, &cpu_done, sizeof(bool), cudaMemcpyHostToDevice);
// Kernel call
bfs_optimized<<<numberOfBlocks, numberOfThreads>>>(gpu_vertex, gpu_neighbors, gpu_frontier, gpu_visited, gpu_cost, gpu_done);
cudaMemcpy(&cpu_done, gpu_done , sizeof(bool), cudaMemcpyDeviceToHost);
} while (!cpu_done);
// Copy final results from GPU to CPU
cudaMemcpy(costOfPath, gpu_cost, sizeof(int)*NUM_NODES, cudaMemcpyDeviceToHost);
printf("Kernel call count: %d\n", Kernel_call_count);
cudaEventRecord(stop, 0);
cudaEventElapsedTime(&time, start, stop);
printf("Parallel Job execution time: %.2f ms\n", time);
cudaFree(gpu_vertex);
cudaFree(gpu_neighbors);
cudaFree(gpu_frontier);
cudaFree(gpu_visited);
cudaFree(gpu_cost);
cudaFree(gpu_done);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define NUM_NODES 1024
// Declaration of a structure
typedef struct {
int startIndex; // starting index in Adj list
int numberOfNeighbors; // number of neighbors of each vertices
} Node;
__global__ void bfs_optimized(Node *gpu_vertex, int *gpu_neighbors, bool *gpu_frontier, bool *gpu_visited, int *gpu_cost, bool *gpu_done) {
// ThreadID
int threadId = threadIdx.x + blockIdx.x * blockDim.x;
// boundary condition for threadID
if (threadId > NUM_NODES)
*gpu_done = false;
// checking condition for frontier and visited node array
if (gpu_frontier[threadId] == true && gpu_visited[threadId] == false) {
// Init
gpu_frontier[threadId] = false;
gpu_visited[threadId] = true;
// assign values from array
int startPoint = gpu_vertex[threadId].startIndex;
int endPoint = startPoint + gpu_vertex[threadId].numberOfNeighbors;
// traverse to the neighbors for every vertex
for (int i = startPoint; i < endPoint; i++) {
int neighbor = gpu_neighbors[i];
// check visited mark and increase cost
if (gpu_visited[neighbor] == false) {
gpu_cost[neighbor] = gpu_cost[threadId] + 1;
gpu_frontier[neighbor] = true;
*gpu_done = false;
}
}
}
}
// Main method
int main(int argc, char* argv[]) {
// Kernel launch parameters
int numberOfThreads = 1024;
int numberOfBlocks = NUM_NODES/numberOfThreads;
// Initialization of struct and neighbors array
Node vertex[NUM_NODES];
int edges[NUM_NODES];
// populate the graph
for(int i=0;i<NUM_NODES;i++) {
vertex[i].numberOfNeighbors = 1;//(rand() % 5)+1;
}
vertex[0].startIndex = 0;
for(int j=1;j<NUM_NODES;j++) {
vertex[j].startIndex = vertex[j-1].startIndex + vertex[j-1].numberOfNeighbors;
}
for(int k=0;k<NUM_NODES;k++) {
edges[k] = k+1;
}
hipSetDevice(0);
// Time variable
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialization of arrays for frontier, visited nodes, and cost path
bool frontierArray[NUM_NODES] = { false };
bool visitedNodes[NUM_NODES] = { false };
int costOfPath[NUM_NODES] = { 0 };
int source = 0;
frontierArray[source] = true;
// GPU variable declaration
Node* gpu_vertex;
int* gpu_neighbors;
bool* gpu_frontier;
bool* gpu_visited;
int* gpu_cost;
bool* gpu_done;
// GPU memory allocation
hipMalloc((void**)&gpu_vertex, sizeof(Node)*NUM_NODES);
hipMalloc((void**)&gpu_neighbors, sizeof(int)*NUM_NODES);
hipMalloc((void**)&gpu_frontier, sizeof(bool)*NUM_NODES);
hipMalloc((void**)&gpu_visited, sizeof(bool)*NUM_NODES);
hipMalloc((void**)&gpu_cost, sizeof(int)*NUM_NODES);
hipMalloc((void**)&gpu_done, sizeof(bool));
// Transfer of data from CPU to GPU
hipMemcpy(gpu_vertex, vertex, sizeof(Node)*NUM_NODES, hipMemcpyHostToDevice);
hipMemcpy(gpu_neighbors, edges, sizeof(int)*NUM_NODES, hipMemcpyHostToDevice);
hipMemcpy(gpu_frontier, frontierArray, sizeof(bool)*NUM_NODES, hipMemcpyHostToDevice);
hipMemcpy(gpu_visited, visitedNodes, sizeof(bool)*NUM_NODES, hipMemcpyHostToDevice);
hipMemcpy(gpu_cost, costOfPath, sizeof(int)*NUM_NODES, hipMemcpyHostToDevice);
bool cpu_done;
hipEventRecord(start, 0);
int Kernel_call_count = 0;
do {
Kernel_call_count++;
cpu_done = true;
hipMemcpy(gpu_done, &cpu_done, sizeof(bool), hipMemcpyHostToDevice);
// Kernel call
bfs_optimized<<<numberOfBlocks, numberOfThreads>>>(gpu_vertex, gpu_neighbors, gpu_frontier, gpu_visited, gpu_cost, gpu_done);
hipMemcpy(&cpu_done, gpu_done , sizeof(bool), hipMemcpyDeviceToHost);
} while (!cpu_done);
// Copy final results from GPU to CPU
hipMemcpy(costOfPath, gpu_cost, sizeof(int)*NUM_NODES, hipMemcpyDeviceToHost);
printf("Kernel call count: %d\n", Kernel_call_count);
hipEventRecord(stop, 0);
hipEventElapsedTime(&time, start, stop);
printf("Parallel Job execution time: %.2f ms\n", time);
hipFree(gpu_vertex);
hipFree(gpu_neighbors);
hipFree(gpu_frontier);
hipFree(gpu_visited);
hipFree(gpu_cost);
hipFree(gpu_done);
return 0;
}
|
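The timing API in the pair above converts mechanically (cudaEvent_t → hipEvent_t, cudaEventCreate/Record/Synchronize/ElapsedTime → the hip* equivalents). One detail worth flagging: hipEventElapsedTime expects the stop event to have completed, so the usual pattern calls hipEventSynchronize(stop) before reading the time, which the code above omits. A minimal sketch of that pattern; the kernel name and sizes are illustrative only:
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void scale(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = 2.0f * x[i] + 1.0f;
}
int main() {
    const int n = 1 << 20;
    float *d_x;
    hipMalloc(&d_x, n * sizeof(float));
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    scale<<<(n + 255) / 256, 256>>>(d_x, n);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);            // ensure the stop event has been reached
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    printf("kernel time: %.3f ms\n", ms);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_x);
    return 0;
}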
Convert the following CUDA code to AMD GPU code:
cuda
__global__ void leakyrelu_kernel(float* output,
float* input,
float slope,
int dim_xw,
int dim_xh){
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float input_s[1024];
// if(tid < 1024){
if(idx < dim_xw *dim_xh){
input_s[tid] = input[idx];
}
if(tid < 1024){
input_s[tid] = fmaxf(0,input_s[tid]) + slope * fminf(0,input_s[tid]);
}
if(idx < dim_xw *dim_xh){
output[idx] = input_s[tid];
}
}
void launch_leakyrelu(float* output_y,
float *input_X,
float slope,
int dim_xw,
int dim_xh
){
int num_element = dim_xw * dim_xh;
dim3 gridSize((num_element+1023)/1024);
dim3 blockSize(1024);
leakyrelu_kernel<<<gridSize, blockSize>>>(output_y, \
input_X, \
slope, \
dim_xw,\
dim_xh);
}
|
#include <hip/hip_runtime.h>
__global__ void leakyrelu_kernel(float* output,
float* input,
float slope,
int dim_xw,
int dim_xh){
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float input_s[1024];
// if(tid < 1024){
if(idx < dim_xw *dim_xh){
input_s[tid] = input[idx];
}
if(tid < 1024){
input_s[tid] = fmaxf(0,input_s[tid]) + slope * fminf(0,input_s[tid]);
}
if(idx < dim_xw *dim_xh){
output[idx] = input_s[tid];
}
}
void launch_leakyrelu(float* output_y,
float *input_X,
float slope,
int dim_xw,
int dim_xh
){
int num_element = dim_xw * dim_xh;
dim3 gridSize((num_element+1023)/1024);
dim3 blockSize(1024);
leakyrelu_kernel<<<gridSize, blockSize>>>(output_y, \
input_X, \
slope, \
dim_xw,\
dim_xh);
}
|
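When the source contains only kernels and chevron launches, as in the pair above, the conversion reduces to adding #include <hip/hip_runtime.h>; __global__, __shared__, and the <<<grid, block>>> syntax compile unchanged with hipcc. The same launch can also be written with the hipLaunchKernelGGL macro; a small sketch with an illustrative kernel:
#include <hip/hip_runtime.h>
__global__ void scale_kernel(float *x, float s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}
void launch_scale(float *d_x, float s, int n) {
    dim3 grid((n + 1023) / 1024);
    dim3 block(1024);
    // arguments: kernel, grid dim, block dim, dynamic shared memory bytes, stream, kernel arguments
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_x, s, n);
}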
Convert the following CUDA code to AMD GPU code:
cuda
// A basic macro used for checking CUDA errors.
// @param ans - the most recent enumerated cuda error to check.
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <cfloat> // for FLT_MAX
#include <iostream>
#include <ctime> // for clock()
#include <cmath>
#include <algorithm>
#include <iterator>
struct instance
{
int n;
int *distance;
int *flow;
int *best_individual;
int best_result;
};
// Basic function for exiting code on CUDA errors.
// Does no special error handling, just exits the program if it finds any errors and gives an error message.
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
__device__ int *relativePositionIndexing(const float *vec, int n) {
int i, j;
int *vecRPI = (int*)malloc(sizeof(int)*n);
for(i=0;i<n;i++) {
vecRPI[i] = n;
for(j=0;j<n;j++) {
// vecRPI[i] gets n minus the amount of indexes greater than vec[i]
if(i!=j&&vec[j]>vec[i]) {
vecRPI[i]--;
}
}
}
// Remove dupes
for(i=0;i<n;i++) for(j=0;j<n;j++) if(i!=j&&vecRPI[j]==vecRPI[i]) vecRPI[j]--;
return vecRPI;
}
__device__ void relativePositionIndexingP(const float *vec, int n, int *vecRPI) {
int i, j;
for(i=0;i<n;i++) {
vecRPI[i] = n;
for(j=0;j<n;j++) {
// vecRPI[i] gets n minus the amount of indexes greater than vec[i]
if(i!=j&&vec[j]>vec[i]) {
vecRPI[i]--;
}
}
}
// Remove dupes
for(i=0;i<n;i++) for(j=0;j<n;j++) if(i!=j&&vecRPI[j]==vecRPI[i]) vecRPI[j]--;
}
__global__ void costFunctionP(const int *vecRPI, const struct instance *inst, unsigned long long int *costCalls, int *sum, int popSize) {
int idx = threadIdx.x;
if (idx >= popSize) return;
int i = blockIdx.x/inst->n;
int j = blockIdx.x%inst->n;
if(i==0 && j==0)
atomicAdd(costCalls, 1); // costCalls here should only be added once per execution
atomicAdd(&sum[idx], inst->flow[(vecRPI[idx*inst->n + i]-1)*inst->n + (vecRPI[idx*inst->n + j]-1)] * inst->distance[i*inst->n + j]);
}
__device__ int costFunction(const float *vec, const struct instance *inst, unsigned long long int *costCalls) {
int i, j, sum=0;
atomicAdd(costCalls, 1); // costCalls refers to how many times the cost function was calculated
int *vecRPI = relativePositionIndexing(vec, inst->n);
for(i=0;i<inst->n;i++) { // Cost function
for(j=0;j<inst->n;j++) {
sum += inst->flow[(vecRPI[i]-1)*inst->n + (vecRPI[j]-1)] * inst->distance[i*inst->n + j];
}
}
free(vecRPI);
return sum;
}
__device__ void swap(float *vec, int i, int j) {
float aux = vec[i];
vec[i] = vec[j];
vec[j] = aux;
}
__device__ void swapReinsert(float *vec, int i, int j) {
float aux = vec[i];
int k;
for(k=i;k<j;k++) { // Indexes in between go back an index
vec[k] = vec[k+1];
}
vec[k] = aux; // And vec[i] goes to the index j
}
__device__ void swapReinsertReverse(float *vec, int i, int j) {
float aux = vec[j];
int k;
for(k=j;k>i;k--) {
vec[k] = vec[k-1];
}
vec[k] = aux;
}
__device__ void swap2opt(float *vec, int i, int j) {
int c=(j-i+1)/2; // 2-opt swaps the indexes between i and j, so the number of swaps is half the distance
for(int k=0;k<c;k++)
swap(vec,i++,j--);
}
__device__ int localSearch(float *vec, const struct instance *inst, unsigned long long int *costCalls) {
int i, j, cost_2;
int cost = costFunction(vec, inst, costCalls);
for(i=0;i<inst->n-1;i++) {
for(j=i+1;j<inst->n;j++) {
swap2opt(vec,i,j);
cost_2 = costFunction(vec, inst, costCalls);
if(cost_2 < cost) {
return cost_2;
}
swap2opt(vec,i,j);
}
}
return cost;
}
void printCudaVector(const float *d_vec, int size)
{
std::cout << "\nsize: " << size << std::endl;
float *h_vec = new float[size];
gpuErrorCheck(cudaMemcpy(h_vec, d_vec, sizeof(float) * size, cudaMemcpyDeviceToHost));
int i;
std::cout << "{";
for (i = 0; i < size-1; i++) {
std::cout << h_vec[i] << ", ";
}
std::cout << h_vec[i] << "}" << std::endl;
delete[] h_vec;
}
__global__ void generatePopulation(float *d_x, float d_min, float d_max,
int *costs, const struct instance *inst, curandState_t *randStates,
int popSize, unsigned long seed, unsigned long long int *costCalls)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
curandState_t *state = &randStates[idx];
curand_init(seed, idx, 0, state);
for (int i = 0; i < inst->n; i++) {
d_x[(idx*inst->n) + i] = (curand_uniform(state) * (d_max - d_min)) + d_min;
}
costs[idx] = costFunction(&d_x[idx*inst->n], inst, costCalls);
}
__global__ void evolutionKernelP(float *d_pop, float *d_trial, int *costs, float *d_nextPop, curandState_t *randStates,
int popSize, int CR, float F, const struct instance *inst, int *vecrpi, int *sum)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
curandState_t *state = &randStates[idx];
sum[idx] = 0;
int a, b, c, j;
// Indexes for mutation
do { a = curand(state) % popSize; } while (a == idx);
do { b = curand(state) % popSize; } while (b == idx || b == a);
do { c = curand(state) % popSize; } while (c == idx || c == a || c == b);
j = curand(state) % inst->n;
for (int i = 0; i < inst->n; i++) {
if ((curand(state) % 1000) < CR || j==i) { // If crossover is satisfied, it mutates the current index
d_trial[(idx*inst->n)+i] = d_pop[(a*inst->n)+i] + (F * (d_pop[(b*inst->n)+i] - d_pop[(c*inst->n)+i]));
} else { // If there's no crossover for this index
d_trial[(idx*inst->n)+i] = d_pop[(idx*inst->n)+i];
}
}
relativePositionIndexingP(&d_trial[idx*inst->n], inst->n, &vecrpi[idx*inst->n]);
}
__global__ void selectionP(float *d_pop, float *d_trial, int *costs, float *d_nextPop, int dim, int popSize, int *score) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
int j;
if (score[idx] < costs[idx]) { // Update the next generation with the trial vector
for (j = 0; j < dim; j++) {
d_nextPop[(idx*dim) + j] = d_trial[(idx*dim) + j];
}
costs[idx] = score[idx];
} else { // Keep the individual for the next generation
for (j = 0; j < dim; j++) {
d_nextPop[(idx*dim) + j] = d_pop[(idx*dim) + j];
}
}
}
__global__ void evolutionKernel(float *d_pop, float *d_trial, int *costs, float *d_nextPop, curandState_t *randStates,
int popSize, int CR, float F, const struct instance *inst, unsigned long long int *costCalls) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
curandState_t *state = &randStates[idx];
int a, b, c, j;
// Indexes for mutation
do { a = curand(state) % popSize; } while (a == idx);
do { b = curand(state) % popSize; } while (b == idx || b == a);
do { c = curand(state) % popSize; } while (c == idx || c == a || c == b);
j = curand(state) % inst->n;
for (int i = 0; i < inst->n; i++) {
if ((curand(state) % 1000) < CR || j==i) { // If crossover is satisfied, it mutates the current index
d_trial[(idx*inst->n)+i] = d_pop[(a*inst->n)+i] + (F * (d_pop[(b*inst->n)+i] - d_pop[(c*inst->n)+i]));
} else { // If there's no crossover for this index
d_trial[(idx*inst->n)+i] = d_pop[(idx*inst->n)+i];
}
}
int score = costFunction(&d_trial[idx*inst->n], inst, costCalls);
if (score < costs[idx]) { // Update the next generation with the trial vector
for (j = 0; j < inst->n; j++) {
d_nextPop[(idx*inst->n) + j] = d_trial[(idx*inst->n) + j];
}
costs[idx] = score;
} else { // Keep the individual for the next generation
for (j = 0; j < inst->n; j++) {
d_nextPop[(idx*inst->n) + j] = d_pop[(idx*inst->n) + j];
}
}
}
float *differentialEvolution(float d_min, float d_max, int popSize, int maxGenerations, int crossoverRate, float F,
const struct instance *inst, unsigned long long int *costCalls, long int maxCostCalls) {
int CR = crossoverRate*1000;
// Allocation of values
float *d_pop, *d_nextPop, *d_trial;
void *randStates;
int *costs;
cudaMallocManaged(&d_pop, sizeof(float) * popSize*inst->n);
cudaMallocManaged(&d_nextPop, sizeof(float) * popSize*inst->n);
cudaMallocManaged(&d_trial, sizeof(float) * popSize*inst->n);
cudaMallocManaged(&randStates, sizeof(curandState_t)*popSize);
cudaMallocManaged(&costs, sizeof(int) * popSize);
// "First of all, your thread block size should always be a multiple of 32,
// because kernels issue instructions in warps (32 threads).
// For example, if you have a block size of 50 threads, the GPU will still
// issue commands to 64 threads and you'd just be wasting them."
// https://stackoverflow.com/questions/4391162/cuda-determining-threads-per-block-blocks-per-grid
int popSize32 = ceil(popSize / 32.0) * 32;
// Generate the population
cudaError_t ret;
generatePopulation<<<1, popSize32>>>(d_pop, d_min, d_max, costs, inst, (curandState_t *)randStates, popSize, clock(), costCalls);
gpuErrorCheck(cudaPeekAtLastError());
ret = cudaDeviceSynchronize();
gpuErrorCheck(ret);
int *vecRPI;
ret = cudaMallocManaged(&vecRPI, sizeof(int) * popSize * inst->n);
gpuErrorCheck(ret);
int *sum;
ret = cudaMallocManaged(&sum, sizeof(int) * popSize);
gpuErrorCheck(ret);
for (int i = 1; i <= maxGenerations && *costCalls <= maxCostCalls; i++) {
// start kernel for this generation
evolutionKernelP<<<1, popSize32>>>(d_pop, d_trial, costs, d_nextPop,
(curandState_t *)randStates, popSize, CR, F, inst, vecRPI, sum);
gpuErrorCheck(cudaPeekAtLastError());
ret = cudaDeviceSynchronize();
gpuErrorCheck(ret);
costFunctionP<<<inst->n*inst->n, popSize32>>>(vecRPI, inst, costCalls, sum, popSize);
gpuErrorCheck(cudaPeekAtLastError());
ret = cudaDeviceSynchronize();
gpuErrorCheck(ret);
selectionP<<<1, popSize32>>>(d_pop, d_trial, costs, d_nextPop, inst->n, popSize, sum);
gpuErrorCheck(cudaPeekAtLastError());
ret = cudaDeviceSynchronize();
gpuErrorCheck(ret);
// Update the population for the next generation
float *tmp = d_pop;
d_pop = d_nextPop;
d_nextPop = tmp;
}
int best_index = std::distance(costs, std::min_element(costs, costs+popSize));
return d_pop+(best_index*inst->n);
}
|
// A basic macro used for checking CUDA errors.
// @param ans - the most recent enumerated cuda error to check.
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include <cfloat> // for FLT_MAX
#include <iostream>
#include <ctime> // for clock()
#include <cmath>
#include <algorithm>
#include <iterator>
struct instance
{
int n;
int *distance;
int *flow;
int *best_individual;
int best_result;
};
// Basic function for exiting code on CUDA errors.
// Does no special error handling, just exits the program if it finds any errors and gives an error message.
inline void gpuAssert(hipError_t code, const char *file, int line)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
exit(code);
}
}
__device__ int *relativePositionIndexing(const float *vec, int n) {
int i, j;
int *vecRPI = (int*)malloc(sizeof(int)*n);
for(i=0;i<n;i++) {
vecRPI[i] = n;
for(j=0;j<n;j++) {
// vecRPI[i] gets n minus the amount of indexes greater than vec[i]
if(i!=j&&vec[j]>vec[i]) {
vecRPI[i]--;
}
}
}
// Remove dupes
for(i=0;i<n;i++) for(j=0;j<n;j++) if(i!=j&&vecRPI[j]==vecRPI[i]) vecRPI[j]--;
return vecRPI;
}
__device__ void relativePositionIndexingP(const float *vec, int n, int *vecRPI) {
int i, j;
for(i=0;i<n;i++) {
vecRPI[i] = n;
for(j=0;j<n;j++) {
// vecRPI[i] gets n minus the amount of indexes greater than vec[i]
if(i!=j&&vec[j]>vec[i]) {
vecRPI[i]--;
}
}
}
// Remove dupes
for(i=0;i<n;i++) for(j=0;j<n;j++) if(i!=j&&vecRPI[j]==vecRPI[i]) vecRPI[j]--;
}
__global__ void costFunctionP(const int *vecRPI, const struct instance *inst, unsigned long long int *costCalls, int *sum, int popSize) {
int idx = threadIdx.x;
if (idx >= popSize) return;
int i = blockIdx.x/inst->n;
int j = blockIdx.x%inst->n;
if(i==0 && j==0)
atomicAdd(costCalls, 1); // costCalls here should only be added once per execution
atomicAdd(&sum[idx], inst->flow[(vecRPI[idx*inst->n + i]-1)*inst->n + (vecRPI[idx*inst->n + j]-1)] * inst->distance[i*inst->n + j]);
}
__device__ int costFunction(const float *vec, const struct instance *inst, unsigned long long int *costCalls) {
int i, j, sum=0;
atomicAdd(costCalls, 1); // costCalls refers to how many times the cost function was calculated
int *vecRPI = relativePositionIndexing(vec, inst->n);
for(i=0;i<inst->n;i++) { // Cost function
for(j=0;j<inst->n;j++) {
sum += inst->flow[(vecRPI[i]-1)*inst->n + (vecRPI[j]-1)] * inst->distance[i*inst->n + j];
}
}
free(vecRPI);
return sum;
}
__device__ void swap(float *vec, int i, int j) {
float aux = vec[i];
vec[i] = vec[j];
vec[j] = aux;
}
__device__ void swapReinsert(float *vec, int i, int j) {
float aux = vec[i];
int k;
for(k=i;k<j;k++) { // Indexes in between go back an index
vec[k] = vec[k+1];
}
vec[k] = aux; // And vec[i] goes to the index j
}
__device__ void swapReinsertReverse(float *vec, int i, int j) {
float aux = vec[j];
int k;
for(k=j;k>i;k--) {
vec[k] = vec[k-1];
}
vec[k] = aux;
}
__device__ void swap2opt(float *vec, int i, int j) {
int c=(j-i+1)/2; // 2-opt swaps the indexes between i and j, so the number of swaps is half the distance
for(int k=0;k<c;k++)
swap(vec,i++,j--);
}
__device__ int localSearch(float *vec, const struct instance *inst, unsigned long long int *costCalls) {
int i, j, cost_2;
int cost = costFunction(vec, inst, costCalls);
for(i=0;i<inst->n-1;i++) {
for(j=i+1;j<inst->n;j++) {
swap2opt(vec,i,j);
cost_2 = costFunction(vec, inst, costCalls);
if(cost_2 < cost) {
return cost_2;
}
swap2opt(vec,i,j);
}
}
return cost;
}
void printCudaVector(const float *d_vec, int size)
{
std::cout << "\nsize: " << size << std::endl;
float *h_vec = new float[size];
gpuErrorCheck(hipMemcpy(h_vec, d_vec, sizeof(float) * size, hipMemcpyDeviceToHost));
int i;
std::cout << "{";
for (i = 0; i < size-1; i++) {
std::cout << h_vec[i] << ", ";
}
std::cout << h_vec[i] << "}" << std::endl;
delete[] h_vec;
}
__global__ void generatePopulation(float *d_x, float d_min, float d_max,
int *costs, const struct instance *inst, hiprandState_t *randStates,
int popSize, unsigned long seed, unsigned long long int *costCalls)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
hiprandState_t *state = &randStates[idx];
hiprand_init(seed, idx, 0, state);
for (int i = 0; i < inst->n; i++) {
d_x[(idx*inst->n) + i] = (hiprand_uniform(state) * (d_max - d_min)) + d_min;
}
costs[idx] = costFunction(&d_x[idx*inst->n], inst, costCalls);
}
__global__ void evolutionKernelP(float *d_pop, float *d_trial, int *costs, float *d_nextPop, hiprandState_t *randStates,
int popSize, int CR, float F, const struct instance *inst, int *vecrpi, int *sum)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
hiprandState_t *state = &randStates[idx];
sum[idx] = 0;
int a, b, c, j;
// Indexes for mutation
do { a = hiprand(state) % popSize; } while (a == idx);
do { b = hiprand(state) % popSize; } while (b == idx || b == a);
do { c = hiprand(state) % popSize; } while (c == idx || c == a || c == b);
j = hiprand(state) % inst->n;
for (int i = 0; i < inst->n; i++) {
if ((hiprand(state) % 1000) < CR || j==i) { // If crossover is satisfied, it mutates the current index
d_trial[(idx*inst->n)+i] = d_pop[(a*inst->n)+i] + (F * (d_pop[(b*inst->n)+i] - d_pop[(c*inst->n)+i]));
} else { // If there's no crossover for this index
d_trial[(idx*inst->n)+i] = d_pop[(idx*inst->n)+i];
}
}
relativePositionIndexingP(&d_trial[idx*inst->n], inst->n, &vecrpi[idx*inst->n]);
}
__global__ void selectionP(float *d_pop, float *d_trial, int *costs, float *d_nextPop, int dim, int popSize, int *score) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
int j;
if (score[idx] < costs[idx]) { // Update the next generation with the trial vector
for (j = 0; j < dim; j++) {
d_nextPop[(idx*dim) + j] = d_trial[(idx*dim) + j];
}
costs[idx] = score[idx];
} else { // Keep the individual for the next generation
for (j = 0; j < dim; j++) {
d_nextPop[(idx*dim) + j] = d_pop[(idx*dim) + j];
}
}
}
__global__ void evolutionKernel(float *d_pop, float *d_trial, int *costs, float *d_nextPop, hiprandState_t *randStates,
int popSize, int CR, float F, const struct instance *inst, unsigned long long int *costCalls) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= popSize) return;
hiprandState_t *state = &randStates[idx];
int a, b, c, j;
// Indexes for mutation
do { a = hiprand(state) % popSize; } while (a == idx);
do { b = hiprand(state) % popSize; } while (b == idx || b == a);
do { c = hiprand(state) % popSize; } while (c == idx || c == a || c == b);
j = hiprand(state) % inst->n;
for (int i = 0; i < inst->n; i++) {
if ((hiprand(state) % 1000) < CR || j==i) { // If crossover is satisfied, it mutates the current index
d_trial[(idx*inst->n)+i] = d_pop[(a*inst->n)+i] + (F * (d_pop[(b*inst->n)+i] - d_pop[(c*inst->n)+i]));
} else { // If there's no crossover for this index
d_trial[(idx*inst->n)+i] = d_pop[(idx*inst->n)+i];
}
}
int score = costFunction(&d_trial[idx*inst->n], inst, costCalls);
if (score < costs[idx]) { // Update the next generation with the trial vector
for (j = 0; j < inst->n; j++) {
d_nextPop[(idx*inst->n) + j] = d_trial[(idx*inst->n) + j];
}
costs[idx] = score;
} else { // Keep the individual for the next generation
for (j = 0; j < inst->n; j++) {
d_nextPop[(idx*inst->n) + j] = d_pop[(idx*inst->n) + j];
}
}
}
float *differentialEvolution(float d_min, float d_max, int popSize, int maxGenerations, int crossoverRate, float F,
const struct instance *inst, unsigned long long int *costCalls, long int maxCostCalls) {
int CR = crossoverRate*1000;
// Allocation of values
float *d_pop, *d_nextPop, *d_trial;
void *randStates;
int *costs;
hipMallocManaged(&d_pop, sizeof(float) * popSize*inst->n);
hipMallocManaged(&d_nextPop, sizeof(float) * popSize*inst->n);
hipMallocManaged(&d_trial, sizeof(float) * popSize*inst->n);
hipMallocManaged(&randStates, sizeof(hiprandState_t)*popSize);
hipMallocManaged(&costs, sizeof(int) * popSize);
// "First of all, your thread block size should always be a multiple of 32,
// because kernels issue instructions in warps (32 threads).
// For example, if you have a block size of 50 threads, the GPU will still
// issue commands to 64 threads and you'd just be wasting them."
// https://stackoverflow.com/questions/4391162/cuda-determining-threads-per-block-blocks-per-grid
int popSize32 = ceil(popSize / 32.0) * 32;
// Generate the population
hipError_t ret;
generatePopulation<<<1, popSize32>>>(d_pop, d_min, d_max, costs, inst, (hiprandState_t *)randStates, popSize, clock(), costCalls);
gpuErrorCheck(hipPeekAtLastError());
ret = hipDeviceSynchronize();
gpuErrorCheck(ret);
int *vecRPI;
ret = hipMallocManaged(&vecRPI, sizeof(int) * popSize * inst->n);
gpuErrorCheck(ret);
int *sum;
ret = hipMallocManaged(&sum, sizeof(int) * popSize);
gpuErrorCheck(ret);
for (int i = 1; i <= maxGenerations && *costCalls <= maxCostCalls; i++) {
// start kernel for this generation
evolutionKernelP<<<1, popSize32>>>(d_pop, d_trial, costs, d_nextPop,
(hiprandState_t *)randStates, popSize, CR, F, inst, vecRPI, sum);
gpuErrorCheck(hipPeekAtLastError());
ret = hipDeviceSynchronize();
gpuErrorCheck(ret);
costFunctionP<<<inst->n*inst->n, popSize32>>>(vecRPI, inst, costCalls, sum, popSize);
gpuErrorCheck(hipPeekAtLastError());
ret = hipDeviceSynchronize();
gpuErrorCheck(ret);
selectionP<<<1, popSize32>>>(d_pop, d_trial, costs, d_nextPop, inst->n, popSize, sum);
gpuErrorCheck(hipPeekAtLastError());
ret = hipDeviceSynchronize();
gpuErrorCheck(ret);
// Update the population for the next generation
float *tmp = d_pop;
d_pop = d_nextPop;
d_nextPop = tmp;
}
int best_index = std::distance(costs, std::min_element(costs, costs+popSize));
return d_pop+(best_index*inst->n);
}
|
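The device-side RNG in the pair above maps onto hipRAND: the header is renamed from <curand_kernel.h> to <hiprand/hiprand_kernel.h> and the curand* symbols become hiprand* (curandState_t → hiprandState_t, curand_init → hiprand_init, curand_uniform → hiprand_uniform, curand → hiprand), with the argument lists unchanged. A minimal sketch of per-thread initialization and sampling; the kernel name and seed are illustrative:
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
__global__ void fill_uniform(float *out, int n, unsigned long long seed) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    hiprandState_t state;
    hiprand_init(seed, i, 0, &state);   // (seed, subsequence, offset, state), as with curand_init
    out[i] = hiprand_uniform(&state);   // uniform float in (0, 1]
}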
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#define NUM_BUCKETS 128
__global__ void compute_histogram(const size_t num_elements, const float range,
const float *data, unsigned *histogram) {
int t = threadIdx.x;
int nt = blockDim.x;
__shared__ unsigned local_histogram[NUM_BUCKETS];
for (int i = t; i < NUM_BUCKETS; i += nt) local_histogram[i] = 0;
__syncthreads();
for (int idx = (blockIdx.x * blockDim.x) + threadIdx.x; idx < num_elements;
idx += gridDim.x * blockDim.x) {
size_t bucket = floor(data[idx] / range * (NUM_BUCKETS - 1));
atomicAdd(&local_histogram[bucket], 1);
}
__syncthreads();
for (int i = t; i < NUM_BUCKETS; i += nt)
atomicAdd(&histogram[i], local_histogram[i]);
}
int main() {
size_t num_elements = 1 << 20;
size_t data_size = num_elements * sizeof(float);
size_t histogram_size = NUM_BUCKETS * sizeof(unsigned);
float *data = (float *)malloc(data_size);
unsigned *histogram = (unsigned *)malloc(histogram_size);
float *d_data;
unsigned *d_histogram;
cudaMalloc(&d_data, data_size);
cudaMalloc(&d_histogram, histogram_size);
float range = (float)RAND_MAX;
for (size_t idx = 0; idx < num_elements; idx++) {
data[idx] = rand();
}
for (size_t idx = 0; idx < NUM_BUCKETS; idx++) {
histogram[idx] = 0;
}
cudaMemcpyAsync(d_data, data, data_size, cudaMemcpyHostToDevice);
cudaMemcpyAsync(d_histogram, histogram, histogram_size, cudaMemcpyHostToDevice);
size_t elts_per_thread = 16;
size_t block_size = 256;
size_t blocks = (num_elements + elts_per_thread * block_size - 1) /
(elts_per_thread * block_size);
compute_histogram<<<blocks, block_size>>>(num_elements, range, d_data,
d_histogram);
cudaMemcpyAsync(histogram, d_histogram, histogram_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
size_t total = 0;
for (size_t idx = 0; idx < NUM_BUCKETS; idx++) {
total += histogram[idx];
printf("histogram[%lu] = %u\n", idx, histogram[idx]);
}
printf("\ntotal = %lu (%s)\n", total,
total == num_elements ? "PASS" : "FAIL");
cudaFree(d_data);
cudaFree(d_histogram);
free(data);
free(histogram);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#define NUM_BUCKETS 128
__global__ void compute_histogram(const size_t num_elements, const float range,
const float *data, unsigned *histogram) {
int t = threadIdx.x;
int nt = blockDim.x;
__shared__ unsigned local_histogram[NUM_BUCKETS];
for (int i = t; i < NUM_BUCKETS; i += nt) local_histogram[i] = 0;
__syncthreads();
for (int idx = (blockIdx.x * blockDim.x) + threadIdx.x; idx < num_elements;
idx += gridDim.x * blockDim.x) {
size_t bucket = floor(data[idx] / range * (NUM_BUCKETS - 1));
atomicAdd(&local_histogram[bucket], 1);
}
__syncthreads();
for (int i = t; i < NUM_BUCKETS; i += nt)
atomicAdd(&histogram[i], local_histogram[i]);
}
int main() {
size_t num_elements = 1 << 20;
size_t data_size = num_elements * sizeof(float);
size_t histogram_size = NUM_BUCKETS * sizeof(unsigned);
float *data = (float *)malloc(data_size);
unsigned *histogram = (unsigned *)malloc(histogram_size);
float *d_data;
unsigned *d_histogram;
hipMalloc(&d_data, data_size);
hipMalloc(&d_histogram, histogram_size);
float range = (float)RAND_MAX;
for (size_t idx = 0; idx < num_elements; idx++) {
data[idx] = rand();
}
for (size_t idx = 0; idx < NUM_BUCKETS; idx++) {
histogram[idx] = 0;
}
hipMemcpyAsync(d_data, data, data_size, hipMemcpyHostToDevice);
hipMemcpyAsync(d_histogram, histogram, histogram_size, hipMemcpyHostToDevice);
size_t elts_per_thread = 16;
size_t block_size = 256;
size_t blocks = (num_elements + elts_per_thread * block_size - 1) /
(elts_per_thread * block_size);
compute_histogram<<<blocks, block_size>>>(num_elements, range, d_data,
d_histogram);
hipMemcpyAsync(histogram, d_histogram, histogram_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
size_t total = 0;
for (size_t idx = 0; idx < NUM_BUCKETS; idx++) {
total += histogram[idx];
printf("histogram[%lu] = %u\n", idx, histogram[idx]);
}
printf("\ntotal = %lu (%s)\n", total,
total == num_elements ? "PASS" : "FAIL");
hipFree(d_data);
hipFree(d_histogram);
free(data);
free(histogram);
return 0;
}
|
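In the pair above, the device-side constructs (__shared__, __syncthreads, atomicAdd) need no renaming; only the host runtime calls change, including the asynchronous copies (cudaMemcpyAsync → hipMemcpyAsync). They run on the default stream, so they are ordered with the kernel, and the final hipDeviceSynchronize is what makes the host-side read safe. A compact sketch of that default-stream pattern with an illustrative block-privatized counting kernel:
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void count_nonzero(const int *in, unsigned *count, int n) {
    __shared__ unsigned local;                // block-private partial count
    if (threadIdx.x == 0) local = 0;
    __syncthreads();
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && in[i] != 0) atomicAdd(&local, 1u);
    __syncthreads();
    if (threadIdx.x == 0) atomicAdd(count, local);
}
int main() {
    const int n = 1 << 16;
    int *h_in = (int *)malloc(n * sizeof(int));
    for (int i = 0; i < n; i++) h_in[i] = i % 2;
    int *d_in; unsigned *d_count; unsigned h_count = 0;
    hipMalloc(&d_in, n * sizeof(int));
    hipMalloc(&d_count, sizeof(unsigned));
    hipMemcpyAsync(d_in, h_in, n * sizeof(int), hipMemcpyHostToDevice);         // default stream
    hipMemcpyAsync(d_count, &h_count, sizeof(unsigned), hipMemcpyHostToDevice);
    count_nonzero<<<(n + 255) / 256, 256>>>(d_in, d_count, n);
    hipMemcpyAsync(&h_count, d_count, sizeof(unsigned), hipMemcpyDeviceToHost);
    hipDeviceSynchronize();                   // all default-stream work done before reading h_count
    printf("nonzero = %u (expected %d)\n", h_count, n / 2);
    hipFree(d_in);
    hipFree(d_count);
    free(h_in);
    return 0;
}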
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <time.h>
#define NUM_INTERVALS 1000000
#define THREADS_PER_BLOCK 1000
#define BLOCKS 10
double baseIntervalo = 1.0/NUM_INTERVALS;
__global__ void piFunc(double * acum){
int t = threadIdx.x;
int threads = blockDim.x;
acum[t] = 0;
int intervals_per_thread = NUM_INTERVALS/(BLOCKS*THREADS_PER_BLOCK);
double baseIntervalo = 1.0/NUM_INTERVALS;
for(int i = (threads*blockIdx.x + t)*intervals_per_thread; i<(threads*blockIdx.x + t)*intervals_per_thread + intervals_per_thread; i++){
double x = (i+0.5)*baseIntervalo;
double fdx = 4 / (1 + x * x);
acum[threads*blockIdx.x + t] += fdx;
}
}
int main(){
clock_t start, end;
double h_pi[BLOCKS*THREADS_PER_BLOCK];
double * d_pi;
double pi = 0;
cudaMalloc(&d_pi, BLOCKS*THREADS_PER_BLOCK*sizeof(double));
start = clock();
piFunc<<<BLOCKS, THREADS_PER_BLOCK>>>(d_pi);
cudaMemcpy(h_pi, d_pi, BLOCKS*THREADS_PER_BLOCK*sizeof(double), cudaMemcpyDeviceToHost);
for(int i=0; i<BLOCKS*THREADS_PER_BLOCK; i++)
//printf("%f", h_pi[i]);
pi+=h_pi[i];
pi *= baseIntervalo;
end = clock();
printf("Result = %20.18lf (%ld)\n", pi, end - start);
cudaFree(d_pi);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#define NUM_INTERVALS 1000000
#define THREADS_PER_BLOCK 1000
#define BLOCKS 10
double baseIntervalo = 1.0/NUM_INTERVALS;
__global__ void piFunc(double * acum){
int t = threadIdx.x;
int threads = blockDim.x;
acum[t] = 0;
int intervals_per_thread = NUM_INTERVALS/(BLOCKS*THREADS_PER_BLOCK);
double baseIntervalo = 1.0/NUM_INTERVALS;
for(int i = (threads*blockIdx.x + t)*intervals_per_thread; i<(threads*blockIdx.x + t)*intervals_per_thread + intervals_per_thread; i++){
double x = (i+0.5)*baseIntervalo;
double fdx = 4 / (1 + x * x);
acum[threads*blockIdx.x + t] += fdx;
}
}
int main(){
clock_t start, end;
double h_pi[BLOCKS*THREADS_PER_BLOCK];
double * d_pi;
double pi = 0;
hipMalloc(&d_pi, BLOCKS*THREADS_PER_BLOCK*sizeof(double));
start = clock();
piFunc<<<BLOCKS, THREADS_PER_BLOCK>>>(d_pi);
hipMemcpy(h_pi, d_pi, BLOCKS*THREADS_PER_BLOCK*sizeof(double), hipMemcpyDeviceToHost);
for(int i=0; i<BLOCKS*THREADS_PER_BLOCK; i++)
//printf("%f", h_pi[i]);
pi+=h_pi[i];
pi *= baseIntervalo;
end = clock();
printf("Result = %20.18lf (%ld)\n", pi, end - start);
hipFree(d_pi);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <cuda_runtime.h>
__global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT, int ARRAY_SIZE)
{
unsigned int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking for out-of-bounds
if (myId>=ARRAY_SIZE)
{
return;
}
unsigned int myItem = d_in[myId];
unsigned int myBin = min(static_cast<unsigned int>(BIN_COUNT - 1),
static_cast<unsigned int>(myItem % BIN_COUNT));
atomicAdd(&(d_bins[myBin]), 1);
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 65536;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
const int BIN_COUNT = 16;
const int BIN_BYTES = BIN_COUNT * sizeof(int);
// generate the input array on host
int h_in[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
}
int h_bins[BIN_COUNT];
for(int i = 0; i < BIN_COUNT; i++)
{
h_bins[i] = 0;
}
// declare GPU memory pointers
int * d_in;
int * d_bins;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_bins, BIN_BYTES);
// transfer the arrays to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice);
// pop'in that kernel
simple_histo<<<ARRAY_SIZE/64, 64>>>(d_bins, d_in, BIN_COUNT, ARRAY_SIZE);
// copy back to HOST
cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);
for(int i = 0; i<BIN_COUNT; i++)
{
printf("bin %d: count %d\n", i, h_bins[i]);
}
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_bins);
return 0;
}
|
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT, int ARRAY_SIZE)
{
unsigned int myId = threadIdx.x + blockDim.x * blockIdx.x;
// checking for out-of-bounds
if (myId>=ARRAY_SIZE)
{
return;
}
unsigned int myItem = d_in[myId];
unsigned int myBin = min(static_cast<unsigned int>(BIN_COUNT - 1),
static_cast<unsigned int>(myItem % BIN_COUNT));
atomicAdd(&(d_bins[myBin]), 1);
}
int main(int argc, char **argv)
{
const int ARRAY_SIZE = 65536;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
const int BIN_COUNT = 16;
const int BIN_BYTES = BIN_COUNT * sizeof(int);
// generate the input array on host
int h_in[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
h_in[i] = 3;
}
int h_bins[BIN_COUNT];
for(int i = 0; i < BIN_COUNT; i++)
{
h_bins[i] = 0;
}
// declare GPU memory pointers
int * d_in;
int * d_bins;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_bins, BIN_BYTES);
// transfer the arrays to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_bins, h_bins, BIN_BYTES, hipMemcpyHostToDevice);
// pop'in that kernel
simple_histo<<<ARRAY_SIZE/64, 64>>>(d_bins, d_in, BIN_COUNT, ARRAY_SIZE);
// copy back to HOST
hipMemcpy(h_bins, d_bins, BIN_BYTES, hipMemcpyDeviceToHost);
for(int i = 0; i<BIN_COUNT; i++)
{
printf("bin %d: count %d\n", i, h_bins[i]);
}
// free GPU memory allocation
hipFree(d_in);
hipFree(d_bins);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void skip_128b(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)+32*(threadIdx.x%32);
if (i < N) C[i] = A[i];
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void skip_128b(float *A, float *C, const int N)
{
int i = (blockIdx.x * blockDim.x + threadIdx.x)+32*(threadIdx.x%32);
if (i < N) C[i] = A[i];
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
#include <queue>
using namespace std;
short iterative_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix);
short recursive_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix);
short recursive_bfs_helper(short **matrix, unsigned long long N, short target, unsigned long long x, unsigned long long y, bool **visited_matrix);
void kernel_bfs_wrapper(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target);
__global__ void bfs_kernel(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target);
int main()
{
// default size is 100
unsigned long long N = 100;
int *h_found_idx, *found_idx;
short **seq_matrix,
*h_matrix,
*cuda_matrix,
*seq_result,
*cuda_result,
target;
int *mtx;
bool **visited_matrix;
// Declare timers
float cuda_elapsed_time;
cudaEvent_t cuda_start, cuda_stop;
double seq_start, seq_stop, seq_iter_elapsed_time, seq_recur_elapsed_time;
cout << "Enter N (NxN matrix): ";
cin >> N;
cout << "Enter target integer [0 to 100]: ";
cin >> target;
// allocate memory for seq
seq_matrix = (short**) malloc(N * sizeof(short*));
visited_matrix = (bool**) malloc(N * sizeof(bool*));
seq_result = (short*) malloc(sizeof(short));
h_found_idx = (int*) malloc(sizeof(int));
h_matrix = (short*) malloc(N * N * sizeof(short));
srand(time(0));
// set matrix to random short integers from 0 to 100
for (unsigned long long i = 0; i < N; i++) {
seq_matrix[i] = (short*) malloc(N * sizeof(short));
visited_matrix[i] = (bool*) malloc(N * sizeof(bool));
for (unsigned long long j = 0; j < N; j++) {
seq_matrix[i][j] = rand() % 101;
visited_matrix[i][j] = false;
h_matrix[i*N + j] = seq_matrix[i][j];
}
}
//cout << "random numbers generated" << endl;
// allocate memory for cuda
cudaMalloc((void **) &cuda_matrix, N * N * sizeof(short));
//cudaMallocPitch((void**)&cuda_matrix, &pitch, N * sizeof(short), N);
//cudaMallocPitch((void**)&cuda_visited_matrix, &pitch_visited, N * sizeof(short), N);
//for (unsigned long long i = 0; i < N; i++) {
// cudaMalloc(&cuda_matrix[i], N * sizeof(short));
//}
cudaMalloc((void**)&found_idx, sizeof(int));
//cudaMalloc((void**)&cuda_result, sizeof(short));
cudaMalloc((void**)&mtx, sizeof(int));
//cout << "cuda malloc good" << endl;
// set values of cuda target to -1 (not found)
// set values of mtx target to 0
cudaMemset(found_idx, 0, sizeof(int));
//cudaMemset(cuda_result, -1, sizeof(short));
cudaMemset(mtx, 0, sizeof(short));
// set up timing variables
cudaEventCreate(&cuda_start);
cudaEventCreate(&cuda_stop);
//for (unsigned long long i = 0; i < N; i++) {
// cudaMemcpy(cuda_matrix[i], seq_matrix[i], N * sizeof(short), cudaMemcpyHostToDevice);
//cudaMemcpy(cuda_visited_matrix[i], visited_matrix[i], N * sizeof(bool), cudaMemcpyHostToDevice);
//}
cudaMemcpy(cuda_matrix, h_matrix, N * N * sizeof(short), cudaMemcpyHostToDevice);
//cudaMemcpy2DToArray(cuda_matrix, pitch, seq_matrix, N*sizeof(short), N*sizeof(short), N, cudaMemcpyHostToDevice);
//cudaMemcpy2DToArray(cuda_visited_matrix, pitch_visited, visited_matrix, N*sizeof(bool), N*sizeof(bool), N, cudaMemcpyHostToDevice);
//cudaMemcpy(cuda_matrix, seq_matrix, N * sizeof(short*), cudaMemcpyHostToDevice);
//cudaMemcpy(cuda_visited_matrix, visited_matrix, N * sizeof(bool*), cudaMemcpyHostToDevice);
// copy from host to device
cudaEventRecord(cuda_start, 0);
// START CUDA
kernel_bfs_wrapper(cuda_matrix, found_idx, mtx, N, target);
// copy from device to host
cudaEventRecord(cuda_stop, 0);
cudaEventSynchronize(cuda_stop);
cudaEventElapsedTime(&cuda_elapsed_time, cuda_start, cuda_stop);
//cudaMemcpy2D(seq_result, sizeof(short)*N, cuda_result, pitch, sizeof(short)*N, N, cudaMemcpyDeviceToHost);
//cudaMemcpy(seq_result, cuda_result, sizeof(short), cudaMemcpyDeviceToHost);
cudaMemcpy(h_found_idx, found_idx, sizeof(int), cudaMemcpyDeviceToHost);
// destroy timers
cudaEventDestroy(cuda_start);
cudaEventDestroy(cuda_stop);
cout << "----------------------------------------------------------" << endl;
cout << "Found: " << h_matrix[*h_found_idx] << endl;
cout << "[CUDA] Elapsed time: " << cuda_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
cout << endl;
cout << "Starting sequential iterative approach." << endl;
// reset visited_matrix back to false
for (unsigned long long i = 0; i < N; i++) {
for (unsigned long long j = 0; j < N; j++) {
visited_matrix[i][j] = false;
}
}
seq_start = (double) clock();
// call iterative bfs
*seq_result = -1;
*seq_result = iterative_bfs(seq_matrix, N, target, visited_matrix);
seq_stop = (double) clock();
seq_iter_elapsed_time = (double) 1000.0*((double)(seq_stop - seq_start))/CLOCKS_PER_SEC;
cout << "----------------------------------------------------------" << endl;
cout << "Found: " << *seq_result << endl;
cout << "[SEQUENTIAL - Iterative] Elapsed time: " << seq_iter_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
cout << "Starting sequential recursive approach." << endl;
// reset visited_matrix back to false
for (unsigned long long i = 0; i < N; i++) {
for (unsigned long long j = 0; j < N; j++) {
visited_matrix[i][j] = false;
}
}
seq_start = (double) clock();
// call recursive bfs
*seq_result = -1;
*seq_result = recursive_bfs(seq_matrix, N, target, visited_matrix);
seq_stop = (double) clock();
seq_recur_elapsed_time = (double) 1000.0*((double)(seq_stop - seq_start))/CLOCKS_PER_SEC;
cout << "----------------------------------------------------------" << endl;
cout << "Found: " << *seq_result << endl;
cout << "[SEQUENTIAL - Recursive] Elapsed time: " << seq_recur_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
// free and cuda free
for (unsigned long long i = 0; i < N; i++) {
free(seq_matrix[i]);
free(visited_matrix[i]);
}
free(seq_matrix);
free(visited_matrix);
free(h_matrix);
free(seq_result);
free(h_found_idx);
cudaFree(cuda_matrix);
cudaFree(mtx);
cudaFree(found_idx);
//cudaFree(cuda_result);
return 0;
}
short iterative_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix) {
// check initial spot
if (matrix[0][0] == target)
return matrix[0][0];
visited_matrix[0][0] = true;
queue<unsigned long long> qx;
queue<unsigned long long> qy;
qx.push(0);
qy.push(0);
while (!qx.empty()) {
unsigned long long x = qx.front();
unsigned long long y = qy.front();
qx.pop();
qy.pop();
visited_matrix[x][y] = true;
if (matrix[x][y] == target) {
return matrix[x][y];
}
// check right then check down
if (x + 1 < N && !visited_matrix[x+1][y]) {
qx.push(x+1);
qy.push(y);
}
if (y + 1 < N && !visited_matrix[x][y+1]) {
qx.push(x);
qy.push(y+1);
}
}
return -1;
}
short recursive_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix) {
// check initial spot
if (matrix[0][0] == target)
return matrix[0][0];
visited_matrix[0][0] = true;
return recursive_bfs_helper(matrix, N, target, 0, 0, visited_matrix);
}
short recursive_bfs_helper(short **matrix, unsigned long long N, short target, unsigned long long x, unsigned long long y, bool **visited_matrix) {
// check right
if (x+1 < N && matrix[x+1][y] == target)
return matrix[x+1][y];
// check down
if (y+1 < N && matrix[x][y+1] == target)
return matrix[x][y+1];
// traverse to right first, then down
short right = -1;
short down = -1;
if (x+1 < N && !visited_matrix[x+1][y]) {
visited_matrix[x+1][y] = true;
right = recursive_bfs_helper(matrix, N, target, x+1, y, visited_matrix);
}
// if found in right, then return right
if (right != -1)
return right;
if (y+1 < N && !visited_matrix[x][y+1]) {
visited_matrix[x][y+1] = true;
down = recursive_bfs_helper(matrix, N, target, x, y+1, visited_matrix);
}
// if found in down, then return down
if (down != -1)
return down;
// otherwise, nothing has been found.
return -1;
}
void kernel_bfs_wrapper(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target)
{
// 1 dimensional
dim3 blockSize = (256);
dim3 gridSize = ((N*N + (2048 - 1))/(2048));
bfs_kernel<<< gridSize, blockSize >>>(matrix, found_idx, mtx, N, target);
}
__global__ void bfs_kernel(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
//unsigned long long idy = threadIdx.y + blockDim.y + blockIdx.y;
if (idx >= N*N) {
return;
}
//printf("%d\n", matrix[idx]);
//short *row = (short*) ((char*) matrix + ;
// found!!
if (matrix[idx] == target) {
//printf("%d\n", matrix[idx]);
//while(atomicCAS(mtx, 0, 1) != 0);
//printf("Set\n");
//*result = matrix[idx];
atomicMax(found_idx, idx);
//printf("found id: %d\n", *found_idx);
//printf("id: %d\n", idx);
//printf("Result: %d\n", *result);
//atomicExch(mtx, 0);
//printf("Keep going\n");
}
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <stdlib.h>
#include <ctime>
#include <queue>
using namespace std;
short iterative_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix);
short recursive_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix);
short recursive_bfs_helper(short **matrix, unsigned long long N, short target, unsigned long long x, unsigned long long y, bool **visited_matrix);
void kernel_bfs_wrapper(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target);
__global__ void bfs_kernel(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target);
int main()
{
// default size is 100
unsigned long long N = 100;
int *h_found_idx, *found_idx;
short **seq_matrix,
*h_matrix,
*cuda_matrix,
*seq_result,
*cuda_result,
target;
int *mtx;
bool **visited_matrix;
// Declare timers
float cuda_elapsed_time;
hipEvent_t cuda_start, cuda_stop;
double seq_start, seq_stop, seq_iter_elapsed_time, seq_recur_elapsed_time;
cout << "Enter N (NxN matrix): ";
cin >> N;
cout << "Enter target integer [0 to 100]: ";
cin >> target;
// allocate memory for seq
seq_matrix = (short**) malloc(N * sizeof(short*));
visited_matrix = (bool**) malloc(N * sizeof(bool*));
seq_result = (short*) malloc(sizeof(short));
h_found_idx = (int*) malloc(sizeof(int));
h_matrix = (short*) malloc(N * N * sizeof(short));
srand(time(0));
// set matrix to random short integers from 0 to 100
for (unsigned long long i = 0; i < N; i++) {
seq_matrix[i] = (short*) malloc(N * sizeof(short));
visited_matrix[i] = (bool*) malloc(N * sizeof(bool));
for (unsigned long long j = 0; j < N; j++) {
seq_matrix[i][j] = rand() % 101;
visited_matrix[i][j] = false;
h_matrix[i*N + j] = seq_matrix[i][j];
}
}
//cout << "random numbers generated" << endl;
// allocate memory for cuda
hipMalloc((void **) &cuda_matrix, N * N * sizeof(short));
//cudaMallocPitch((void**)&cuda_matrix, &pitch, N * sizeof(short), N);
//cudaMallocPitch((void**)&cuda_visited_matrix, &pitch_visited, N * sizeof(short), N);
//for (unsigned long long i = 0; i < N; i++) {
// cudaMalloc(&cuda_matrix[i], N * sizeof(short));
//}
hipMalloc((void**)&found_idx, sizeof(int));
//cudaMalloc((void**)&cuda_result, sizeof(short));
hipMalloc((void**)&mtx, sizeof(int));
//cout << "cuda malloc good" << endl;
// set values of cuda target to -1 (not found)
// set values of mtx target to 0
hipMemset(found_idx, 0, sizeof(int));
//cudaMemset(cuda_result, -1, sizeof(short));
hipMemset(mtx, 0, sizeof(short));
// set up timing variables
hipEventCreate(&cuda_start);
hipEventCreate(&cuda_stop);
//for (unsigned long long i = 0; i < N; i++) {
// cudaMemcpy(cuda_matrix[i], seq_matrix[i], N * sizeof(short), cudaMemcpyHostToDevice);
//cudaMemcpy(cuda_visited_matrix[i], visited_matrix[i], N * sizeof(bool), cudaMemcpyHostToDevice);
//}
hipMemcpy(cuda_matrix, h_matrix, N * N * sizeof(short), hipMemcpyHostToDevice);
//cudaMemcpy2DToArray(cuda_matrix, pitch, seq_matrix, N*sizeof(short), N*sizeof(short), N, cudaMemcpyHostToDevice);
//cudaMemcpy2DToArray(cuda_visited_matrix, pitch_visited, visited_matrix, N*sizeof(bool), N*sizeof(bool), N, cudaMemcpyHostToDevice);
//cudaMemcpy(cuda_matrix, seq_matrix, N * sizeof(short*), cudaMemcpyHostToDevice);
//cudaMemcpy(cuda_visited_matrix, visited_matrix, N * sizeof(bool*), cudaMemcpyHostToDevice);
// copy from host to device
hipEventRecord(cuda_start, 0);
// START CUDA
kernel_bfs_wrapper(cuda_matrix, found_idx, mtx, N, target);
// copy from device to host
hipEventRecord(cuda_stop, 0);
hipEventSynchronize(cuda_stop);
hipEventElapsedTime(&cuda_elapsed_time, cuda_start, cuda_stop);
//cudaMemcpy2D(seq_result, sizeof(short)*N, cuda_result, pitch, sizeof(short)*N, N, cudaMemcpyDeviceToHost);
//cudaMemcpy(seq_result, cuda_result, sizeof(short), cudaMemcpyDeviceToHost);
hipMemcpy(h_found_idx, found_idx, sizeof(int), hipMemcpyDeviceToHost);
// destroy timers
hipEventDestroy(cuda_start);
hipEventDestroy(cuda_stop);
cout << "----------------------------------------------------------" << endl;
cout << "Found: " << h_matrix[*h_found_idx] << endl;
cout << "[CUDA] Elapsed time: " << cuda_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
cout << endl;
cout << "Starting sequential iterative approach." << endl;
// reset visited_matrix back to false
for (unsigned long long i = 0; i < N; i++) {
for (unsigned long long j = 0; j < N; j++) {
visited_matrix[i][j] = false;
}
}
seq_start = (double) clock();
// call iterative bfs
*seq_result = -1;
*seq_result = iterative_bfs(seq_matrix, N, target, visited_matrix);
seq_stop = (double) clock();
seq_iter_elapsed_time = (double) 1000.0*((double)(seq_stop - seq_start))/CLOCKS_PER_SEC;
cout << "----------------------------------------------------------" << endl;
cout << "Found: " << *seq_result << endl;
cout << "[SEQUENTIAL - Iterative] Elapsed time: " << seq_iter_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
cout << "Starting sequential recursive approach." << endl;
// reset visited_matrix back to false
for (unsigned long long i = 0; i < N; i++) {
for (unsigned long long j = 0; j < N; j++) {
visited_matrix[i][j] = false;
}
}
seq_start = (double) clock();
// call recursive bfs
*seq_result = -1;
*seq_result = recursive_bfs(seq_matrix, N, target, visited_matrix);
seq_stop = (double) clock();
seq_recur_elapsed_time = (double) 1000.0*((double)(seq_stop - seq_start))/CLOCKS_PER_SEC;
cout << "----------------------------------------------------------" << endl;
cout << "Found: " << *seq_result << endl;
cout << "[SEQUENTIAL - Recursive] Elapsed time: " << seq_recur_elapsed_time << " clock cycles" << endl;
cout << "----------------------------------------------------------" << endl;
// free and cuda free
for (unsigned long long i = 0; i < N; i++) {
free(seq_matrix[i]);
free(visited_matrix[i]);
}
free(seq_matrix);
free(visited_matrix);
free(h_matrix);
free(seq_result);
free(h_found_idx);
hipFree(cuda_matrix);
hipFree(mtx);
hipFree(found_idx);
//cudaFree(cuda_result);
return 0;
}
short iterative_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix) {
// check initial spot
if (matrix[0][0] == target)
return matrix[0][0];
visited_matrix[0][0] = true;
queue<unsigned long long> qx;
queue<unsigned long long> qy;
qx.push(0);
qy.push(0);
while (!qx.empty()) {
unsigned long long x = qx.front();
unsigned long long y = qy.front();
qx.pop();
qy.pop();
visited_matrix[x][y] = true;
if (matrix[x][y] == target) {
return matrix[x][y];
}
// check right then check down
if (x + 1 < N && !visited_matrix[x+1][y]) {
qx.push(x+1);
qy.push(y);
}
if (y + 1 < N && !visited_matrix[x][y+1]) {
qx.push(x);
qy.push(y+1);
}
}
return -1;
}
short recursive_bfs(short **matrix, unsigned long long N, short target, bool **visited_matrix) {
// check initial spot
if (matrix[0][0] == target)
return matrix[0][0];
visited_matrix[0][0] = true;
return recursive_bfs_helper(matrix, N, target, 0, 0, visited_matrix);
}
short recursive_bfs_helper(short **matrix, unsigned long long N, short target, unsigned long long x, unsigned long long y, bool **visited_matrix) {
// check right
if (x+1 < N && matrix[x+1][y] == target)
return matrix[x+1][y];
// check down
if (y+1 < N && matrix[x][y+1] == target)
return matrix[x][y+1];
// traverse to right first, then down
short right = -1;
short down = -1;
if (x+1 < N && !visited_matrix[x+1][y]) {
visited_matrix[x+1][y] = true;
right = recursive_bfs_helper(matrix, N, target, x+1, y, visited_matrix);
}
// if found in right, then return right
if (right != -1)
return right;
if (y+1 < N && !visited_matrix[x][y+1]) {
visited_matrix[x][y+1] = true;
down = recursive_bfs_helper(matrix, N, target, x, y+1, visited_matrix);
}
// if found in down, then return down
if (down != -1)
return down;
// otherwise, nothing has been found.
return -1;
}
void kernel_bfs_wrapper(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target)
{
// 1 dimensional
dim3 blockSize = (256);
dim3 gridSize = ((N*N + (2048 - 1))/(2048));
bfs_kernel<<< gridSize, blockSize >>>(matrix, found_idx, mtx, N, target);
}
__global__ void bfs_kernel(short *matrix, int *found_idx, int *mtx, unsigned long long N, short target)
{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
//unsigned long long idy = threadIdx.y + blockDim.y + blockIdx.y;
if (idx >= N*N) {
return;
}
//printf("%d\n", matrix[idx]);
//short *row = (short*) ((char*) matrix + ;
// found!!
if (matrix[idx] == target) {
//printf("%d\n", matrix[idx]);
//while(atomicCAS(mtx, 0, 1) != 0);
//printf("Set\n");
//*result = matrix[idx];
atomicMax(found_idx, idx);
//printf("found id: %d\n", *found_idx);
//printf("id: %d\n", idx);
//printf("Result: %d\n", *result);
//atomicExch(mtx, 0);
//printf("Keep going\n");
}
}
|
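Note on the pair above: neither the original nor the converted code checks the return values of the runtime calls. A minimal sketch of an error-checking helper that could wrap calls such as hipMalloc and hipMemcpy is shown below; the HIP_CHECK name and its behaviour are assumptions for illustration, not part of the original example.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
// Hypothetical helper (not in the original conversion): abort on any HIP runtime error.
#define HIP_CHECK(call)                                                    \
    do {                                                                   \
        hipError_t err_ = (call);                                          \
        if (err_ != hipSuccess) {                                          \
            std::fprintf(stderr, "HIP error %s at %s:%d\n",                \
                         hipGetErrorString(err_), __FILE__, __LINE__);     \
            std::exit(EXIT_FAILURE);                                       \
        }                                                                  \
    } while (0)
// Example usage: HIP_CHECK(hipMalloc((void**)&cuda_matrix, N * N * sizeof(short)));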
Convert the following CUDA code to AMD GPU code:
cuda
//SEE bao_flow_c2f_classic_kernel.cu
|
#include <hip/hip_runtime.h>
//SEE bao_flow_c2f_classic_kernel.cu
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads
extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads
__shared__ float meansum;//shared 1D-matrix for storing mean total of each threads
__shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block
int eva_block,index;
unsigned int thread_id = threadIdx.y;//Getting Id of thread
unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix
__syncthreads();//waiting for all threads
mean[thread_id]=input[j];//Assigned each column element of matrix to each thread
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);//calculating index of remaining element
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
mean[thread_id]+=input[index];
}
}
/*Reducing sum of each thread to final block sum*/
if(thread_id==0)
{
for(int i=0;i<numThread;i++)
{
meansum+=mean[thread_id+i];
}
mean_out[blockIdx.x]=meansum/dim1;//Mean of block
}
__syncthreads();
sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0);
}
}
/*Reducing SD Sum of each thread to final block SD sum*/
if(thread_id==0)
{
sdsum=0;
for(int i=0;i<numThread;i++)
{
sdsum+=sd[thread_id+i];//accumulating each thread's partial SD sum
}
sd_out[blockIdx.x]=sdsum/dim1;//SD of block
}
__syncthreads();//waiting for threads
/*Normalization of each block data on basis of mean and sd of each block*/
output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
/*Reducing Normalized Sum for remaining elements*/
for(int i=0;i<eval_ceil;i++){
if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1)
{
output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
numThreads = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
/*Number of Threads should be less than or equal to 1024 else exit*/
if (numThreads > 1024)
{
printf("Number of threads cannot be more than %i.\n", 1024);
exit(0);
}
}
else
{
printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]);
exit(0);
}
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv)
{
/* Timing variables */
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* Host_Input = new float [N * N];//Input Matrix
float* Host_Output = new float [N * N];//Output Matrix
int i,j;
/*Initializing Input Matrix with random values*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
//Host_Input[j* N + i] = j+1;
Host_Input[j* N + i] = (float)rand() / 32768.0;
}
}
float* input;//Device Input Matrix
float* output;//Device Output Matrix
float* mean_out;//Device Mean Matrix
float* sd_out;//Device SD Matrix
size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix
size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix
//allocated the device memory for source array
cudaMalloc(&input, matrix_size_2d);
cudaMemcpy(input, Host_Input, matrix_size_2d, cudaMemcpyHostToDevice);
//allocate the device memory for destination array
cudaMalloc(&output, matrix_size_2d);
//allocate the device memory for mean array
cudaMalloc(&mean_out, matrix_size_1d);
//allocate the device memory for sd array
cudaMalloc(&sd_out, matrix_size_1d);
dim3 dimBlock;
dim3 dimGrid;
/* Designing Decisions for number of blocks and number of threads in each block */
if( N < numThreads)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = numThreads;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
cudaEventRecord(start);
gettimeofday(&etstart,&tzdummy);
etstart2 = times(&cputstart);
double d_ceil=(double)N/(double)numThreads;
int c=ceil(d_ceil);
//printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c);
//Calling CUDA Kernel Function For Normalizing Matrix
Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input,output,mean_out,sd_out,N,numThreads,c);
cudaDeviceSynchronize();
/* Stop Clock code below*/
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/*Copying Output Device Matrix to Output Host Matrix*/
cudaMemcpy(Host_Output, output, N * N * sizeof(float), cudaMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
/* if (N < 10) {
printf("\nB1 =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}*/
/* Display result time */
printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed GPU Time = %g ms \n",milliseconds);
printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6);
float mean = N * log2((float)N) + N;
float sd = N * log2((float)N) + (2*N) + (2*N*N);
float norm = 2 * N * N;
printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3));
//deallocate device memory below
cudaFree(input);
cudaFree(output);
cudaFree(mean_out);
cudaFree(sd_out);
//deallocate Host Input and Host Output Matrix
free(Host_Input);
free(Host_Output);
exit(0);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>
#define MAXN 8000 /* Max value of N */
int N; /* Matrix Dimension*/
int numThreads; /* Number of Threads */
/*Random*/
#define randm() 4|2[uid]&3
/*CUDA Function for calculating mean column-wise and then reducing each column's totals*/
/*This Function will be called Number of blocks times*/
__global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil)
{
extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads
extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads
__shared__ float meansum;//shared 1D-matrix for storing mean total of each threads
__shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block
int eva_block,index;
unsigned int thread_id = threadIdx.y;//Getting Id of thread
unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix
__syncthreads();//waiting for all threads
mean[thread_id]=input[j];//Assigned each column element of matrix to each thread
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);//calculating index of remaining element
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
mean[thread_id]+=input[index];
}
}
/*Reducing sum of each thread to final block sum*/
if(thread_id==0)
{
for(int i=0;i<numThread;i++)
{
meansum+=mean[thread_id+i];
}
mean_out[blockIdx.x]=meansum/dim1;//Mean of block
}
__syncthreads();
sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block
/*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/
for(int i=0;i<dim1;i+=numThread)
{
index=dim1*(numThread+thread_id+i);
eva_block=index+blockIdx.x;
if(eva_block < dim1*dim1)
{
sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0);
}
}
/*Reducing SD Sum of each thread to final block SD sum*/
if(thread_id==0)
{
sdsum=0;
for(int i=0;i<numThread;i++)
{
sdsum+=sd[thread_id+i];//accumulating each thread's partial SD sum
}
sd_out[blockIdx.x]=sdsum/dim1;//SD of block
}
__syncthreads();//waiting for threads
/*Normalization of each block data on basis of mean and sd of each block*/
output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x];
/*Reducing Normalized Sum for remaining elements*/
for(int i=0;i<eval_ceil;i++){
if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1)
{
output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes
}
}
}
/* returns a seed for srand based on the time */
unsigned int time_seed() {
struct timeval t;
struct timezone tzdummy;
gettimeofday(&t, &tzdummy);
return (unsigned int)(t.tv_usec);
}
/* Set the program parameters from the command-line arguments */
void parameters(int argc, char **argv) {
int seed = 0; /* Random seed */
char uid[32]; /*User name */
/* Read command-line arguments */
srand(time_seed()); /* Randomize */
if (argc == 4) {
seed = atoi(argv[3]);
srand(seed);
printf("Random seed = %i\n", seed);
}
if (argc >= 3) {
N = atoi(argv[1]);
numThreads = atoi(argv[2]);
if (N < 1 || N > MAXN) {
printf("N = %i is out of range.\n", N);
exit(0);
}
/*Number of Threads should be less than or equal to 1024 else exit*/
if (numThreads > 1024)
{
printf("Number of threads cannot be more than %i.\n", 1024);
exit(0);
}
}
else
{
printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]);
exit(0);
}
printf("\nMatrix dimension N = %i.\n", N);
}
int main(int argc, char **argv)
{
/* Timing variables */
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
struct timezone tzdummy;
clock_t etstart2, etstop2; /* Elapsed times using times() */
unsigned long long usecstart, usecstop;
struct tms cputstart, cputstop; /* CPU times for my processes */
/* Process program parameters */
parameters(argc, argv);
float* Host_Input = new float [N * N];//Input Matrix
float* Host_Output = new float [N * N];//Output Matrix
int i,j;
/*Initializing Input Matrix with random values*/
printf("\nInitializing...\n");
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
//Host_Input[j* N + i] = j+1;
Host_Input[j* N + i] = (float)rand() / 32768.0;
}
}
float* input;//Device Input Matrix
float* output;//Device Output Matrix
float* mean_out;//Device Mean Matrix
float* sd_out;//Device SD Matrix
size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix
size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix
//allocated the device memory for source array
hipMalloc(&input, matrix_size_2d);
hipMemcpy(input, Host_Input, matrix_size_2d, hipMemcpyHostToDevice);
//allocate the device memory for destination array
hipMalloc(&output, matrix_size_2d);
//allocate the device memory for mean array
hipMalloc(&mean_out, matrix_size_1d);
//allocate the device memory for sd array
hipMalloc(&sd_out, matrix_size_1d);
dim3 dimBlock;
dim3 dimGrid;
/* Designing Decisions for number of blocks and number of threads in each block */
if( N < numThreads)
{
dimBlock.x = 1;
dimBlock.y = N;
dimGrid.x = N;
dimGrid.y = 1;
}
else
{
dimBlock.x = 1;
dimBlock.y = numThreads;
dimGrid.x = N;
dimGrid.y = 1;
}
/* Start Clock */
printf("\nStarting clock.\n");
hipEventRecord(start);
gettimeofday(&etstart,&tzdummy);
etstart2 = times(&cputstart);
double d_ceil=(double)N/(double)numThreads;
int c=ceil(d_ceil);
//printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c);
//Calling CUDA Kernel Function For Normalizing Matrix
Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input,output,mean_out,sd_out,N,numThreads,c);
hipDeviceSynchronize();
/* Stop Clock code below*/
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
gettimeofday(&etstop, &tzdummy);
etstop2 = times(&cputstop);
printf("Stopped clock.\n");
/*Copying Output Device Matrix to Output Host Matrix*/
hipMemcpy(Host_Output, output, N * N * sizeof(float), hipMemcpyDeviceToHost);
usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec;
usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec;
/* Display output */
/* if (N < 10) {
printf("\nB1 =\n\t");
for (i= 0; i < N; i++) {
for (j = 0; j < N; j++) {
printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? ", " : ";\n\t");
}
}
}*/
/* Display result time */
printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000);
printf("Elapsed GPU Time = %g ms \n",milliseconds);
printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6);
float mean = N * log2((float)N) + N;
float sd = N * log2((float)N) + (2*N) + (2*N*N);
float norm = 2 * N * N;
printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3));
//deallocate device memory below
hipFree(input);
hipFree(output);
hipFree(mean_out);
hipFree(sd_out);
//deallocate Host Input and Host Output Matrix
free(Host_Input);
free(Host_Output);
exit(0);
}
|
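Note on the Mean_SD_Norm kernel above: in both CUDA and HIP, every extern __shared__ array declared in a kernel refers to the same dynamic shared-memory allocation, so mean[] and sd[] alias one another (the kernel still produces its result because the two arrays are used in separate phases). The sketch below is illustrative only and is not applied to the example; it shows how two distinct views could be carved out of a single extern buffer when the launch supplies 2 * dim1 * sizeof(float) bytes.
#include <hip/hip_runtime.h>
// Sketch only: assumes the kernel is launched with 2 * dim1 * sizeof(float) bytes
// of dynamic shared memory.
__global__ void mean_sd_split_demo(const float* input, int dim1)
{
    extern __shared__ float shm[];
    float* mean = shm;          // first dim1 floats
    float* sd   = shm + dim1;   // next dim1 floats, a genuinely separate region
    int tid = threadIdx.y;
    if (tid < dim1) {
        mean[tid] = input[blockIdx.x * dim1 + tid];
        sd[tid]   = 0.0f;
    }
    // ... reductions would then proceed as in Mean_SD_Norm above
}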
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
int main()
{
int seed;
std::cin >> seed;
thrust::minstd_rand eng(seed);
thrust::uniform_real_distribution<double> d(25, 40);
for(int i = 0; i< 10; i ++)
{
std::cout << d(eng) << " ";
}
std::cout << "\n";
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
int main()
{
int seed;
std::cin >> seed;
thrust::minstd_rand eng(seed);
thrust::uniform_real_distribution<double> d(25, 40);
for(int i = 0; i< 10; i ++)
{
std::cout << d(eng) << " ";
}
std::cout << "\n";
}
|
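A small extension of the pair above, sketched under the assumption that the same thrust headers are available to hipcc (for example through rocThrust), stores the generated samples in a device vector instead of printing them:
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
int main()
{
    thrust::minstd_rand eng(1234);                        // fixed seed for the sketch
    thrust::uniform_real_distribution<double> d(25, 40);
    thrust::host_vector<double> h(10);
    for (size_t i = 0; i < h.size(); ++i)
        h[i] = d(eng);                                    // draw the samples on the host
    thrust::device_vector<double> v = h;                  // copy them to the GPU
    return 0;
}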
Convert the following CUDA code to AMD GPU code:
cuda
/*
Name: Jonathan Dunlap
Course: Introduction to Parallel and Cloud Computing
CRN: 75092
Assignment: Refactor ParallelTeam
Date: 11/19/2013
*/
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cstdlib>
__global__ void incrementArrayOnDevice(int *a, int N, int *count)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if( id < N )
{
if( a[id] == 3 )
{
atomicAdd(count, 1);
}
}
}
extern "C"
int run_kernel(int *a_h, int length)
{
//int *a_h; // host
int *a_d; // device
int N = length;
// allocate array on host
a_h = (int*)malloc(sizeof(int) * N);
for(int i = 0; i < N; ++i)
a_h[i] = (i % 3 == 0 ? 3 : 1);
// allocate arrays on device
cudaMalloc(&a_d, sizeof(int) * N);
// copy data from host to device
cudaMemcpy(a_d, a_h, sizeof(int) * N, cudaMemcpyHostToDevice);
// do calculation on device
int blockSize = 512;
int nBlocks = N / blockSize + (N % blockSize == 0 ? 0 : 1);
printf("number of blocks: %d\n", nBlocks);
int count;
int *devCount;
cudaMalloc(&devCount, sizeof(int));
cudaMemset(devCount, 0, sizeof(int));
incrementArrayOnDevice<<<nBlocks, blockSize>>> (a_d, N, devCount);
cudaMemcpy(&count, devCount, sizeof(int), cudaMemcpyDeviceToHost); // retrieve result from device
free(a_h);
cudaFree(a_d);
cudaFree(devCount);
return count;
}
|
/*
Name: Jonathan Dunlap
Course: Introduction to Parallel and Cloud Computing
CRN: 75092
Assignment: Refactor ParallelTeam
Date: 11/19/2013
*/
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <cstdlib>
__global__ void incrementArrayOnDevice(int *a, int N, int *count)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if( id < N )
{
if( a[id] == 3 )
{
atomicAdd(count, 1);
}
}
}
extern "C"
int run_kernel(int *a_h, int length)
{
//int *a_h; // host
int *a_d; // device
int N = length;
// allocate array on host
a_h = (int*)malloc(sizeof(int) * N);
for(int i = 0; i < N; ++i)
a_h[i] = (i % 3 == 0 ? 3 : 1);
// allocate arrays on device
hipMalloc(&a_d, sizeof(int) * N);
// copy data from host to device
hipMemcpy(a_d, a_h, sizeof(int) * N, hipMemcpyHostToDevice);
// do calculation on device
int blockSize = 512;
int nBlocks = N / blockSize + (N % blockSize == 0 ? 0 : 1);
printf("number of blocks: %d\n", nBlocks);
int count;
int *devCount;
hipMalloc(&devCount, sizeof(int));
hipMemset(devCount, 0, sizeof(int));
incrementArrayOnDevice<<<nBlocks, blockSize>>> (a_d, N, devCount);
hipMemcpy(&count, devCount, sizeof(int), hipMemcpyDeviceToHost); // retrieve result from device
free(a_h);
hipFree(a_d);
hipFree(devCount);
return count;
}
|
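Usage note for the pair above: run_kernel allocates and fills its own host array, overwriting the a_h argument it receives, so a caller only needs to supply a length. A hypothetical driver, not part of the original file, could look like this:
#include <cstdio>
extern "C" int run_kernel(int *a_h, int length);
int main()
{
    int count = run_kernel(nullptr, 1 << 20);   // every third element is set to 3
    std::printf("elements equal to 3: %d\n", count);
    return 0;
}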
Convert the following CUDA code to AMD GPU code:
cuda
extern "C" __global__ void kernel_0arg() {}
extern "C" __global__ void kernel_1arg(float * arg0) {}
extern "C" __global__ void kernel_2arg(float * arg0, float * arg1) {}
extern "C" __global__ void kernel_3arg(float * arg0, float * arg1,
float * arg2) {}
extern "C" __global__ void kernel_4arg(float * arg0, float * arg1,
float * arg2, float * arg3) {}
extern "C" __global__ void kernel_5arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4) {}
extern "C" __global__ void kernel_6arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5) {}
extern "C" __global__ void kernel_7arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6) {}
extern "C" __global__ void kernel_8arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6, float * arg7) {}
extern "C" __global__ void kernel_9arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6, float * arg7,
float * arg8) {}
extern "C" __global__ void kernel_10arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6, float * arg7,
float * arg8, float * arg9) {}
|
#include <hip/hip_runtime.h>
extern "C" __global__ void kernel_0arg() {}
extern "C" __global__ void kernel_1arg(float * arg0) {}
extern "C" __global__ void kernel_2arg(float * arg0, float * arg1) {}
extern "C" __global__ void kernel_3arg(float * arg0, float * arg1,
float * arg2) {}
extern "C" __global__ void kernel_4arg(float * arg0, float * arg1,
float * arg2, float * arg3) {}
extern "C" __global__ void kernel_5arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4) {}
extern "C" __global__ void kernel_6arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5) {}
extern "C" __global__ void kernel_7arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6) {}
extern "C" __global__ void kernel_8arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6, float * arg7) {}
extern "C" __global__ void kernel_9arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6, float * arg7,
float * arg8) {}
extern "C" __global__ void kernel_10arg(float * arg0, float * arg1,
float * arg2, float * arg3,
float * arg4, float * arg5,
float * arg6, float * arg7,
float * arg8, float * arg9) {}
|
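Note on the pair above: HIP accepts either the <<< >>> launch syntax used throughout these conversions or the hipLaunchKernelGGL macro. The sketch below launches kernel_2arg with the macro; the buffer size and launch geometry are arbitrary values chosen for illustration.
#include <hip/hip_runtime.h>
extern "C" __global__ void kernel_2arg(float * arg0, float * arg1);
int main()
{
    float *a = nullptr, *b = nullptr;
    hipMalloc(&a, 256 * sizeof(float));
    hipMalloc(&b, 256 * sizeof(float));
    // Equivalent to kernel_2arg<<<dim3(1), dim3(256)>>>(a, b);
    hipLaunchKernelGGL(kernel_2arg, dim3(1), dim3(256), 0, 0, a, b);
    hipDeviceSynchronize();
    hipFree(a);
    hipFree(b);
    return 0;
}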
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void cudaUZeroInit_kernel(unsigned int size, unsigned int* data)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride)
data[i] = 0U;
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaUZeroInit_kernel(unsigned int size, unsigned int* data)
{
const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = index; i < size; i += stride)
data[i] = 0U;
}
|
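Because cudaUZeroInit_kernel above uses a grid-stride loop, any launch geometry covers a buffer of arbitrary length. A hypothetical host-side launcher, with a block size chosen only for illustration, might be:
#include <hip/hip_runtime.h>
__global__ void cudaUZeroInit_kernel(unsigned int size, unsigned int* data);
// Hypothetical launcher, not part of the original snippet.
void zero_init(unsigned int* d_data, unsigned int size)
{
    if (size == 0) return;
    const unsigned int block = 256;
    const unsigned int grid  = (size + block - 1) / block;   // one thread per element
    cudaUZeroInit_kernel<<<grid, block>>>(size, d_data);
}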
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
__global__ void shared_mem(int x[], int n, int blks) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float temp_data;
while(thread_id < n){
temp_data = x[thread_id];
for(int i=0;i<n;i++){
temp_data+=1;
}
thread_id+=blks*blockDim.x;
}
}
__global__ void global_mem(int x[], int n, int blks) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
while(thread_id < n){
for(int i=0;i<n;i++){
x[thread_id]+=1;
}
thread_id+=blks*blockDim.x;
}
}
/*
*arguments
*1 - number of elements
*2 - threads per block
*3 - number of blocks
*/
int main(int argc, char* argv[]) {
int n, th_p_blk;
int *h_x;
int *d_x;
size_t size;
th_p_blk = 1024;
n = 1024;
if(argc > 1)
n = atoi(argv[1]);
if(argc > 2)
th_p_blk = atoi(argv[2]);
int blks = ceil((float)n/(float)th_p_blk);
if(argc > 3)
blks = atoi(argv[3]);
size = n*sizeof(int);
// Allocate memory for the vectors on host memory.
h_x = (int*) malloc(size);
for (int i = 0; i < n; i++) {
h_x[i] =0;
}
/* Allocate vectors in device memory */
cudaMalloc(&d_x, size);
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
float time_shared,time_global;
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0); // 0 is the stream number
// do Work…
/* Kernel Call */
shared_mem<<<blks,th_p_blk>>>(d_x, n,blks);
cudaThreadSynchronize();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
float elapsedTime;
cudaEventElapsedTime (&elapsedTime, start, stop);
time_shared = elapsedTime;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0); // 0 is the stream number
// do Work…
/* Kernel Call */
global_mem<<<blks,th_p_blk>>>(d_x, n,blks);
cudaThreadSynchronize();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime (&elapsedTime, start, stop);
time_global = elapsedTime;
//printf ("Total GPU Time: %.5f ms \n", elapsedTime);
printf ("[%d,%.5f,%.5f],\n", n,time_shared,time_global);
cudaEventDestroy(start);
/* Free device memory */
cudaFree(d_x);
/* Free host memory */
free(h_x);
return 0;
} /* main */
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
__global__ void shared_mem(int x[], int n, int blks) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ float temp_data;
while(thread_id < n){
temp_data = x[thread_id];
for(int i=0;i<n;i++){
temp_data+=1;
}
thread_id+=blks*blockDim.x;
}
}
__global__ void global_mem(int x[], int n, int blks) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
while(thread_id < n){
for(int i=0;i<n;i++){
x[thread_id]+=1;
}
thread_id+=blks*blockDim.x;
}
}
/*
*arguments
*1 - number of elements
*2 - threads per block
*3 - number of blocks
*/
int main(int argc, char* argv[]) {
int n, th_p_blk;
int *h_x;
int *d_x;
size_t size;
th_p_blk = 1024;
n = 1024;
if(argc > 1)
n = atoi(argv[1]);
if(argc > 2)
th_p_blk = atoi(argv[2]);
int blks = ceil((float)n/(float)th_p_blk);
if(argc > 3)
blks = atoi(argv[3]);
size = n*sizeof(int);
// Allocate memory for the vectors on host memory.
h_x = (int*) malloc(size);
for (int i = 0; i < n; i++) {
h_x[i] =0;
}
/* Allocate vectors in device memory */
hipMalloc(&d_x, size);
/* Copy vectors from host memory to device memory */
hipMemcpy(d_x, h_x, size, hipMemcpyHostToDevice);
float time_shared,time_global;
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0); // 0 is the stream number
// do Work…
/* Kernel Call */
shared_mem<<<blks,th_p_blk>>>(d_x, n,blks);
hipDeviceSynchronize();
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
float elapsedTime;
hipEventElapsedTime (&elapsedTime, start, stop);
time_shared = elapsedTime;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0); // 0 is the stream number
// do Work…
/* Kernel Call */
global_mem<<<blks,th_p_blk>>>(d_x, n,blks);
hipDeviceSynchronize();
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
hipEventElapsedTime (&elapsedTime, start, stop);
time_global = elapsedTime;
//printf ("Total GPU Time: %.5f ms \n", elapsedTime);
printf ("[%d,%.5f,%.5f],\n", n,time_shared,time_global);
hipEventDestroy(start);
/* Free device memory */
hipFree(d_x);
/* Free host memory */
free(h_x);
return 0;
} /* main */
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void cu_fliplr(const float* src, float* dst, const int rows, const int cols, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int c = tid % cols;
int r = tid / cols;
dst[tid] = src[(cols - c - 1) + r * cols];
tid += stride;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cu_fliplr(const float* src, float* dst, const int rows, const int cols, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
int c = tid % cols;
int r = tid / cols;
dst[tid] = src[(cols - c - 1) + r * cols];
tid += stride;
}
}
|
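cu_fliplr above likewise iterates with a grid-stride loop over n = rows * cols elements. A hypothetical host-side wrapper, with names and block size chosen only for illustration:
#include <hip/hip_runtime.h>
__global__ void cu_fliplr(const float* src, float* dst, const int rows, const int cols, const int n);
// Hypothetical wrapper, not part of the original snippet.
void fliplr(const float* d_src, float* d_dst, int rows, int cols)
{
    const int n = rows * cols;
    if (n <= 0) return;
    const int block = 256;
    const int grid  = (n + block - 1) / block;
    cu_fliplr<<<grid, block>>>(d_src, d_dst, rows, cols, n);
    hipDeviceSynchronize();
}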
Convert the following CUDA code to AMD GPU code:
cuda
#include "kernel.cuh"
#define TX 32
#define TY 32
#define RAD 1
#define divUp(a,b) ((a) + (b) - 1) / (b)
__device__
unsigned char clip(int n){ return n > 255 ? 255 : (n < 0 ? 0 : n);}
__device__
int idxClip(int idx,int idxMax){
return idx > idxMax ? idxMax : (idx < 0 ? 0 : idx);
}
__device__
int flatten(int col,int row,int width,int height){
return idxClip(col,width) + idxClip(row,height)*width;
}
__global__
void resetKernel(float *d_temp,int w,int h,BC bc){
const int col = blockIdx.x*blockDim.x + threadIdx.x;
const int row = blockIdx.y*blockDim.y + threadIdx.y;
if((col >= w) || (row >= h)) return;
d_temp[row*w + col] = bc.t_a;
}
__global__
void tempKernel(uchar4* d_out,float *d_temp,int w,int h,BC bc){
extern __shared__ float s_in[];
const int col = threadIdx.x + blockDim.x*blockIdx.x;
const int row = threadIdx.y + blockDim.y*blockIdx.y;
if((col >= w) || (row >= h)) return;
const int idx = flatten(col,row,w,h);
const int s_w = blockDim.x + 2*RAD;
const int s_h = blockDim.y + 2*RAD;
const int s_col = threadIdx.x + RAD;
const int s_row = threadIdx.y + RAD;
const int s_idx = flatten(s_col,s_row,s_w,s_h);
d_out[idx].x = 0;
d_out[idx].y = 0;
d_out[idx].z = 0;
d_out[idx].w = 255;
s_in[s_idx] = d_temp[idx];
if(threadIdx.x < RAD){
s_in[flatten(s_col - RAD,s_row,s_w,s_h)] = d_temp[flatten(col - RAD,row,w,h)];
s_in[flatten(s_col + blockDim.x,s_row,s_w,s_h)] = d_temp[flatten(col + blockDim.x,row,w,h)];
}
if(threadIdx.y < RAD){
s_in[flatten(s_col,s_row - RAD,s_w,s_h)] = d_temp[flatten(col,row - RAD,w,h)];
s_in[flatten(s_col,s_row + blockDim.y,s_w,s_h)] = d_temp[flatten(col,row + blockDim.y,w,h)];
}
float dSq = (col - bc.x)*(col - bc.x) + (row - bc.y)*(row - bc.y);
if(dSq < bc.rad*bc.rad){
d_temp[idx] = bc.t_s;
return;
}
if((col == 0) || (col == w - 1) || (row == 0) || (col + row < bc.chamfer) || (col - row > w - bc.chamfer)){
d_temp[idx] = bc.t_a;
return;
}
if(row == h-1){
d_temp[idx] = bc.t_g;
return;
}
__syncthreads();
float temp = 0.25f*(s_in[flatten(s_col - 1,s_row,s_w,s_h)] +
s_in[flatten(s_col + 1,s_row,s_w,s_h)] +
s_in[flatten(s_col,s_row - 1,s_w,s_h)] +
s_in[flatten(s_col,s_row + 1,s_w,s_h)]);
d_temp[idx] = temp;
const unsigned char intensity = clip((int)temp);
d_out[idx].x = intensity;
d_out[idx].z = 255 - intensity;
}
void kernelLauncher(uchar4 *d_out,float *d_temp,int w,int h,BC bc){
const dim3 blockSize(TX,TY);
const dim3 gridSize(divUp(w,TX),divUp(h,TY));
const size_t smSz = (TX + 2*RAD)*(TY + 2*RAD)*sizeof(float);
tempKernel<<<gridSize,blockSize,smSz>>>(d_out,d_temp,w,h,bc);
}
void resetTemperature(float *d_temp,int w,int h, BC bc){
const dim3 blockSize(TX,TY);
const dim3 gridSize(divUp(w,TX),divUp(h,TY));
resetKernel<<<gridSize,blockSize>>>(d_temp,w,h,bc);
}
|
//
// Created by shuliang on 19-7-18.
//
#ifndef HARMONICCOORDINATEDEMO_KERNEL_CUH
#define HARMONICCOORDINATEDEMO_KERNEL_CUH
struct uchar4;
typedef struct{
int x,y;
float rad;
int chamfer;
float t_s,t_a,t_g;
}BC;
void kernelLauncher(uchar4 *d_out,float *d_temp,int w,int h,BC bc);
void resetTemperature(float *d_temp,int w,int h,BC bc);
#endif //HARMONICCOORDINATEDEMO_KERNEL_CUH
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
using namespace std;
void getCudaDeviceInfo() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
cout << "GPU Device Id: " << i << endl;
cout << "Device name: " << prop.name << endl;
cout << "Memory Clock Rate (KHz): " <<
prop.memoryClockRate << endl;
cout << "Memory Bus Width (bits): " <<
prop.memoryBusWidth << endl;
cout << "Peak Memory Bandwidth (GB/s): " <<
2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6 << endl;
cout << endl;
}
}
|
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
void getCudaDeviceInfo() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
cout << "GPU Device Id: " << i << endl;
cout << "Device name: " << prop.name << endl;
cout << "Memory Clock Rate (KHz): " <<
prop.memoryClockRate << endl;
cout << "Memory Bus Width (bits): " <<
prop.memoryBusWidth << endl;
cout << "Peak Memory Bandwidth (GB/s): " <<
2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6 << endl;
cout << endl;
}
}
|
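A hypothetical driver, not part of the original snippet, exercising the converted helper:
void getCudaDeviceInfo();   // defined in the snippet above
int main()
{
    getCudaDeviceInfo();    // prints the properties of every visible HIP device
    return 0;
}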
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<cmath>
#include<iostream>
#include <stdio.h>
#include<iomanip>
#include<fstream>
using namespace std;
#define precision 1E-8
#define PI 3.141592653589793238462643383
#define divide (8192)
#define saving 8192
#define lambdamax 1.5
#define dlambda (double(lambdamax)/divide)
const int times = 1000000;
__global__ void kernel(double *dev_arr)//each thread iterates its own sine map
{
int offset = blockDim.x*blockIdx.x + threadIdx.x;//global thread index, used as this thread's offset
double thread_lambda = offset * dlambda;
int start = offset * saving;
for (int i = 0; i < times; i++)
{
dev_arr[start + (i) % saving] = thread_lambda * sin(PI * dev_arr[start + (i - 1) % saving]);
}
}
double arr[divide*saving];//each thread owns a buffer of saving elements
int main()
{
ofstream out("C:\\Users\\10069\\Desktop\\Sinx.txt");
double *dev_arr;
cudaMalloc((void**)&dev_arr, sizeof(double)*divide*saving);
for (int i = 0; i < divide*saving; i++)
{
arr[i] = rand()-(RAND_MAX/2);
}
cudaMemcpy(dev_arr,arr, sizeof(double)*divide*saving, cudaMemcpyHostToDevice);
int blocksize = 512;
kernel<<<divide / blocksize, blocksize >>>(dev_arr);
cudaMemcpy(arr, dev_arr, sizeof(double)*divide*saving, cudaMemcpyDeviceToHost);
double result = 0;
out << setprecision(12);
for (int i = 0; i < divide*saving; i++)
{
out << arr[i] << ' ';
}
cudaFree(dev_arr);
out.close();
system("pause");
}
|
#include "hip/hip_runtime.h"
#include<cmath>
#include<iostream>
#include <stdio.h>
#include<iomanip>
#include<fstream>
using namespace std;
#define precision 1E-8
#define PI 3.141592653589793238462643383
#define divide (8192)
#define saving 8192
#define lambdamax 1.5
#define dlambda (double(lambdamax)/divide)
const int times = 1000000;
__global__ void kernel(double *dev_arr)//each thread iterates its own sine map
{
int offset = blockDim.x*blockIdx.x + threadIdx.x;//global thread index, used as this thread's offset
double thread_lambda = offset * dlambda;
int start = offset * saving;
for (int i = 0; i < times; i++)
{
dev_arr[start + (i) % saving] = thread_lambda * sin(PI * dev_arr[start + (i - 1) % saving]);
}
}
double arr[divide*saving];//each thread owns a buffer of saving elements
int main()
{
ofstream out("C:\\Users\\10069\\Desktop\\Sinx.txt");
double *dev_arr;
hipMalloc((void**)&dev_arr, sizeof(double)*divide*saving);
for (int i = 0; i < divide*saving; i++)
{
arr[i] = rand()-(RAND_MAX/2);
}
hipMemcpy(dev_arr,arr, sizeof(double)*divide*saving, hipMemcpyHostToDevice);
int blocksize = 512;
kernel<<<divide / blocksize, blocksize >>>(dev_arr);
hipMemcpy(arr, dev_arr, sizeof(double)*divide*saving, hipMemcpyDeviceToHost);
double result = 0;
out << setprecision(12);
for (int i = 0; i < divide*saving; i++)
{
out << arr[i] << ' ';
}
hipFree(dev_arr);
out.close();
system("pause");
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/*******************************************************************************
* several useful gpu functions will be defined in this file to calculate
* weno derivatives
******************************************************************************/
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
// convert subindex to linear index
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
// different choices of ENO derivatives
double phi1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
double phi2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
double phi3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
// smoothness parameter
double S1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
double S2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
double S3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
double epsilon = 1e-6;
double alpha1 = 0.1 / pow( (S1 + epsilon), 2);
double alpha2 = 0.6 / pow( (S2 + epsilon), 2);
double alpha3 = 0.3 / pow( (S3 + epsilon), 2);
// weights for each stencil
double sum = alpha1 + alpha2 + alpha3;
double omega1 = alpha1 / sum;
double omega2 = alpha2 / sum;
double omega3 = alpha3 / sum;
return (omega1*phi1 + omega2*phi2 + omega3*phi3);
}
__global__
void weno_derivative(double * WENO_back_x, double * WENO_fore_x, double * WENO_back_y, double * WENO_fore_y, double * WENO_back_z, double * WENO_fore_z, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
double v1 = (lsf[left2] - lsf[left3]) / dx;
double v2 = (lsf[left1] - lsf[left2]) / dx;
double v3 = (lsf[ind] - lsf[left1]) / dx;
double v4 = (lsf[rght1] - lsf[ind]) / dx;
double v5 = (lsf[rght2] - lsf[rght1]) / dx;
double v6 = (lsf[rght3] - lsf[rght2]) / dx;
WENO_back_x[ind] = weno_onesided_derivative(v1,v2,v3,v4,v5);
WENO_fore_x[ind] = weno_onesided_derivative(v6,v5,v4,v3,v2);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
v1 = (lsf[back2] - lsf[back3]) / dy;
v2 = (lsf[back1] - lsf[back2]) / dy;
v3 = (lsf[ind] - lsf[back1]) / dy;
v4 = (lsf[frnt1] - lsf[ind]) / dy;
v5 = (lsf[frnt2] - lsf[frnt1]) / dy;
v6 = (lsf[frnt3] - lsf[frnt2]) / dy;
WENO_back_y[ind] = weno_onesided_derivative(v1,v2,v3,v4,v5);
WENO_fore_y[ind] = weno_onesided_derivative(v6,v5,v4,v3,v2);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
v1 = (lsf[down2] - lsf[down3]) / dz;
v2 = (lsf[down1] - lsf[down2]) / dz;
v3 = (lsf[ind] - lsf[down1]) / dz;
v4 = (lsf[upup1] - lsf[ind]) / dz;
v5 = (lsf[upup2] - lsf[upup1]) / dz;
v6 = (lsf[upup3] - lsf[upup2]) / dz;
WENO_back_z[ind] = weno_onesided_derivative(v1,v2,v3,v4,v5);
WENO_fore_z[ind] = weno_onesided_derivative(v6,v5,v4,v3,v2);
}
// calculate numerical Hamiltonian for surface conservation law equation with input vx,vy,vz and
// surface divergence vd of v
__global__
void surface_conservation_step(double * step, double const * vx, double const * vy, double const * vz, double const * vd, double const * lsf, double dt, int rows, int cols, int pges, double dx, double dy, double dz)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
double v1 = (lsf[left2] - lsf[left3]) / dx;
double v2 = (lsf[left1] - lsf[left2]) / dx;
double v3 = (lsf[ind] - lsf[left1]) / dx;
double v4 = (lsf[rght1] - lsf[ind]) / dx;
double v5 = (lsf[rght2] - lsf[rght1]) / dx;
double v6 = (lsf[rght3] - lsf[rght2]) / dx;
double xL= weno_onesided_derivative(v1,v2,v3,v4,v5);
double xR= weno_onesided_derivative(v6,v5,v4,v3,v2);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
v1 = (lsf[back2] - lsf[back3]) / dy;
v2 = (lsf[back1] - lsf[back2]) / dy;
v3 = (lsf[ind] - lsf[back1]) / dy;
v4 = (lsf[frnt1] - lsf[ind]) / dy;
v5 = (lsf[frnt2] - lsf[frnt1]) / dy;
v6 = (lsf[frnt3] - lsf[frnt2]) / dy;
double yB = weno_onesided_derivative(v1,v2,v3,v4,v5);
double yF = weno_onesided_derivative(v6,v5,v4,v3,v2);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
v1 = (lsf[down2] - lsf[down3]) / dz;
v2 = (lsf[down1] - lsf[down2]) / dz;
v3 = (lsf[ind] - lsf[down1]) / dz;
v4 = (lsf[upup1] - lsf[ind]) / dz;
v5 = (lsf[upup2] - lsf[upup1]) / dz;
v6 = (lsf[upup3] - lsf[upup2]) / dz;
double zD = weno_onesided_derivative(v1,v2,v3,v4,v5);
double zU = weno_onesided_derivative(v6,v5,v4,v3,v2);
step[ind] = (min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD +
lsf[ind] * vd[ind]) * dt ;
}
// calculate numerical Hamiltonian for surface conservation law equation with finite volume method
// assuming c field and velocity field are already extended
__global__
void spatial_finite_volume_step(double * step, double const * vx, double const * vy, double const * vz, double const * lsf, double dt, int rows, int cols, int pges, double dx, double dy, double dz)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double numericalFlux = 0;
double v_upwind, v_dnwind; // speed in the upwind and downwind direction
int dnwind, upwind;
// use linear approximation to calculate speed at the boundary
dnwind = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
upwind = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
v_dnwind = (vx[dnwind] + vx[ind]) / 2.0;
v_upwind = (vx[upwind] + vx[ind]) / 2.0;
numericalFlux += (min2(0,v_dnwind) * lsf[dnwind] - max2(0,v_dnwind) * lsf[ind]) * dy * dz;
numericalFlux += (max2(0,v_upwind) * lsf[upwind] - min2(0,v_upwind) * lsf[ind]) * dy * dz;
dnwind = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
upwind = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
v_dnwind = (vy[dnwind] + vy[ind]) / 2.0;
v_upwind = (vy[upwind] + vy[ind]) / 2.0;
numericalFlux += (min2(0,v_dnwind) * lsf[dnwind] - max2(0,v_dnwind) * lsf[ind]) * dx * dz;
numericalFlux += (max2(0,v_upwind) * lsf[upwind] - min2(0,v_upwind) * lsf[ind]) * dx * dz;
dnwind = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
upwind = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
v_dnwind = (vz[dnwind] + vz[ind]) / 2.0;
v_upwind = (vz[upwind] + vz[ind]) / 2.0;
numericalFlux += (min2(0,v_dnwind) * lsf[dnwind] - max2(0,v_dnwind) * lsf[ind]) * dx * dy;
numericalFlux += (max2(0,v_upwind) * lsf[upwind] - min2(0,v_upwind) * lsf[ind]) * dx * dy;
step[ind] = numericalFlux * dt / (dx * dy * dz);
}
|
#include <hip/hip_runtime.h>
/*******************************************************************************
* several useful gpu functions will be defined in this file to calculate
* weno derivatives
******************************************************************************/
__device__ inline
double max2(double x, double y)
{
return (x<y) ? y : x;
}
__device__ inline
double min2(double x, double y)
{
return (x<y) ? x : y;
}
// convert subindex to linear index
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
int row_idxn = min2(rows-1, max2(0, row_idx));
int col_idxn = min2(cols-1, max2(0, col_idx));
int pge_idxn = min2(pges-1, max2(0, pge_idx));
int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
return ind;
}
__device__ inline
double weno_onesided_derivative(double v1, double v2, double v3, double v4, double v5)
{
// different choices of ENO derivatives
double phi1 = 1./3. * v1 - 7./6. * v2 + 11./6. * v3;
double phi2 = -1./6. * v2 + 5./6. * v3 + 1./3. * v4;
double phi3 = 1./3. * v3 + 5./6. * v4 - 1./6. * v5;
// smoothness parameter
double S1 = 13./12. * pow((v1 - 2*v2 + v3),2) + 1./4. * pow((v1 - 4*v2 + 3*v3),2);
double S2 = 13./12. * pow((v2 - 2*v3 + v4),2) + 1./4. * pow((v2 - v4),2);
double S3 = 13./12. * pow((v3 - 2*v4 + v5),2) + 1./4. * pow((3*v3 - 4*v4 + v5),2);
double epsilon = 1e-6;
double alpha1 = 0.1 / pow( (S1 + epsilon), 2);
double alpha2 = 0.6 / pow( (S2 + epsilon), 2);
double alpha3 = 0.3 / pow( (S3 + epsilon), 2);
// weights for each stencil
double sum = alpha1 + alpha2 + alpha3;
double omega1 = alpha1 / sum;
double omega2 = alpha2 / sum;
double omega3 = alpha3 / sum;
return (omega1*phi1 + omega2*phi2 + omega3*phi3);
}
__global__
void weno_derivative(double * WENO_back_x, double * WENO_fore_x, double * WENO_back_y, double * WENO_fore_y, double * WENO_back_z, double * WENO_fore_z, double const * lsf, int rows, int cols, int pges, double dx, double dy, double dz)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
double v1 = (lsf[left2] - lsf[left3]) / dx;
double v2 = (lsf[left1] - lsf[left2]) / dx;
double v3 = (lsf[ind] - lsf[left1]) / dx;
double v4 = (lsf[rght1] - lsf[ind]) / dx;
double v5 = (lsf[rght2] - lsf[rght1]) / dx;
double v6 = (lsf[rght3] - lsf[rght2]) / dx;
WENO_back_x[ind] = weno_onesided_derivative(v1,v2,v3,v4,v5);
WENO_fore_x[ind] = weno_onesided_derivative(v6,v5,v4,v3,v2);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
v1 = (lsf[back2] - lsf[back3]) / dy;
v2 = (lsf[back1] - lsf[back2]) / dy;
v3 = (lsf[ind] - lsf[back1]) / dy;
v4 = (lsf[frnt1] - lsf[ind]) / dy;
v5 = (lsf[frnt2] - lsf[frnt1]) / dy;
v6 = (lsf[frnt3] - lsf[frnt2]) / dy;
WENO_back_y[ind] = weno_onesided_derivative(v1,v2,v3,v4,v5);
WENO_fore_y[ind] = weno_onesided_derivative(v6,v5,v4,v3,v2);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
v1 = (lsf[down2] - lsf[down3]) / dz;
v2 = (lsf[down1] - lsf[down2]) / dz;
v3 = (lsf[ind] - lsf[down1]) / dz;
v4 = (lsf[upup1] - lsf[ind]) / dz;
v5 = (lsf[upup2] - lsf[upup1]) / dz;
v6 = (lsf[upup3] - lsf[upup2]) / dz;
WENO_back_z[ind] = weno_onesided_derivative(v1,v2,v3,v4,v5);
WENO_fore_z[ind] = weno_onesided_derivative(v6,v5,v4,v3,v2);
}
// calculate numerical Hamiltonian for surface conservation law equation with input vx,vy,vz and
// surface divergence vd of v
__global__
void surface_conservation_step(double * step, double const * vx, double const * vy, double const * vz, double const * vd, double const * lsf, double dt, int rows, int cols, int pges, double dx, double dy, double dz)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
int rght1 = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
int rght2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
int rght3 = sub2ind(row_idx, col_idx+3, pge_idx, rows, cols, pges);
int left1 = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
int left3 = sub2ind(row_idx, col_idx-3, pge_idx, rows, cols, pges);
double v1 = (lsf[left2] - lsf[left3]) / dx;
double v2 = (lsf[left1] - lsf[left2]) / dx;
double v3 = (lsf[ind] - lsf[left1]) / dx;
double v4 = (lsf[rght1] - lsf[ind]) / dx;
double v5 = (lsf[rght2] - lsf[rght1]) / dx;
double v6 = (lsf[rght3] - lsf[rght2]) / dx;
double xL= weno_onesided_derivative(v1,v2,v3,v4,v5);
double xR= weno_onesided_derivative(v6,v5,v4,v3,v2);
int frnt1 = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
int frnt2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
int frnt3 = sub2ind(row_idx+3, col_idx, pge_idx, rows, cols, pges);
int back1 = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
int back3 = sub2ind(row_idx-3, col_idx, pge_idx, rows, cols, pges);
v1 = (lsf[back2] - lsf[back3]) / dy;
v2 = (lsf[back1] - lsf[back2]) / dy;
v3 = (lsf[ind] - lsf[back1]) / dy;
v4 = (lsf[frnt1] - lsf[ind]) / dy;
v5 = (lsf[frnt2] - lsf[frnt1]) / dy;
v6 = (lsf[frnt3] - lsf[frnt2]) / dy;
double yB = weno_onesided_derivative(v1,v2,v3,v4,v5);
double yF = weno_onesided_derivative(v6,v5,v4,v3,v2);
int upup1 = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
int upup2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
int upup3 = sub2ind(row_idx, col_idx, pge_idx+3, rows, cols, pges);
int down1 = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
int down3 = sub2ind(row_idx, col_idx, pge_idx-3, rows, cols, pges);
v1 = (lsf[down2] - lsf[down3]) / dz;
v2 = (lsf[down1] - lsf[down2]) / dz;
v3 = (lsf[ind] - lsf[down1]) / dz;
v4 = (lsf[upup1] - lsf[ind]) / dz;
v5 = (lsf[upup2] - lsf[upup1]) / dz;
v6 = (lsf[upup3] - lsf[upup2]) / dz;
double zD = weno_onesided_derivative(v1,v2,v3,v4,v5);
double zU = weno_onesided_derivative(v6,v5,v4,v3,v2);
step[ind] = (min2(0,vx[ind]) * xR + max2(0,vx[ind]) * xL +
min2(0,vy[ind]) * yF + max2(0,vy[ind]) * yB +
min2(0,vz[ind]) * zU + max2(0,vz[ind]) * zD +
lsf[ind] * vd[ind]) * dt ;
}
// calculate numerical Hamiltonian for surface conservation law equation with finite volume method
// assuming c field and velocity field are already extended
__global__
void spatial_finite_volume_step(double * step, double const * vx, double const * vy, double const * vz, double const * lsf, double dt, int rows, int cols, int pges, double dx, double dy, double dz)
{
int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
return;
}
int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
double numericalFlux = 0;
double v_upwind, v_dnwind; // speed in the upwind and downwind direction
int dnwind, upwind;
// use linear approximation to calculate speed at the boundary
dnwind = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
upwind = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
v_dnwind = (vx[dnwind] + vx[ind]) / 2.0;
v_upwind = (vx[upwind] + vx[ind]) / 2.0;
numericalFlux += (min2(0,v_dnwind) * lsf[dnwind] - max2(0,v_dnwind) * lsf[ind]) * dy * dz;
numericalFlux += (max2(0,v_upwind) * lsf[upwind] - min2(0,v_upwind) * lsf[ind]) * dy * dz;
dnwind = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
upwind = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
v_dnwind = (vy[dnwind] + vy[ind]) / 2.0;
v_upwind = (vy[upwind] + vy[ind]) / 2.0;
numericalFlux += (min2(0,v_dnwind) * lsf[dnwind] - max2(0,v_dnwind) * lsf[ind]) * dx * dz;
numericalFlux += (max2(0,v_upwind) * lsf[upwind] - min2(0,v_upwind) * lsf[ind]) * dx * dz;
dnwind = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
upwind = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
v_dnwind = (vz[dnwind] + vz[ind]) / 2.0;
v_upwind = (vz[upwind] + vz[ind]) / 2.0;
numericalFlux += (min2(0,v_dnwind) * lsf[dnwind] - max2(0,v_dnwind) * lsf[ind]) * dx * dy;
numericalFlux += (max2(0,v_upwind) * lsf[upwind] - min2(0,v_upwind) * lsf[ind]) * dx * dy;
step[ind] = numericalFlux * dt / (dx * dy * dz);
}
|
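A hedged aside on the pair above: a minimal host-side launcher sketch for the converted weno_derivative kernel, assuming the kernel definition above is visible in the same translation unit. The 4x4x4 block shape and the rounded-up grid are illustrative choices only, not part of the original source.
#include <hip/hip_runtime.h>
// Hypothetical launcher; block shape and grid rounding are assumptions.
void launch_weno_derivative(double *bx, double *fx, double *by, double *fy,
                            double *bz, double *fz, double const *lsf,
                            int rows, int cols, int pges,
                            double dx, double dy, double dz)
{
    dim3 block(4, 4, 4);
    dim3 grid((rows + block.x - 1) / block.x,
              (cols + block.y - 1) / block.y,
              (pges + block.z - 1) / block.z);
    weno_derivative<<<grid, block>>>(bx, fx, by, fy, bz, fz, lsf,
                                     rows, cols, pges, dx, dy, dz);
    hipDeviceSynchronize(); // wait for the kernel before using the outputs
}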
Convert the following CUDA code to AMD GPU code:
cuda
//
// Created by brian on 11/20/18.
//
#include "complex.cuh"
#include <cmath>
const float PI = 3.14159265358979f;
__device__ __host__ Complex::Complex() : real(0.0f), imag(0.0f) {}
__device__ __host__ Complex::Complex(float r) : real(r), imag(0.0f) {}
__device__ __host__ Complex::Complex(float r, float i) : real(r), imag(i) {}
__device__ __host__ Complex Complex::operator+(const Complex &b) const {
Complex a;
a.real = b.real + this->real;
a.imag = b.imag + this->imag;
return a;
}
__device__ __host__ Complex Complex::operator-(const Complex &b) const {
Complex a;
a.real = this->real - b.real ;
a.imag = this->imag - b.imag ;
return a;
}
__device__ __host__ Complex Complex::operator*(const Complex &b) const {
Complex a;
a.real = b.real * this->real - b.imag*this->imag;
a.imag = b.imag * this->real + b.real*this->imag;
return a;
}
__device__ __host__ Complex Complex::mag() const {
return Complex(sqrt(real*real + imag*imag));
}
__device__ __host__ Complex Complex::angle() const {
return Complex(atan2(imag,real)*360/(2*M_PI));
}
__device__ __host__ Complex Complex::conj() const {
return Complex(real,-imag);
}
std::ostream& operator<< (std::ostream& os, const Complex& rhs) {
Complex c(rhs);
if(fabsf(rhs.imag) < 1e-10) c.imag = 0.0f;
if(fabsf(rhs.real) < 1e-10) c.real = 0.0f;
if(c.imag == 0) {
os << c.real;
}
else {
os << "(" << c.real << "," << c.imag << ")";
}
return os;
}
|
//
// Created by brian on 11/20/18.
//
#pragma once
#include <hip/hip_runtime.h>
#include <iostream>
class Complex {
public:
__device__ __host__ Complex();
__device__ __host__ Complex(float r, float i);
__device__ __host__ Complex(float r);
__device__ __host__ Complex operator+(const Complex& b) const;
__device__ __host__ Complex operator-(const Complex& b) const;
__device__ __host__ Complex operator*(const Complex& b) const;
__device__ __host__ Complex mag() const;
__device__ __host__ Complex angle() const;
__device__ __host__ Complex conj() const;
float real;
float imag;
};
std::ostream& operator<<(std::ostream& os, const Complex& rhs);
|
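A hedged usage sketch for the hipified Complex type above; the kernel name, the array layout, and the assumption that the header keeps the name complex.cuh are illustrative, not part of the original pair.
#include <hip/hip_runtime.h>
#include "complex.cuh"
// Hypothetical kernel: element-wise product of two complex arrays,
// exercising Complex::operator* from the class above.
__global__ void complexMul(const Complex *a, const Complex *b, Complex *out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = a[i] * b[i];
}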
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <cuda_runtime.h>
/*
Application adds two vectors declared in the code
*/
__global__ void vecAdd(int* a, int* b , int* c, int size){
// calculate thread id
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < size){
c[id] = a[id] + b[id];
}
}
void printVector(int* vec, int size){
printf("[%d", vec[0]);
for(int i=1;i<size; i++){
printf(", %d", vec[i]);
}
printf("]\n");
}
int main(int argc, char**argv){
int size = 5;
size_t vectorSize = size * sizeof(int);
//initialize host variables
int* h_vecA = (int*) malloc(vectorSize);
int* h_vecB = (int*) malloc(vectorSize);
int* h_vecResult = (int*) malloc(vectorSize);
for(int i = 0; i < size; i++){
h_vecA[i] = i;
h_vecB[i] = i*i;
}
// initialize device variables
int * d_vecA, *d_vecB, *d_vecResult;
cudaMalloc(&d_vecA, vectorSize);
cudaMalloc(&d_vecB, vectorSize);
cudaMalloc(&d_vecResult, vectorSize);
cudaMemcpy(d_vecA, h_vecA, vectorSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_vecB, h_vecB, vectorSize, cudaMemcpyHostToDevice);
dim3 blocksPerGrid(1, 1, 1);
dim3 threadsPerBlock(size, 1, 1);
vecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_vecA, d_vecB, d_vecResult, size);
// copy the result to the device
cudaMemcpy(h_vecResult, d_vecResult, vectorSize, cudaMemcpyDeviceToHost);
printf("The result: \n");
printVector(h_vecResult, size);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
exit(-1);
}
return 0;
}
|
#include <stdio.h>
#include <hip/hip_runtime.h>
/*
Application adds two vectors declared in the code
*/
__global__ void vecAdd(int* a, int* b , int* c, int size){
// calculate thread id
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < size){
c[id] = a[id] + b[id];
}
}
void printVector(int* vec, int size){
printf("[%d", vec[0]);
for(int i=1;i<size; i++){
printf(", %d", vec[i]);
}
printf("]\n");
}
int main(int argc, char**argv){
int size = 5;
size_t vectorSize = size * sizeof(int);
//initialize host variables
int* h_vecA = (int*) malloc(vectorSize);
int* h_vecB = (int*) malloc(vectorSize);
int* h_vecResult = (int*) malloc(vectorSize);
for(int i = 0; i < size; i++){
h_vecA[i] = i;
h_vecB[i] = i*i;
}
// initialize device variables
int * d_vecA, *d_vecB, *d_vecResult;
hipMalloc(&d_vecA, vectorSize);
hipMalloc(&d_vecB, vectorSize);
hipMalloc(&d_vecResult, vectorSize);
hipMemcpy(d_vecA, h_vecA, vectorSize, hipMemcpyHostToDevice);
hipMemcpy(d_vecB, h_vecB, vectorSize, hipMemcpyHostToDevice);
dim3 blocksPerGrid(1, 1, 1);
dim3 threadsPerBlock(size, 1, 1);
vecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_vecA, d_vecB, d_vecResult, size);
// copy the result to the device
hipMemcpy(h_vecResult, d_vecResult, vectorSize, hipMemcpyDeviceToHost);
printf("The result: \n");
printVector(h_vecResult, size);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) );
exit(-1);
}
return 0;
}
|
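A hedged aside on the converted vector-add above: the single error check at the end could be applied to every HIP call with a small macro. This is a sketch only; the name HIP_CHECK is an assumption, not an existing API.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// Illustrative error-checking wrapper for HIP runtime calls.
#define HIP_CHECK(call)                                              \
    do {                                                             \
        hipError_t err_ = (call);                                    \
        if (err_ != hipSuccess) {                                    \
            fprintf(stderr, "HIP error %s at %s:%d\n",               \
                    hipGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)
// Example use: HIP_CHECK(hipMalloc(&d_vecA, vectorSize));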
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N (32)
__global__ void inc(int *array, int len)
{
int i;
for (i = 0; i < len; i++)
array[i]++;
return;
}
int main(int argc, char *argv[])
{
int i;
int arrayH[N];
int *arrayD;
size_t array_size;
for (i=0; i<N; i++) arrayH[i] = i;
printf("input: ");
for (i=0; i<N; i++)
printf("%d ", arrayH[i]);
printf("\n");
array_size = sizeof(int) * N;
cudaMalloc((void **)&arrayD, array_size);
cudaMemcpy(arrayD, arrayH, array_size,
cudaMemcpyHostToDevice);
inc<<<1, 1>>>(arrayD, N);
cudaMemcpy(arrayH, arrayD, array_size,
cudaMemcpyDeviceToHost);
printf("output: ");
for (i=0; i<N; i++)
printf("%d ", arrayH[i]);
printf("\n");
return 0;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define N (32)
__global__ void inc(int *array, int len)
{
int i;
for (i = 0; i < len; i++)
array[i]++;
return;
}
int main(int argc, char *argv[])
{
int i;
int arrayH[N];
int *arrayD;
size_t array_size;
for (i=0; i<N; i++) arrayH[i] = i;
printf("input: ");
for (i=0; i<N; i++)
printf("%d ", arrayH[i]);
printf("\n");
array_size = sizeof(int) * N;
hipMalloc((void **)&arrayD, array_size);
hipMemcpy(arrayD, arrayH, array_size,
hipMemcpyHostToDevice);
inc<<<1, 1>>>(arrayD, N);
hipMemcpy(arrayH, arrayD, array_size,
hipMemcpyDeviceToHost);
printf("output: ");
for (i=0; i<N; i++)
printf("%d ", arrayH[i]);
printf("\n");
return 0;
}
|
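A hedged note on the pair above: the inc kernel walks the whole array from a single thread, so the conversion preserves a serial loop on the GPU. A possible parallel variant (an assumption, not in the source) gives each thread one element.
#include <hip/hip_runtime.h>
// Hypothetical one-element-per-thread variant of inc.
__global__ void inc_parallel(int *array, int len)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < len)
        array[i]++;
}
// Possible launch for N = 32:  inc_parallel<<<1, 32>>>(arrayD, N);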
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void add(float *c, float* a, float *b, int values){
int blockD = blockDim.x;
int blockX = blockIdx.x;
int threadX = threadIdx.x;
int i = blockX * blockD + threadX;
if(i < values)
c[i] = a[i] + b[i];
//printf("Hello Im thread %d in block %d of %d threads\n", threadX, blockX, blockD);
}
__host__ int main (int argc, char *argv[]){
int numValues = atoi(argv[1]);
int blocksize = atoi(argv[2]);
printf("Using program with %d values and %d blocksize\n", numValues, blocksize);
float *c = (float*)malloc(numValues*sizeof(float));
float *a = (float*)malloc(numValues*sizeof(float));
float *b = (float*)malloc(numValues*sizeof(float));
float *c_d, *b_d, *a_d;
cudaMalloc((void**)&c_d, numValues*sizeof(float));
cudaMalloc((void**)&b_d, numValues*sizeof(float));
cudaMalloc((void**)&a_d, numValues*sizeof(float));
for(int i=0; i < numValues; i++){
c[i] = 0.0;
a[i] = 3.0;
b[i] = 5.0;
}
printf("Done init\n");
int numBlocks = numValues/blocksize;
printf("Copying arrays from host to device\n");
cudaMemcpy(a_d, a, numValues*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, numValues*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c, numValues*sizeof(float), cudaMemcpyHostToDevice);
add<<<numBlocks,blocksize>>>(c_d, a_d, b_d, numValues);
cudaDeviceSynchronize();
printf("Copying values back to host\n");
cudaMemcpy(c, c_d, numValues*sizeof(float), cudaMemcpyDeviceToHost);
for(int i=0; i < numValues; i++)
printf("C[%d] = %f\n", i, c[i]);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void add(float *c, float* a, float *b, int values){
int blockD = blockDim.x;
int blockX = blockIdx.x;
int threadX = threadIdx.x;
int i = blockX * blockD + threadX;
if(i < values)
c[i] = a[i] + b[i];
//printf("Hello Im thread %d in block %d of %d threads\n", threadX, blockX, blockD);
}
__host__ int main (int argc, char *argv[]){
int numValues = atoi(argv[1]);
int blocksize = atoi(argv[2]);
printf("Using program with %d values and %d blocksize\n", numValues, blocksize);
float *c = (float*)malloc(numValues*sizeof(float));
float *a = (float*)malloc(numValues*sizeof(float));
float *b = (float*)malloc(numValues*sizeof(float));
float *c_d, *b_d, *a_d;
hipMalloc((void**)&c_d, numValues*sizeof(float));
hipMalloc((void**)&b_d, numValues*sizeof(float));
hipMalloc((void**)&a_d, numValues*sizeof(float));
for(int i=0; i < numValues; i++){
c[i] = 0.0;
a[i] = 3.0;
b[i] = 5.0;
}
printf("Done init\n");
int numBlocks = numValues/blocksize;
printf("Copying arrays from host to device\n");
hipMemcpy(a_d, a, numValues*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(b_d, b, numValues*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(c_d, c, numValues*sizeof(float), hipMemcpyHostToDevice);
add<<<numBlocks,blocksize>>>(c_d, a_d, b_d, numValues);
hipDeviceSynchronize();
printf("Copying values back to host\n");
hipMemcpy(c, c_d, numValues*sizeof(float), hipMemcpyDeviceToHost);
for(int i=0; i < numValues; i++)
printf("C[%d] = %f\n", i, c[i]);
return 0;
}
|
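One hedged caveat about the example above (it applies equally to the CUDA and HIP versions): numBlocks = numValues/blocksize truncates, so trailing elements are never launched when numValues is not a multiple of blocksize. A sketch of a rounded-up launch, relying on the existing i < values guard in the kernel:
// Hypothetical change: round the grid size up so the tail of the array is covered.
int numBlocks = (numValues + blocksize - 1) / blocksize;
add<<<numBlocks, blocksize>>>(c_d, a_d, b_d, numValues);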
Convert the following CUDA code to AMD GPU code:
cuda
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.0005
////////////////////////////////////////////////////////////////////////////////
// Row convolution kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(float *d_Dst, float *d_Src, float *d_Filter, int imageW, int imageH, int filterR){
int k;
float sum=0;
int row=blockDim.y*blockIdx.y+threadIdx.y+filterR;
int col=blockDim.x*blockIdx.x+threadIdx.x+filterR;
int newImageW=imageW+filterR*2;
for (k = -filterR; k <= filterR; k++) {
int d = col+ k;
sum += d_Src[row *newImageW + d] * d_Filter[filterR - k];
}
d_Dst[row *newImageW + col] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(float *d_Dst, float *d_Src, float *d_Filter, int imageW, int imageH, int filterR){
int k;
float sum=0;
int row=blockDim.y*blockIdx.y+threadIdx.y+filterR;
int col=blockDim.x*blockIdx.x+threadIdx.x+filterR;
int newImageW =imageW+filterR*2;
for (k = -filterR; k <= filterR; k++) {
int d = row+ k;
sum += d_Src[col +newImageW* d] * d_Filter[filterR - k];
}
d_Dst[row * newImageW + col] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_PaddingMatrix,
*h_Buffer,
*h_OutputCPU,
*h_OutputGPU,
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU;
struct timespec tv1, tv2;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int imageW;
int imageH;
unsigned int i,j;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
  // The user supplies imageW and imageH and we assume they are equal,
  // i.e. imageW = imageH = N, where N is given by the user.
  // For simplicity we assume square images.
// printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
// scanf("%d", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
  // It would be a good idea to also check the results of the mallocs...
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
if(h_Filter==NULL){
printf("Allocation failed\n");
return 0;
}
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
if(h_Input==NULL){
printf("Allocation failed\n");
return 0;
}
h_PaddingMatrix = (float *)malloc((imageW+filter_radius*2 )*(2*filter_radius+ imageH) * sizeof(float));
  if(h_PaddingMatrix==NULL){
printf("Allocation failed\n");
return 0;
}
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
if(h_Buffer==NULL){
printf("Allocation failed\n");
return 0;
}
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
if(h_OutputCPU==NULL){
printf("Allocation failed\n");
return 0;
}
h_OutputGPU=(float *)malloc((imageW+2*filter_radius) * (imageH+2*filter_radius) * sizeof(float));
if(h_OutputGPU==NULL){
printf("Allocation failed \n");
cudaDeviceReset();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
  // Memory allocation on the device
////////////////////////////////////////////////////////////////////////////////
cudaMalloc(&d_Filter,FILTER_LENGTH*sizeof(float));
cudaMalloc(&d_Input,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(float));
cudaMalloc(&d_Buffer,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(float));
cudaMalloc(&d_OutputGPU,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(float));
if(d_Filter==NULL || d_Input==NULL || d_Buffer==NULL || d_OutputGPU==NULL){
printf("Cuda Malloc Failed\n");
return 0;
}
  // 'h_Filter' is the filter used for the convolution and is initialized
  // randomly. 'h_Input' is the image on which the convolution is performed
  // and it is also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 16);
}
  // The following is the part that runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
  convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
  convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
printf ("CPU time = %10g seconds\n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
dim3 dimGrid(imageW/8,imageH/8);
dim3 dimBlock(8,8);
for(i=0;i<(imageW+2*filter_radius)*(imageW+2*filter_radius);i++){
h_PaddingMatrix[i]=0;
}
for(i=0;i<imageW;i++){
for(j=0;j<imageW;j++){
h_PaddingMatrix[(i+filter_radius)*(2*filter_radius+imageW)+j+filter_radius]=h_Input[i*imageW+j];
}
}
printf("GPU computation... \n");
cudaMemcpy(d_Filter,h_Filter,FILTER_LENGTH*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_Input,h_PaddingMatrix,(imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(start,0);
convolutionRowGPU <<< dimGrid,dimBlock >>>(d_Buffer,d_Input, d_Filter, imageW, imageH, filter_radius);
cudaThreadSynchronize();
cudaError_t error=cudaGetLastError();
if(error!=cudaSuccess){
printf("Cuda Error:%s\n",cudaGetErrorString(error));
cudaDeviceReset();
return 0;
}
convolutionColumnGPU <<< dimGrid,dimBlock >>>(d_OutputGPU,d_Buffer, d_Filter, imageW, imageH, filter_radius);
cudaThreadSynchronize();
error=cudaGetLastError();
if(error!=cudaSuccess){
printf("Cuda Error:%s\n",cudaGetErrorString(error));
cudaDeviceReset();
return 0;
}
cudaEventRecord(stop,0);
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed,start,stop);
printf("GPU time %f seconds.\n",elapsed/1000);
cudaMemcpy(h_OutputGPU,d_OutputGPU,(imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(float),cudaMemcpyDeviceToHost);
  // Compare the GPU and CPU results; if even one value exceeds the accuracy
  // we have defined, then we have an error and may terminate the program
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
cudaFree(d_OutputGPU);
cudaFree(d_Buffer);
cudaFree(d_Input);
  cudaFree(d_Filter);
  // Do a device reset just in case... Remove the comment once you have implemented the CUDA part
cudaDeviceReset();
return 0;
}
|
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.0005
////////////////////////////////////////////////////////////////////////////////
// Row convolution kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionRowGPU(float *d_Dst, float *d_Src, float *d_Filter, int imageW, int imageH, int filterR){
int k;
float sum=0;
int row=blockDim.y*blockIdx.y+threadIdx.y+filterR;
int col=blockDim.x*blockIdx.x+threadIdx.x+filterR;
int newImageW=imageW+filterR*2;
for (k = -filterR; k <= filterR; k++) {
int d = col+ k;
sum += d_Src[row *newImageW + d] * d_Filter[filterR - k];
}
d_Dst[row *newImageW + col] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void convolutionColumnGPU(float *d_Dst, float *d_Src, float *d_Filter, int imageW, int imageH, int filterR){
int k;
float sum=0;
int row=blockDim.y*blockIdx.y+threadIdx.y+filterR;
int col=blockDim.x*blockIdx.x+threadIdx.x+filterR;
int newImageW =imageW+filterR*2;
for (k = -filterR; k <= filterR; k++) {
int d = row+ k;
sum += d_Src[col +newImageW* d] * d_Filter[filterR - k];
}
d_Dst[row * newImageW + col] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = x + k;
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
float sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_PaddingMatrix,
*h_Buffer,
*h_OutputCPU,
*h_OutputGPU,
*d_Filter,
*d_Input,
*d_Buffer,
*d_OutputGPU;
struct timespec tv1, tv2;
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int imageW;
int imageH;
unsigned int i,j;
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW);
  // The user supplies imageW and imageH and we assume they are equal,
  // i.e. imageW = imageH = N, where N is given by the user.
  // For simplicity we assume square images.
// printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
// scanf("%d", &imageW);
imageH = imageW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
  // It would be a good idea to also check the results of the mallocs...
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
if(h_Filter==NULL){
printf("Allocation failed\n");
return 0;
}
h_Input = (float *)malloc(imageW * imageH * sizeof(float));
if(h_Input==NULL){
printf("Allocation failed\n");
return 0;
}
h_PaddingMatrix = (float *)malloc((imageW+filter_radius*2 )*(2*filter_radius+ imageH) * sizeof(float));
  if(h_PaddingMatrix==NULL){
printf("Allocation failed\n");
return 0;
}
h_Buffer = (float *)malloc(imageW * imageH * sizeof(float));
if(h_Buffer==NULL){
printf("Allocation failed\n");
return 0;
}
h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
if(h_OutputCPU==NULL){
printf("Allocation failed\n");
return 0;
}
h_OutputGPU=(float *)malloc((imageW+2*filter_radius) * (imageH+2*filter_radius) * sizeof(float));
if(h_OutputGPU==NULL){
printf("Allocation failed \n");
hipDeviceReset();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
  // Memory allocation on the device
////////////////////////////////////////////////////////////////////////////////
hipMalloc(&d_Filter,FILTER_LENGTH*sizeof(float));
hipMalloc(&d_Input,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(float));
hipMalloc(&d_Buffer,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(float));
hipMalloc(&d_OutputGPU,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(float));
if(d_Filter==NULL || d_Input==NULL || d_Buffer==NULL || d_OutputGPU==NULL){
printf("Cuda Malloc Failed\n");
return 0;
}
  // 'h_Filter' is the filter used for the convolution and is initialized
  // randomly. 'h_Input' is the image on which the convolution is performed
  // and it is also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (float)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (float)rand() / ((float)RAND_MAX / 16);
}
  // The following is the part that runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
  convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
  convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
printf ("CPU time = %10g seconds\n",
(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double) (tv2.tv_sec - tv1.tv_sec));
dim3 dimGrid(imageW/8,imageH/8);
dim3 dimBlock(8,8);
for(i=0;i<(imageW+2*filter_radius)*(imageW+2*filter_radius);i++){
h_PaddingMatrix[i]=0;
}
for(i=0;i<imageW;i++){
for(j=0;j<imageW;j++){
h_PaddingMatrix[(i+filter_radius)*(2*filter_radius+imageW)+j+filter_radius]=h_Input[i*imageW+j];
}
}
printf("GPU computation... \n");
hipMemcpy(d_Filter,h_Filter,FILTER_LENGTH*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_Input,h_PaddingMatrix,(imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(start,0);
convolutionRowGPU <<< dimGrid,dimBlock >>>(d_Buffer,d_Input, d_Filter, imageW, imageH, filter_radius);
hipDeviceSynchronize();
hipError_t error=hipGetLastError();
if(error!=hipSuccess){
printf("Cuda Error:%s\n",hipGetErrorString(error));
hipDeviceReset();
return 0;
}
convolutionColumnGPU <<< dimGrid,dimBlock >>>(d_OutputGPU,d_Buffer, d_Filter, imageW, imageH, filter_radius);
hipDeviceSynchronize();
error=hipGetLastError();
if(error!=hipSuccess){
printf("Cuda Error:%s\n",hipGetErrorString(error));
hipDeviceReset();
return 0;
}
hipEventRecord(stop,0);
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed,start,stop);
printf("GPU time %f seconds.\n",elapsed/1000);
hipMemcpy(h_OutputGPU,d_OutputGPU,(imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(float),hipMemcpyDeviceToHost);
  // Compare the GPU and CPU results; if even one value exceeds the accuracy
  // we have defined, then we have an error and may terminate the program
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
hipFree(d_OutputGPU);
hipFree(d_Buffer);
hipFree(d_Input);
  hipFree(d_Filter);
  // Do a device reset just in case... Remove the comment once you have implemented the CUDA part
hipDeviceReset();
return 0;
}
|
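The comments in the pair above call for a GPU/CPU accuracy comparison that neither version implements. A hedged sketch of what that check might look like, using the padded layout of h_OutputGPU; the loop variables and the exact indexing are assumptions:
// Hypothetical verification loop comparing the padded GPU output to the CPU result.
int errors = 0;
for (int r = 0; r < imageH; r++) {
    for (int c = 0; c < imageW; c++) {
        float gpu = h_OutputGPU[(r + filter_radius) * (imageW + 2 * filter_radius) + c + filter_radius];
        float cpu = h_OutputCPU[r * imageW + c];
        if (ABS(gpu - cpu) > accuracy)
            errors++;
    }
}
printf("%d values exceeded the accuracy threshold\n", errors);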
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
#pragma once
/*
// if the video card has enough capacity for all of the maps (WORKS)
*/
__global__ void MapAdd1(int* one, const int* result, unsigned int mx, unsigned int width)
{
const unsigned int ppp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int rix = ppp % width;
const unsigned int riy = (ppp / mx) + ((ppp % mx) / width);
const unsigned int xxx = riy * width + rix;
const unsigned int ddx = riy * mx + rix;
one[ddx] = result[xxx];
}
|
#include "includes.h"
#pragma once
#include <hip/hip_runtime.h>
/*
// if the video card has enough capacity for all of the maps (WORKS)
*/
__global__ void MapAdd1(int* one, const int* result, unsigned int mx, unsigned int width)
{
const unsigned int ppp = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int rix = ppp % width;
const unsigned int riy = (ppp / mx) + ((ppp % mx) / width);
const unsigned int xxx = riy * width + rix;
const unsigned int ddx = riy * mx + rix;
one[ddx] = result[xxx];
}
|
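A hedged host-side sketch of one way the hipified MapAdd1 kernel might be driven; the 256-thread block size, the assumption that width*height elements are relocated, and the divisibility requirement are all guesses, since the kernel itself has no bounds check.
#include <hip/hip_runtime.h>
// Hypothetical driver for MapAdd1; sizes are illustrative only and must keep
// width*height an exact multiple of the block size (the kernel has no guard).
void run_map_add1(int *d_one, const int *d_result, unsigned int mx, unsigned int width, unsigned int height)
{
    unsigned int total = width * height;
    unsigned int threads = 256;
    MapAdd1<<<total / threads, threads>>>(d_one, d_result, mx, width);
    hipDeviceSynchronize();
}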
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
// Device code
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Initialise the input vectors
void initialise_input_vect(float* A, float* B, int N)
{
for(int i=0; i<N; i++){
A[i]=i;
B[i]=2*i;
}
}
// Host code
int main()
{
int N = 1000; // Number of elements to process
bool print_results = 0; // Boolean variable for printing the results
size_t size = N * sizeof(float);
//==========================================================================
// Get the GPUs properties:
// Device name, Compute Capability, Global Memory (GB) etc
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Compute Capability: %d.%d\n", prop.major, prop.minor);
printf(" Total Global Mem: %.1fGB\n\n", ((double)prop.totalGlobalMem/1073741824.0));
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Max Number of Threads per Block: %d\n", prop.maxThreadsPerBlock);
printf(" Max Number of Blocks allowed in x-dir: %d\n", prop.maxGridSize[0]);
printf(" Max Number of Blocks allowed in y-dir: %d\n", prop.maxGridSize[1]);
printf(" Max Number of Blocks allowed in z-dir: %d\n", prop.maxGridSize[2]);
printf(" Warp Size: %d\n", prop.warpSize);
printf("===============================================\n\n");
}
//==========================================================================
// Allocate input vectors h_A and h_B in host memory
float* h_A = new float[N];
float* h_B = new float[N];
float* h_C = new float[N];
// Initialize input vectors
initialise_input_vect(h_A, h_B, N);
// Allocate vectors in device memory
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel
int nThreadsPerBlock = 256;
int nblocks = (N / nThreadsPerBlock) + ((N % nThreadsPerBlock > 0) ? 1 : 0);
VecAdd<<<nblocks, nThreadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Print the results
if(print_results) for (int i=0; i<N; i++) printf("h_C[%d] = %2.2f \n", i, h_C[i] );
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
  delete[] h_A; delete[] h_B; delete[] h_C;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
// Device code
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
// Initialise the input vectors
void initialise_input_vect(float* A, float* B, int N)
{
for(int i=0; i<N; i++){
A[i]=i;
B[i]=2*i;
}
}
// Host code
int main()
{
int N = 1000; // Number of elements to process
bool print_results = 0; // Boolean variable for printing the results
size_t size = N * sizeof(float);
//==========================================================================
// Get the GPUs properties:
// Device name, Compute Capability, Global Memory (GB) etc
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Compute Capability: %d.%d\n", prop.major, prop.minor);
printf(" Total Global Mem: %.1fGB\n\n", ((double)prop.totalGlobalMem/1073741824.0));
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Max Number of Threads per Block: %d\n", prop.maxThreadsPerBlock);
printf(" Max Number of Blocks allowed in x-dir: %d\n", prop.maxGridSize[0]);
printf(" Max Number of Blocks allowed in y-dir: %d\n", prop.maxGridSize[1]);
printf(" Max Number of Blocks allowed in z-dir: %d\n", prop.maxGridSize[2]);
printf(" Warp Size: %d\n", prop.warpSize);
printf("===============================================\n\n");
}
//==========================================================================
// Allocate input vectors h_A and h_B in host memory
float* h_A = new float[N];
float* h_B = new float[N];
float* h_C = new float[N];
// Initialize input vectors
initialise_input_vect(h_A, h_B, N);
// Allocate vectors in device memory
float* d_A;
hipMalloc(&d_A, size);
float* d_B;
hipMalloc(&d_B, size);
float* d_C;
hipMalloc(&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Invoke kernel
int nThreadsPerBlock = 256;
int nblocks = (N / nThreadsPerBlock) + ((N % nThreadsPerBlock > 0) ? 1 : 0);
VecAdd<<<nblocks, nThreadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy result from device memory to host memory
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Print the results
if(print_results) for (int i=0; i<N; i++) printf("h_C[%d] = %2.2f \n", i, h_C[i] );
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Free host memory
  delete[] h_A; delete[] h_B; delete[] h_C;
}
|
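A hedged host-side check that could replace the optional printout in the pair above: with A[i]=i and B[i]=2*i, every result should equal 3*i (exact in float for this N). This snippet is an illustration, not part of the original answer.
// Hypothetical correctness check, placed after the hipMemcpy back to h_C.
bool ok = true;
for (int i = 0; i < N; i++) {
    if (h_C[i] != 3.0f * i) { ok = false; break; }
}
printf("VecAdd %s\n", ok ? "PASSED" : "FAILED");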
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926535897932
#define MAXEQNS 10 // maximum number of differential equations in the system
const int itermax10 = 2; // number of iterations to use for rk10
const int itermax12 = 1; // number of additional iterations to use for rk12
const int neqns = 2; // number of differential equations in the system
const double tol = 1.0e-10; // the error tolerance
const double tol10 = tol / 10;
const bool sho = true; // set sho to true if you want the simple harmonic oscillator results
// set sho to false, if you want the predator - prey results
// the following constants are the 10th order method's coefficients
const double a0 = 0;
__constant__ double a1 = 0.11747233803526765;
__constant__ double a2 = 0.35738424175967745;
__constant__ double a3 = 0.64261575824032255;
__constant__ double a4 = 0.88252766196473235;
const double a5 = 1.0000000000000000;
__constant__ double b10 = 0.047323231137709573;
__constant__ double b11 = 0.077952072407795078;
__constant__ double b12 = -0.010133421269900587;
__constant__ double b13 = 0.0028864915990617097;
__constant__ double b14 = -0.00055603583939812082;
__constant__ double b20 = 0.021779075831486075;
__constant__ double b21 = 0.22367959757928498;
__constant__ double b22 = 0.12204792759220492;
__constant__ double b23 = -0.012091266674498959;
__constant__ double b24 = 0.0019689074312004371;
__constant__ double b30 = 0.044887590835180592;
__constant__ double b31 = 0.15973856856089786;
__constant__ double b32 = 0.32285378852557547;
__constant__ double b33 = 0.12204792759220492;
__constant__ double b34 = -0.0069121172735362915;
__constant__ double b40 = 0.019343435528957094;
__constant__ double b41 = 0.22312684732165494;
__constant__ double b42 = 0.23418268877986459;
__constant__ double b43 = 0.32792261792646064;
__constant__ double b44 = 0.077952072407795078;
const double b50 = 0.066666666666666667;
const double b51 = 0.10981508874708385;
const double b52 = 0.37359383699761912;
const double b53 = 0.18126454003786724;
const double b54 = 0.26865986755076313;
const double c0 = 0.033333333333333333;
const double c1 = 0.18923747814892349;
const double c2 = 0.27742918851774318;
const double c3 = 0.27742918851774318;
const double c4 = 0.18923747814892349;
const double c5 = 0.033333333333333333;
// the following coefficients allow us to get rk12 internal xk values from rk10 fk values
__constant__ double g10 = 0.043407276098971173;
__constant__ double g11 = 0.049891561330903419;
__constant__ double g12 = -0.012483721919363355;
__constant__ double g13 = 0.0064848904066894701;
__constant__ double g14 = -0.0038158693974615597;
__constant__ double g15 = 0.0014039153409773882;
__constant__ double g20 = 0.030385164419638569;
__constant__ double g21 = 0.19605322645426044;
__constant__ double g22 = 0.047860687574395354;
__constant__ double g23 = -0.012887249003100515;
__constant__ double g24 = 0.0064058521980400821;
__constant__ double g25 = -0.0022420783785910372;
__constant__ double g30 = 0.032291666666666667;
__constant__ double g31 = 0.19311806292811784;
__constant__ double g32 = 0.25797759963091718;
__constant__ double g33 = 0.019451588886825999;
__constant__ double g34 = -0.0038805847791943522;
__constant__ double g35 = 0.0010416666666666667;
__constant__ double g40 = 0.035575411711924371;
__constant__ double g41 = 0.18283162595088341;
__constant__ double g42 = 0.29031643752084369;
__constant__ double g43 = 0.22956850094334782;
__constant__ double g44 = -0.0068157483053369507;
__constant__ double g45 = 0.0029481689136947641;
__constant__ double g50 = 0.031929417992355945;
__constant__ double g51 = 0.19305334754638505;
__constant__ double g52 = 0.27094429811105371;
__constant__ double g53 = 0.28991291043710653;
__constant__ double g54 = 0.13934591681802007;
__constant__ double g55 = -0.010073942765637839;
const double g60 = 0.033333333333333333;
const double g61 = 0.18923747814892349;
const double g62 = 0.27742918851774318;
const double g63 = 0.27742918851774318;
const double g64 = 0.18923747814892349;
const double g65 = 0.033333333333333333;
// the following constants are the 12th order method's coefficients
const double ah0 = 0.0;
const double ah1 = 0.084888051860716535;
const double ah2 = 0.26557560326464289;
const double ah3 = 0.50000000000000000;
const double ah4 = 0.73442439673535711;
const double ah5 = 0.91511194813928346;
const double ah6 = 1.0000000000000000;
__constant__ double bh10 = 0.033684534770907752;
__constant__ double bh11 = 0.057301749935629582;
__constant__ double bh12 = -0.0082444880936983822;
__constant__ double bh13 = 0.0029151263642014432;
__constant__ double bh14 = -0.00096482361331657787;
__constant__ double bh15 = 0.00019595249699271744;
__constant__ double bh20 = 0.015902242088596380;
__constant__ double bh21 = 0.16276437062291593;
__constant__ double bh22 = 0.096031583397703751;
__constant__ double bh23 = -0.011758319711158930;
__constant__ double bh24 = 0.0032543514515832418;
__constant__ double bh25 = -0.00061862458499748489;
__constant__ double bh30 = 0.031250000000000000;
__constant__ double bh31 = 0.11881843285766042;
__constant__ double bh32 = 0.24868761828096535;
__constant__ double bh33 = 0.11000000000000000;
__constant__ double bh34 = -0.010410996557394222;
__constant__ double bh35 = 0.0016549454187684515;
__constant__ double bh40 = 0.015902242088596380;
__constant__ double bh41 = 0.15809680304274781;
__constant__ double bh42 = 0.18880881534382426;
__constant__ double bh43 = 0.28087114502765051;
__constant__ double bh44 = 0.096031583397703751;
__constant__ double bh45 = -0.0052861921651656089;
__constant__ double bh50 = 0.033684534770907752;
__constant__ double bh51 = 0.11440754737426645;
__constant__ double bh52 = 0.24657204460460206;
__constant__ double bh53 = 0.20929436236889375;
__constant__ double bh54 = 0.25385170908498387;
__constant__ double bh55 = 0.057301749935629582;
const double bh60 = 0;
const double bh61 = 0.19581988897471611;
const double bh62 = 0.14418011102528389;
const double bh63 = 0.32000000000000000;
const double bh64 = 0.14418011102528389;
const double bh65 = 0.19581988897471611;
const double ch0 = 0.023809523809523810;
const double ch1 = 0.13841302368078297;
const double ch2 = 0.21587269060493131;
const double ch3 = 0.24380952380952381;
const double ch4 = 0.21587269060493131;
const double ch5 = 0.13841302368078297;
const double ch6 = 0.023809523809523810;
#define cudaErrorCheck(call) { cudaAssert(call,__FILE__,__LINE__); }
void cudaAssert(const cudaError err, const char *file, const int line)
{
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",
file, line, cudaGetErrorString(err) );
getchar();
exit(1);
}
}
//****************************************************************************
//Derivative kernel: takes pointers to x[], and f[] allocated on the device
__global__ void derKernel(double* device_x, double* device_f)
{
//2 elements in device_x represent 2 elements from individual arrays X1-X4;
//ie if thread id is 0 then the array number is 0x2 and work on elements tx*2 and tx*2 +1
int tx = threadIdx.x;
int xArrayNumber = tx *2;
device_f[xArrayNumber] = device_x[xArrayNumber+1];
__syncthreads();
device_f[xArrayNumber+1] = -device_x[xArrayNumber];
__syncthreads();
}
__global__ void guessKernel(double*device_X_Total, double* device_X_Not,double* device_F_Not, double h){
device_X_Total[threadIdx.x] = device_X_Not[threadIdx.x] + a1 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +2] = device_X_Not[threadIdx.x] + a2 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +4] = device_X_Not[threadIdx.x] + a3 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +6] = device_X_Not[threadIdx.x] + a4 * h * device_F_Not[threadIdx.x];
}
__global__ void order10Kernel(double*device_X_Total, double* device_X_Not, double *device_F_Not, double h, double *device_f)
{
int tx = threadIdx.x;
device_X_Total[tx]=device_X_Not[tx] + h*((b10 * device_F_Not[tx]) + (b11 * device_f[tx]) + (b12 * device_f[tx+2]) + (b13 * device_f[tx +4]) + (b14 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+2]=device_X_Not[tx] + h*((b20 * device_F_Not[tx]) + (b21 * device_f[tx]) + (b22 * device_f[tx+2]) + (b23 * device_f[tx +4]) + (b24 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+4]=device_X_Not[tx] + h*((b30 * device_F_Not[tx]) +( b31 * device_f[tx]) + (b32 * device_f[tx+2]) + (b33 * device_f[tx +4]) + (b34 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+6]=device_X_Not[tx] + h*((b40 * device_F_Not[tx]) + (b41 * device_f[tx]) +( b42 * device_f[tx+2]) + (b43 * device_f[tx +4]) +( b44 * device_f[tx +6]));
__syncthreads();
}
__global__ void Order10FkKernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f)
{
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((g10*device_F_Not[tx])+ (g11 * device_f[tx]) + (g12 * device_f[tx+2])+ (g13 * device_f[tx + 4]) + (g14 * device_f[tx+ 6])+ (g15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((g20*device_F_Not[tx])+ (g21 * device_f[tx]) + (g22 * device_f[tx+2])+ (g23 * device_f[tx + 4]) + (g24 * device_f[tx+ 6])+ (g25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((g30*device_F_Not[tx])+ (g31 * device_f[tx]) + (g32 * device_f[tx+2])+ (g33 * device_f[tx + 4]) + (g34 * device_f[tx+ 6])+ (g35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((g40*device_F_Not[tx])+ (g41 * device_f[tx]) + (g42 * device_f[tx+2])+ (g43 * device_f[tx + 4]) + (g44 * device_f[tx+ 6])+ (g45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((g50*device_F_Not[tx])+ (g51 * device_f[tx]) + (g52 * device_f[tx+2])+ (g53 * device_f[tx + 4]) + (g54 * device_f[tx+ 6])+ (g55 *device_f[tx+8]));
__syncthreads();
}
__global__ void Order12Kernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f){
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((bh10*device_F_Not[tx])+ (bh11 * device_f[tx]) + (bh12 * device_f[tx+2])+ (bh13 * device_f[tx + 4]) + (bh14 * device_f[tx+ 6])+ (bh15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((bh20*device_F_Not[tx])+ (bh21 * device_f[tx]) + (bh22 * device_f[tx+2])+ (bh23 * device_f[tx + 4]) + (bh24 * device_f[tx+ 6])+ (bh25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((bh30*device_F_Not[tx])+ (bh31 * device_f[tx]) + (bh32 * device_f[tx+2])+ (bh33 * device_f[tx + 4]) + (bh34 * device_f[tx+ 6])+ (bh35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((bh40*device_F_Not[tx])+ (bh41 * device_f[tx]) + (bh42 * device_f[tx+2])+ (bh43 * device_f[tx + 4]) + (bh44 * device_f[tx+ 6])+ (bh45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((bh50*device_F_Not[tx])+ (bh51 * device_f[tx]) + (bh52 * device_f[tx+2])+ (bh53 * device_f[tx + 4]) + (bh54 * device_f[tx+ 6])+ (bh55 *device_f[tx+8]));
__syncthreads();
}
//****************************************************************************
// the following function describes the ordinary differential equations
//** The function is still live for the non parallel function calls to der
//** still active in the program.
void der(double t, double x[], double f[]) {
if (sho) {
f[0] = x[1];
f[1] = -x[0];
}
else {
f[0] = x[0] * (2.0 - x[1]);
f[1] = x[1] * (x[0] - 1.0);
}
}
void rk1210() {
// Implicit Runge-Kutta of orders 12 and 10
double x0[MAXEQNS], x1[MAXEQNS], x2[MAXEQNS], x3[MAXEQNS], x4[MAXEQNS];
double x5[MAXEQNS], x6[MAXEQNS], xn10[MAXEQNS], xn12[MAXEQNS];
double t0, tf, h, hnew, est, esti, f0[MAXEQNS], f1[MAXEQNS], f2[MAXEQNS];
double f3[MAXEQNS], f4[MAXEQNS], f5[MAXEQNS], f6[MAXEQNS];
int iter;
bool finished = false; // becomes true when we have reached tf
if (sho) {
h = PI / 4.0; // initial guess for stepsize to use
x0[0] = 0.0; // initial value of first component
x0[1] = 1.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 2 * PI; // final t value, tf
}
else {
h = 1.0 / 2.0; // initial guess for stepsize to use
x0[0] = 2.0; // initial value of first component
x0[1] = 2.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 4.0; // final t value, tf
}
printf("Initial conditions are t0 = %8.5lf, x0[0] = %18.15lf, x0[1] = %18.15lf\n", t0, x0[0], x0[1]);
const int arraySize = 10; //there will be 8 elements being written from x1-x4 (Remaining 2 for when X5 is included);
int numOfXArrays =4;
double x_total[arraySize];
double f_total[arraySize];
while (!finished) { // keep going until we reach tf successfully
der(t0, x0, f0); // first, we will get 10th order results
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + a1*h*f0[i]; // just guess that solution is a straight line initially
// x2[i] = x0[i] + a2*h*f0[i]; // at the four internal points within the step
// x3[i] = x0[i] + a3*h*f0[i];
// x4[i] = x0[i] + a4*h*f0[i];
//}
//*************************************************************************************
double* device_x_total; double* device_x_not; double* device_f_not; //creating variables for the device
//allocating memory for device variables
cudaMalloc((void**) &device_x_total, arraySize * sizeof(double));
cudaMalloc((void**) &device_x_not, arraySize * sizeof(double));
cudaMalloc((void**) &device_f_not, arraySize * sizeof(double));
//copying contents of x0 and f0 to the device variables
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize *sizeof(double), cudaMemcpyHostToDevice);
guessKernel<<<1, neqns>>>(device_x_total, device_x_not, device_f_not, h);
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//****************************************************************************************
double* device_f; //creating variables for device;
//allocating memory for x[], and f[]
cudaMalloc((void**) &device_x_total, arraySize* sizeof(double));
cudaMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//*******Creating timers to test 4 arrays *********
/*cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);*/
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
/* cudaEventRecord(stop);*/
//copying data from device to host
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
/*cudaEventSynchronize(stop);
float milliseconds =0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Kernel took: %f milliseconds\n", milliseconds);
getchar();*/
//****************************************************************************************
cudaMalloc((void**) &device_x_total, arraySize* sizeof(double));
cudaMalloc((void**) &device_x_not, arraySize*sizeof(double));
cudaMalloc((void**) &device_f_not, arraySize*sizeof(double));
cudaMalloc((void**) &device_f, arraySize*sizeof(double));
for (iter = 0; iter<itermax10; iter++) { // now, we perform itermax10 iterations for the 10th order method
printf("iter = %d\n", iter);
////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
/*for (int i = 0; i<neqns; i++) x1[i] = x0[i] + h*(b10*f0[i] + b11*f1[i] + b12*f2[i] + b13*f3[i] + b14*f4[i]);
for (int i = 0; i<neqns; i++) x2[i] = x0[i] + h*(b20*f0[i] + b21*f1[i] + b22*f2[i] + b23*f3[i] + b24*f4[i]);
for (int i = 0; i<neqns; i++) x3[i] = x0[i] + h*(b30*f0[i] + b31*f1[i] + b32*f2[i] + b33*f3[i] + b34*f4[i]);
for (int i = 0; i<neqns; i++) x4[i] = x0[i] + h*(b40*f0[i] + b41*f1[i] + b42*f2[i] + b43*f3[i] + b44*f4[i]);*/
//*************Copying over f_total f0 and x0*******************************
cudaMemcpy(device_f, f_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
order10Kernel<<<1,neqns>>>(device_x_total,device_x_not,device_f_not, h,device_f);
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//**********************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//************************************************************************************
cudaMalloc((void**) &device_x_total, arraySize* sizeof(double));
cudaMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//writeback
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//*****************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
}
cudaFree(device_x_total);
cudaFree(device_x_not);
cudaFree(device_f_not);
cudaFree(device_f);
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
for (int i = 0; i<neqns; i++) x5[i] = x0[i] + h*(b50*f0[i] + b51*f1[i] + b52*f2[i] + b53*f3[i] + b54*f4[i]); // now get x5
der(t0 + a5*h, x5, f5); // and get the derivative there, f5
for (int i = 0; i<neqns; i++) {
xn10[i] = x0[i] + h*(c0*f0[i] + c1*f1[i] + c2*f2[i] + c3*f3[i] + c4*f4[i] + c5*f5[i]); // now compute final 10th order answer
}
if (sho) {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
itermax10, t0 + h, xn10[0], xn10[1], xn10[0] - sin(t0 + h), xn10[1] - cos(t0 + h));
}
else {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf\n",
itermax10, t0 + h, xn10[0], xn10[1]);
}
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + h*(g10*f0[i] + g11*f1[i] + g12*f2[i] + g13*f3[i] + g14*f4[i] + g15*f5[i]); // these fk's are from 10th order method,
// x2[i] = x0[i] + h*(g20*f0[i] + g21*f1[i] + g22*f2[i] + g23*f3[i] + g24*f4[i] + g25*f5[i]); // and note that they are being
// x3[i] = x0[i] + h*(g30*f0[i] + g31*f1[i] + g32*f2[i] + g33*f3[i] + g34*f4[i] + g35*f5[i]); // used to build the five internal values
// x4[i] = x0[i] + h*(g40*f0[i] + g41*f1[i] + g42*f2[i] + g43*f3[i] + g44*f4[i] + g45*f5[i]); // used to construct the 12th order xk's,
// x5[i] = x0[i] + h*(g50*f0[i] + g51*f1[i] + g52*f2[i] + g53*f3[i] + g54*f4[i] + g55*f5[i]); // so these xk's are for the 12th order method
//}
//***************************************************************************************************************************************************
//copying f arrays to a single array f_total
memcpy(f_total,f1, 2*sizeof(double));
memcpy(f_total+2,f2, 2*sizeof(double));
memcpy(f_total+4,f3, 2 *sizeof(double));
memcpy(f_total+6, f4, 2*sizeof(double));
memcpy(f_total+8, f5, 2*sizeof(double));
//allocating memory
cudaMalloc((void**) &device_x_total, arraySize*sizeof(double));
cudaMalloc((void**) &device_f, arraySize*sizeof(double));
cudaMalloc((void**) &device_x_not, arraySize*sizeof(double));
cudaMalloc((void**) &device_f_not, arraySize*sizeof(double));
//copying over f0, x0, and f_total
cudaMemcpy(device_f,f_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//calling order10 kernel
Order10FkKernel<<<1, neqns>>>(device_x_total, device_x_not, device_f_not, h, device_f);
//WriteBack of x_total
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//***************************************************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1);
//der(t0 + a2*h, x2, f2); // now we get the fk's to be used in the 12th order method
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4); // i.e., obtain derivatives at the five internal points needed for 12th order method
//der(t0 + a5*h, x5, f5);
//********************************************************************************************************************
numOfXArrays=5; //because we are passing in X1-X5
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//copying data from device to host
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//****************************************************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
for (iter = 0; iter<itermax12; iter++) { // now we can iterate to improve the values at the five internal points
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) { // each time, we recompute the internal xk values used in the 12th order method
// x1[i] = x0[i] + h*(bh10*f0[i] + bh11*f1[i] + bh12*f2[i] + bh13*f3[i] + bh14*f4[i] + bh15*f5[i]);
// x2[i] = x0[i] + h*(bh20*f0[i] + bh21*f1[i] + bh22*f2[i] + bh23*f3[i] + bh24*f4[i] + bh25*f5[i]);
// x3[i] = x0[i] + h*(bh30*f0[i] + bh31*f1[i] + bh32*f2[i] + bh33*f3[i] + bh34*f4[i] + bh35*f5[i]);
// x4[i] = x0[i] + h*(bh40*f0[i] + bh41*f1[i] + bh42*f2[i] + bh43*f3[i] + bh44*f4[i] + bh45*f5[i]);
// x5[i] = x0[i] + h*(bh50*f0[i] + bh51*f1[i] + bh52*f2[i] + bh53*f3[i] + bh54*f4[i] + bh55*f5[i]);
//}
cudaMemcpy(device_f, f_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_x_not, x0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(device_f_not, f0, arraySize*sizeof(double), cudaMemcpyHostToDevice);
Order12Kernel<<<1,neqns>>>(device_x_total, device_x_not, device_f_not, h, device_f);
cudaMemcpy(x_total, device_x_total, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // once again, obtain derivatives at the five internal points of the 12th order method
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//der(t0 + a5*h, x5, f5);
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
//*******************************************************************************************************************
cudaMemcpy(device_x_total, x_total, arraySize*sizeof(double), cudaMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//copying data from device to host
cudaMemcpy(f_total, device_f, arraySize*sizeof(double), cudaMemcpyDeviceToHost);
}
cudaFree(device_x_total);
cudaFree(device_f);
cudaFree(device_x_not);
cudaFree(device_f_not);
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
memcpy(f5, f_total +8, 2*sizeof(double));
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(x5, x_total +8, 2*sizeof(double));
for (int i = 0; i<neqns; i++) { // iteration complete, so now compute final base value for 12th order method
x6[i] = x0[i] + h*(bh60*f0[i] + bh61*f1[i] + bh62*f2[i] + bh63*f3[i] + bh64*f4[i] + bh65*f5[i]);
}
der(t0 + ah6*h, x6, f6); // and get the derivative there
for (int i = 0; i<neqns; i++) { // now, compute the final 12th order approximation to the solution at the end of the step
xn12[i] = x0[i] + h*(ch0*f0[i] + ch1*f1[i] + ch2*f2[i] + ch3*f3[i] + ch4*f4[i] + ch5*f5[i] + ch6*f6[i]); // now compute final 12th order answer
}
printf(" The estimates of the errors in the 10-th order method by differencing with 12-th order method results are %e and %e\n", xn10[0] - xn12[0], xn10[1] - xn12[1]);
if (sho) {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
iter, t0 + h, xn12[0], xn12[1], xn12[0] - sin(t0 + h), xn12[1] - cos(t0 + h));
}
else {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf\n",
iter, t0 + h, xn12[0], xn12[1]);
}
est = 1.0e-30;
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
esti = xn10[i] - xn12[i];
est = est + esti*esti;
}
est = sqrt(est); // sqrt of the sum of the squares of the errors in each component of the solution at t0 + h
hnew = h * pow(tol10 / est, 0.1);
if (est < tol) { // if error estimate is less than the error tolerance, then the step succeeded
printf("The step succeeded since est = %e was less than tol = %e\n\n", est, tol);
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
x0[i] = xn12[i];
}
t0 = t0 + h; // and update the independent variable
if (t0 / tf >= 0.99999999999999) finished = true; // and if we have reached the final value, tf, set finished to true
}
else {
printf("The step failed since est = %e was not less than tol = %e\n\n", est, tol);
}
h = hnew; // in any event, if not finished, we set the stepsize, h, to the new value, hnew
if ((t0 + h) > tf) h = tf - t0; // if new step takes us past final value, tf, reduce it to tf-t0
}
}
int main(int argc, char* argv[])
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
printf("Testing Implicit RK1210 ");
if (sho) {
printf(" for simple harmonic oscillator example problem \n\n");
}
else {
printf(" for predator - prey example problem \n\n");
}
rk1210();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds =0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Code took: %f milliseconds\n", milliseconds);
getchar();
return 0;
}
|
#include "hip/hip_runtime.h"
#include <hip/device_functions.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926535897932
#define MAXEQNS 10 // maximum number of differential equations in the system
const int itermax10 = 2; // number of iterations to use for rk10
const int itermax12 = 1; // number of additional iterations to use for rk12
const int neqns = 2; // number of differential equations in the system
const double tol = 1.0e-10; // the error tolerance
const double tol10 = tol / 10;
const bool sho = true; // set sho to true if you want the simple harmonic oscillator results
// set sho to false, if you want the predator - prey results
// the following constants are the 10th order method's coefficients
const double a0 = 0;
__constant__ double a1 = 0.11747233803526765;
__constant__ double a2 = 0.35738424175967745;
__constant__ double a3 = 0.64261575824032255;
__constant__ double a4 = 0.88252766196473235;
const double a5 = 1.0000000000000000;
__constant__ double b10 = 0.047323231137709573;
__constant__ double b11 = 0.077952072407795078;
__constant__ double b12 = -0.010133421269900587;
__constant__ double b13 = 0.0028864915990617097;
__constant__ double b14 = -0.00055603583939812082;
__constant__ double b20 = 0.021779075831486075;
__constant__ double b21 = 0.22367959757928498;
__constant__ double b22 = 0.12204792759220492;
__constant__ double b23 = -0.012091266674498959;
__constant__ double b24 = 0.0019689074312004371;
__constant__ double b30 = 0.044887590835180592;
__constant__ double b31 = 0.15973856856089786;
__constant__ double b32 = 0.32285378852557547;
__constant__ double b33 = 0.12204792759220492;
__constant__ double b34 = -0.0069121172735362915;
__constant__ double b40 = 0.019343435528957094;
__constant__ double b41 = 0.22312684732165494;
__constant__ double b42 = 0.23418268877986459;
__constant__ double b43 = 0.32792261792646064;
__constant__ double b44 = 0.077952072407795078;
const double b50 = 0.066666666666666667;
const double b51 = 0.10981508874708385;
const double b52 = 0.37359383699761912;
const double b53 = 0.18126454003786724;
const double b54 = 0.26865986755076313;
const double c0 = 0.033333333333333333;
const double c1 = 0.18923747814892349;
const double c2 = 0.27742918851774318;
const double c3 = 0.27742918851774318;
const double c4 = 0.18923747814892349;
const double c5 = 0.033333333333333333;
// the following coefficients allow us to get rk12 internal xk values from rk10 fk values
__constant__ double g10 = 0.043407276098971173;
__constant__ double g11 = 0.049891561330903419;
__constant__ double g12 = -0.012483721919363355;
__constant__ double g13 = 0.0064848904066894701;
__constant__ double g14 = -0.0038158693974615597;
__constant__ double g15 = 0.0014039153409773882;
__constant__ double g20 = 0.030385164419638569;
__constant__ double g21 = 0.19605322645426044;
__constant__ double g22 = 0.047860687574395354;
__constant__ double g23 = -0.012887249003100515;
__constant__ double g24 = 0.0064058521980400821;
__constant__ double g25 = -0.0022420783785910372;
__constant__ double g30 = 0.032291666666666667;
__constant__ double g31 = 0.19311806292811784;
__constant__ double g32 = 0.25797759963091718;
__constant__ double g33 = 0.019451588886825999;
__constant__ double g34 = -0.0038805847791943522;
__constant__ double g35 = 0.0010416666666666667;
__constant__ double g40 = 0.035575411711924371;
__constant__ double g41 = 0.18283162595088341;
__constant__ double g42 = 0.29031643752084369;
__constant__ double g43 = 0.22956850094334782;
__constant__ double g44 = -0.0068157483053369507;
__constant__ double g45 = 0.0029481689136947641;
__constant__ double g50 = 0.031929417992355945;
__constant__ double g51 = 0.19305334754638505;
__constant__ double g52 = 0.27094429811105371;
__constant__ double g53 = 0.28991291043710653;
__constant__ double g54 = 0.13934591681802007;
__constant__ double g55 = -0.010073942765637839;
const double g60 = 0.033333333333333333;
const double g61 = 0.18923747814892349;
const double g62 = 0.27742918851774318;
const double g63 = 0.27742918851774318;
const double g64 = 0.18923747814892349;
const double g65 = 0.033333333333333333;
// the following constants are the 12th order method's coefficients
const double ah0 = 0.0;
const double ah1 = 0.084888051860716535;
const double ah2 = 0.26557560326464289;
const double ah3 = 0.50000000000000000;
const double ah4 = 0.73442439673535711;
const double ah5 = 0.91511194813928346;
const double ah6 = 1.0000000000000000;
__constant__ double bh10 = 0.033684534770907752;
__constant__ double bh11 = 0.057301749935629582;
__constant__ double bh12 = -0.0082444880936983822;
__constant__ double bh13 = 0.0029151263642014432;
__constant__ double bh14 = -0.00096482361331657787;
__constant__ double bh15 = 0.00019595249699271744;
__constant__ double bh20 = 0.015902242088596380;
__constant__ double bh21 = 0.16276437062291593;
__constant__ double bh22 = 0.096031583397703751;
__constant__ double bh23 = -0.011758319711158930;
__constant__ double bh24 = 0.0032543514515832418;
__constant__ double bh25 = -0.00061862458499748489;
__constant__ double bh30 = 0.031250000000000000;
__constant__ double bh31 = 0.11881843285766042;
__constant__ double bh32 = 0.24868761828096535;
__constant__ double bh33 = 0.11000000000000000;
__constant__ double bh34 = -0.010410996557394222;
__constant__ double bh35 = 0.0016549454187684515;
__constant__ double bh40 = 0.015902242088596380;
__constant__ double bh41 = 0.15809680304274781;
__constant__ double bh42 = 0.18880881534382426;
__constant__ double bh43 = 0.28087114502765051;
__constant__ double bh44 = 0.096031583397703751;
__constant__ double bh45 = -0.0052861921651656089;
__constant__ double bh50 = 0.033684534770907752;
__constant__ double bh51 = 0.11440754737426645;
__constant__ double bh52 = 0.24657204460460206;
__constant__ double bh53 = 0.20929436236889375;
__constant__ double bh54 = 0.25385170908498387;
__constant__ double bh55 = 0.057301749935629582;
const double bh60 = 0;
const double bh61 = 0.19581988897471611;
const double bh62 = 0.14418011102528389;
const double bh63 = 0.32000000000000000;
const double bh64 = 0.14418011102528389;
const double bh65 = 0.19581988897471611;
const double ch0 = 0.023809523809523810;
const double ch1 = 0.13841302368078297;
const double ch2 = 0.21587269060493131;
const double ch3 = 0.24380952380952381;
const double ch4 = 0.21587269060493131;
const double ch5 = 0.13841302368078297;
const double ch6 = 0.023809523809523810;
#define cudaErrorCheck(call) { cudaAssert(call,__FILE__,__LINE__); }
void cudaAssert(const hipError_t err, const char *file, const int line)
{
if( hipSuccess != err) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",
file, line, hipGetErrorString(err) );
getchar();
exit(1);
}
}
//****************************************************************************
//Derivative kernel: takes pointers to x[], and f[] allocated on the device
__global__ void derKernel(double* device_x, double* device_f)
{
//2 elements in device_x represent 2 elements from individual arrays X1-X4;
//ie if thread id is 0 then the array number is 0x2 and work on elements tx*2 and tx*2 +1
int tx = threadIdx.x;
int xArrayNumber = tx *2;
device_f[xArrayNumber] = device_x[xArrayNumber+1];
__syncthreads();
device_f[xArrayNumber+1] = -device_x[xArrayNumber];
__syncthreads();
}
__global__ void guessKernel(double*device_X_Total, double* device_X_Not,double* device_F_Not, double h){
device_X_Total[threadIdx.x] = device_X_Not[threadIdx.x] + a1 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +2] = device_X_Not[threadIdx.x] + a2 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +4] = device_X_Not[threadIdx.x] + a3 * h * device_F_Not[threadIdx.x];
device_X_Total[threadIdx.x +6] = device_X_Not[threadIdx.x] + a4 * h * device_F_Not[threadIdx.x];
}
__global__ void order10Kernel(double*device_X_Total, double* device_X_Not, double *device_F_Not, double h, double *device_f)
{
int tx = threadIdx.x;
device_X_Total[tx]=device_X_Not[tx] + h*((b10 * device_F_Not[tx]) + (b11 * device_f[tx]) + (b12 * device_f[tx+2]) + (b13 * device_f[tx +4]) + (b14 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+2]=device_X_Not[tx] + h*((b20 * device_F_Not[tx]) + (b21 * device_f[tx]) + (b22 * device_f[tx+2]) + (b23 * device_f[tx +4]) + (b24 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+4]=device_X_Not[tx] + h*((b30 * device_F_Not[tx]) +( b31 * device_f[tx]) + (b32 * device_f[tx+2]) + (b33 * device_f[tx +4]) + (b34 * device_f[tx +6]));
__syncthreads();
device_X_Total[tx+6]=device_X_Not[tx] + h*((b40 * device_F_Not[tx]) + (b41 * device_f[tx]) +( b42 * device_f[tx+2]) + (b43 * device_f[tx +4]) +( b44 * device_f[tx +6]));
__syncthreads();
}
__global__ void Order10FkKernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f)
{
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((g10*device_F_Not[tx])+ (g11 * device_f[tx]) + (g12 * device_f[tx+2])+ (g13 * device_f[tx + 4]) + (g14 * device_f[tx+ 6])+ (g15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((g20*device_F_Not[tx])+ (g21 * device_f[tx]) + (g22 * device_f[tx+2])+ (g23 * device_f[tx + 4]) + (g24 * device_f[tx+ 6])+ (g25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((g30*device_F_Not[tx])+ (g31 * device_f[tx]) + (g32 * device_f[tx+2])+ (g33 * device_f[tx + 4]) + (g34 * device_f[tx+ 6])+ (g35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((g40*device_F_Not[tx])+ (g41 * device_f[tx]) + (g42 * device_f[tx+2])+ (g43 * device_f[tx + 4]) + (g44 * device_f[tx+ 6])+ (g45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((g50*device_F_Not[tx])+ (g51 * device_f[tx]) + (g52 * device_f[tx+2])+ (g53 * device_f[tx + 4]) + (g54 * device_f[tx+ 6])+ (g55 *device_f[tx+8]));
__syncthreads();
}
__global__ void Order12Kernel(double*device_X_Total, double* device_X_Not, double* device_F_Not, double h, double*device_f){
int tx = threadIdx.x;
device_X_Total[tx] = device_X_Not[tx] + h*((bh10*device_F_Not[tx])+ (bh11 * device_f[tx]) + (bh12 * device_f[tx+2])+ (bh13 * device_f[tx + 4]) + (bh14 * device_f[tx+ 6])+ (bh15 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+2] = device_X_Not[tx] + h*((bh20*device_F_Not[tx])+ (bh21 * device_f[tx]) + (bh22 * device_f[tx+2])+ (bh23 * device_f[tx + 4]) + (bh24 * device_f[tx+ 6])+ (bh25 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+4] = device_X_Not[tx] + h*((bh30*device_F_Not[tx])+ (bh31 * device_f[tx]) + (bh32 * device_f[tx+2])+ (bh33 * device_f[tx + 4]) + (bh34 * device_f[tx+ 6])+ (bh35 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+6] = device_X_Not[tx] + h*((bh40*device_F_Not[tx])+ (bh41 * device_f[tx]) + (bh42 * device_f[tx+2])+ (bh43 * device_f[tx + 4]) + (bh44 * device_f[tx+ 6])+ (bh45 *device_f[tx+8]));
__syncthreads();
device_X_Total[tx+8] = device_X_Not[tx] + h*((bh50*device_F_Not[tx])+ (bh51 * device_f[tx]) + (bh52 * device_f[tx+2])+ (bh53 * device_f[tx + 4]) + (bh54 * device_f[tx+ 6])+ (bh55 *device_f[tx+8]));
__syncthreads();
}
//****************************************************************************
// the following function describes the ordinary differential equations
//** The function is still live for the non parallel function calls to der
//** still active in the program.
void der(double t, double x[], double f[]) {
if (sho) {
f[0] = x[1];
f[1] = -x[0];
}
else {
f[0] = x[0] * (2.0 - x[1]);
f[1] = x[1] * (x[0] - 1.0);
}
}
void rk1210() {
// Implicit Runge-Kutta of orders 12 and 10
double x0[MAXEQNS], x1[MAXEQNS], x2[MAXEQNS], x3[MAXEQNS], x4[MAXEQNS];
double x5[MAXEQNS], x6[MAXEQNS], xn10[MAXEQNS], xn12[MAXEQNS];
double t0, tf, h, hnew, est, esti, f0[MAXEQNS], f1[MAXEQNS], f2[MAXEQNS];
double f3[MAXEQNS], f4[MAXEQNS], f5[MAXEQNS], f6[MAXEQNS];
int iter;
bool finished = false; // becomes true when we have reached tf
if (sho) {
h = PI / 4.0; // initial guess for stepsize to use
x0[0] = 0.0; // initial value of first component
x0[1] = 1.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 2 * PI; // final t value, tf
}
else {
h = 1.0 / 2.0; // initial guess for stepsize to use
x0[0] = 2.0; // initial value of first component
x0[1] = 2.0; // initial value of second component
t0 = 0.0; // initial t value, t0
tf = 4.0; // final t value, tf
}
printf("Initial conditions are t0 = %8.5lf, x0[0] = %18.15lf, x0[1] = %18.15lf\n", t0, x0[0], x0[1]);
const int arraySize = 10; //there will be 8 elements being written from x1-x4 (Remaining 2 for when X5 is included);
int numOfXArrays =4;
double x_total[arraySize];
double f_total[arraySize];
while (!finished) { // keep going until we reach tf successfully
der(t0, x0, f0); // first, we will get 10th order results
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + a1*h*f0[i]; // just guess that solution is a straight line initially
// x2[i] = x0[i] + a2*h*f0[i]; // at the four internal points within the step
// x3[i] = x0[i] + a3*h*f0[i];
// x4[i] = x0[i] + a4*h*f0[i];
//}
//*************************************************************************************
double* device_x_total; double* device_x_not; double* device_f_not; //creating variables for the device
//allocating memory for device variables
hipMalloc((void**) &device_x_total, arraySize * sizeof(double));
hipMalloc((void**) &device_x_not, arraySize * sizeof(double));
hipMalloc((void**) &device_f_not, arraySize * sizeof(double));
//copying contents of x0 and f0 to the device variables
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize *sizeof(double), hipMemcpyHostToDevice);
guessKernel<<<1, neqns>>>(device_x_total, device_x_not, device_f_not, h);
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//****************************************************************************************
double* device_f; //creating variables for device;
//allocating memory for x[], and f[]
hipMalloc((void**) &device_x_total, arraySize* sizeof(double));
hipMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//*******Creating timers to test 4 arrays *********
/*cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);*/
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
/* cudaEventRecord(stop);*/
//copying data from device to host
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
/*cudaEventSynchronize(stop);
float milliseconds =0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Kernel took: %f milliseconds\n", milliseconds);
getchar();*/
//****************************************************************************************
hipMalloc((void**) &device_x_total, arraySize* sizeof(double));
hipMalloc((void**) &device_x_not, arraySize*sizeof(double));
hipMalloc((void**) &device_f_not, arraySize*sizeof(double));
hipMalloc((void**) &device_f, arraySize*sizeof(double));
for (iter = 0; iter<itermax10; iter++) { // now, we perform itermax10 iterations for the 10th order method
printf("iter = %d\n", iter);
////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
/*for (int i = 0; i<neqns; i++) x1[i] = x0[i] + h*(b10*f0[i] + b11*f1[i] + b12*f2[i] + b13*f3[i] + b14*f4[i]);
for (int i = 0; i<neqns; i++) x2[i] = x0[i] + h*(b20*f0[i] + b21*f1[i] + b22*f2[i] + b23*f3[i] + b24*f4[i]);
for (int i = 0; i<neqns; i++) x3[i] = x0[i] + h*(b30*f0[i] + b31*f1[i] + b32*f2[i] + b33*f3[i] + b34*f4[i]);
for (int i = 0; i<neqns; i++) x4[i] = x0[i] + h*(b40*f0[i] + b41*f1[i] + b42*f2[i] + b43*f3[i] + b44*f4[i]);*/
//*************Copying over f_total f0 and x0*******************************
hipMemcpy(device_f, f_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
order10Kernel<<<1,neqns>>>(device_x_total,device_x_not,device_f_not, h,device_f);
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//**********************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // now, evaluate the derivatives at these four points
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//************************************************************************************
hipMalloc((void**) &device_x_total, arraySize* sizeof(double));
hipMalloc((void**) &device_f, arraySize * sizeof(double));
//copying over t and x[]
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//writeback
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//*****************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
}
hipFree(device_x_total);
hipFree(device_x_not);
hipFree(device_f_not);
hipFree(device_f);
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
for (int i = 0; i<neqns; i++) x5[i] = x0[i] + h*(b50*f0[i] + b51*f1[i] + b52*f2[i] + b53*f3[i] + b54*f4[i]); // now get x5
der(t0 + a5*h, x5, f5); // and get the derivative there, f5
for (int i = 0; i<neqns; i++) {
xn10[i] = x0[i] + h*(c0*f0[i] + c1*f1[i] + c2*f2[i] + c3*f3[i] + c4*f4[i] + c5*f5[i]); // now compute final 10th order answer
}
if (sho) {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
itermax10, t0 + h, xn10[0], xn10[1], xn10[0] - sin(t0 + h), xn10[1] - cos(t0 + h));
}
else {
printf("10th order iterations = %d, t = %8.5lf, xn10[0] = %18.15lf, xn10[1] = %18.15lf\n",
itermax10, t0 + h, xn10[0], xn10[1]);
}
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) {
// x1[i] = x0[i] + h*(g10*f0[i] + g11*f1[i] + g12*f2[i] + g13*f3[i] + g14*f4[i] + g15*f5[i]); // these fk's are from 10th order method,
// x2[i] = x0[i] + h*(g20*f0[i] + g21*f1[i] + g22*f2[i] + g23*f3[i] + g24*f4[i] + g25*f5[i]); // and note that they are being
// x3[i] = x0[i] + h*(g30*f0[i] + g31*f1[i] + g32*f2[i] + g33*f3[i] + g34*f4[i] + g35*f5[i]); // used to build the five internal values
// x4[i] = x0[i] + h*(g40*f0[i] + g41*f1[i] + g42*f2[i] + g43*f3[i] + g44*f4[i] + g45*f5[i]); // used to construct the 12th order xk's,
// x5[i] = x0[i] + h*(g50*f0[i] + g51*f1[i] + g52*f2[i] + g53*f3[i] + g54*f4[i] + g55*f5[i]); // so these xk's are for the 12th order method
//}
//***************************************************************************************************************************************************
//copying f arrays to a single array f_total
memcpy(f_total,f1, 2*sizeof(double));
memcpy(f_total+2,f2, 2*sizeof(double));
memcpy(f_total+4,f3, 2 *sizeof(double));
memcpy(f_total+6, f4, 2*sizeof(double));
memcpy(f_total+8, f5, 2*sizeof(double));
//allocating memory
hipMalloc((void**) &device_x_total, arraySize*sizeof(double));
hipMalloc((void**) &device_f, arraySize*sizeof(double));
hipMalloc((void**) &device_x_not, arraySize*sizeof(double));
hipMalloc((void**) &device_f_not, arraySize*sizeof(double));
//copying over f0, x0, and f_total
hipMemcpy(device_f,f_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize*sizeof(double), hipMemcpyHostToDevice);
//calling order10 kernel
Order10FkKernel<<<1, neqns>>>(device_x_total, device_x_not, device_f_not, h, device_f);
//WriteBack of x_total
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//***************************************************************************************************************************************************
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1);
//der(t0 + a2*h, x2, f2); // now we get the fk's to be used in the 12th order method
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4); // i.e., obtain derivatives at the five internal points needed for 12th order method
//der(t0 + a5*h, x5, f5);
//********************************************************************************************************************
numOfXArrays=5; //because we are passing in X1-X5
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//copying data from device to host
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//****************************************************************************************************************************
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
for (iter = 0; iter<itermax12; iter++) { // now we can iterate to improve the values at the five internal points
//////////////////// THIS CAN BE DONE IN PARALLEL ///////////////////////
//for (int i = 0; i<neqns; i++) { // each time, we recompute the internal xk values used in the 12th order method
// x1[i] = x0[i] + h*(bh10*f0[i] + bh11*f1[i] + bh12*f2[i] + bh13*f3[i] + bh14*f4[i] + bh15*f5[i]);
// x2[i] = x0[i] + h*(bh20*f0[i] + bh21*f1[i] + bh22*f2[i] + bh23*f3[i] + bh24*f4[i] + bh25*f5[i]);
// x3[i] = x0[i] + h*(bh30*f0[i] + bh31*f1[i] + bh32*f2[i] + bh33*f3[i] + bh34*f4[i] + bh35*f5[i]);
// x4[i] = x0[i] + h*(bh40*f0[i] + bh41*f1[i] + bh42*f2[i] + bh43*f3[i] + bh44*f4[i] + bh45*f5[i]);
// x5[i] = x0[i] + h*(bh50*f0[i] + bh51*f1[i] + bh52*f2[i] + bh53*f3[i] + bh54*f4[i] + bh55*f5[i]);
//}
hipMemcpy(device_f, f_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_x_not, x0, arraySize*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(device_f_not, f0, arraySize*sizeof(double), hipMemcpyHostToDevice);
Order12Kernel<<<1,neqns>>>(device_x_total, device_x_not, device_f_not, h, device_f);
hipMemcpy(x_total, device_x_total, arraySize*sizeof(double), hipMemcpyDeviceToHost);
//////////////////// AND THIS CAN BE DONE IN PARALLEL ///////////////////////
//der(t0 + a1*h, x1, f1); // once again, obtain derivatives at the five internal points of the 12th order method
//der(t0 + a2*h, x2, f2);
//der(t0 + a3*h, x3, f3);
//der(t0 + a4*h, x4, f4);
//der(t0 + a5*h, x5, f5);
//////////////////// END OF PARALLEL SECTION OF CODE ///////////////////////
//*******************************************************************************************************************
hipMemcpy(device_x_total, x_total, arraySize*sizeof(double), hipMemcpyHostToDevice);
//kernel call
derKernel<<<1,numOfXArrays>>>(device_x_total, device_f);
//copying data from device to host
hipMemcpy(f_total, device_f, arraySize*sizeof(double), hipMemcpyDeviceToHost);
}
hipFree(device_x_total);
hipFree(device_f);
hipFree(device_x_not);
hipFree(device_f_not);
memcpy(f1,f_total, 2*sizeof(double));
memcpy(f2,f_total +2, 2*sizeof(double));
memcpy(f3,f_total +4, 2 *sizeof(double));
memcpy(f4, f_total +6, 2*sizeof(double));
memcpy(f5, f_total +8, 2*sizeof(double));
memcpy(x1,x_total, 2*sizeof(double));
memcpy(x2,x_total +2, 2*sizeof(double));
memcpy(x3,x_total +4, 2 *sizeof(double));
memcpy(x4, x_total +6, 2*sizeof(double));
memcpy(x5, x_total +8, 2*sizeof(double));
for (int i = 0; i<neqns; i++) { // iteration complete, so now compute final base value for 12th order method
x6[i] = x0[i] + h*(bh60*f0[i] + bh61*f1[i] + bh62*f2[i] + bh63*f3[i] + bh64*f4[i] + bh65*f5[i]);
}
der(t0 + ah6*h, x6, f6); // and get the derivative there
for (int i = 0; i<neqns; i++) { // now, compute the final 12th order approximation to the solution at the end of the step
xn12[i] = x0[i] + h*(ch0*f0[i] + ch1*f1[i] + ch2*f2[i] + ch3*f3[i] + ch4*f4[i] + ch5*f5[i] + ch6*f6[i]); // now compute final 12th order answer
}
printf(" The estimates of the errors in the 10-th order method by differencing with 12-th order method results are %e and %e\n", xn10[0] - xn12[0], xn10[1] - xn12[1]);
if (sho) {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf, error[0] = %e, error[1] = %e\n",
iter, t0 + h, xn12[0], xn12[1], xn12[0] - sin(t0 + h), xn12[1] - cos(t0 + h));
}
else {
printf("12th order iterations = %d, t = %8.5lf, xn12[0] = %18.15lf, xn12[1] = %18.15lf\n",
iter, t0 + h, xn12[0], xn12[1]);
}
est = 1.0e-30;
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
esti = xn10[i] - xn12[i];
est = est + esti*esti;
}
est = sqrt(est); // sqrt of the sum of the squares of the errors in each component of the solution at t0 + h
hnew = h * pow(tol10 / est, 0.1);
if (est < tol) { // if error estimate is less than the error tolerance, then the step succeeded
printf("The step succeeded since est = %e was less than tol = %e\n\n", est, tol);
for (int i = 0; i<neqns; i++) { // now, just update the solution to prepare for the next step
x0[i] = xn12[i];
}
t0 = t0 + h; // and update the independent variable
if (t0 / tf >= 0.99999999999999) finished = true; // and if we have reached the final value, tf, set finished to true
}
else {
printf("The step failed since est = %e was not less than tol = %e\n\n", est, tol);
}
h = hnew; // in any event, if not finished, we set the stepsize, h, to the new value, hnew
if ((t0 + h) > tf) h = tf - t0; // if new step takes us past final value, tf, reduce it to tf-t0
}
}
int main(int argc, char* argv[])
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
printf("Testing Implicit RK1210 ");
if (sho) {
printf(" for simple harmonic oscillator example problem \n\n");
}
else {
printf(" for predator - prey example problem \n\n");
}
rk1210();
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds =0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Code took: %f milliseconds\n", milliseconds);
getchar();
return 0;
}
|
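A minimal HIP sketch related to the pair above: the converted answer keeps the commented-out per-kernel timing block in its original CUDA form, so if that block were ever re-enabled it would need the HIP event API instead. The launch below reuses names from the pair (derKernel, device_x_total, device_f, numOfXArrays); the sketch itself is illustrative and not part of the original row.
// Timing one derKernel launch with HIP events (sketch only).
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
derKernel<<<1, numOfXArrays>>>(device_x_total, device_f);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Kernel took: %f milliseconds\n", milliseconds);
hipEventDestroy(start);
hipEventDestroy(stop);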
Convert the following CUDA code to AMD GPU code:
cuda
#include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void initBucket(int *bucket)
{
int i = blockIdx.x*blockDim.x + threadIdx.x; // i = threadIdx.x in this code
bucket[i] = 0;
}
__global__ void setBucket(int *bucket, int *key)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
atomicAdd(&(bucket[key[i]]), 1);
}
__global__ void scan(int *ofst, int *bucket, int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
extern __shared__ int tmp[];
ofst[i] = bucket[i];
__syncthreads();
for (int j=1; j<n; j<<=1) {
tmp[i] = ofst[i];
__syncthreads();
if(i >= j) ofst[i] += tmp[i-j];
__syncthreads();
}
ofst[i] -= bucket[i];
}
__global__ void setKey(int *key, int *ofst, int *bucket)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
for (int j=0; j<bucket[i]; j++) {
key[ofst[i]+j] = i;
}
}
int main() {
int n = 50;
int range = 5;
std::vector<int> h_key(n);
int *d_key; cudaMalloc((void**) &d_key, sizeof(int)*n);
for (int i=0; i<n; i++) {
h_key[i] = rand() % range;
printf("%d ",h_key[i]);
}
printf("\n");
cudaMemcpy(d_key, &h_key[0], sizeof(int)*n, cudaMemcpyHostToDevice);
int *d_bucket; cudaMalloc((void**) &d_bucket, sizeof(int)*range);
// Initialize buckets
initBucket<<<1, range>>>(d_bucket);
cudaDeviceSynchronize();
// Count the occurrences of each number [0:4]
setBucket<<<1, n>>>(d_bucket, d_key);
cudaDeviceSynchronize();
// Calculate offsets
int *d_ofst; cudaMalloc((void**) &d_ofst, sizeof(int)*range);
scan<<<1, range, range>>>(d_ofst, d_bucket, range);
cudaDeviceSynchronize();
// Sort the input key
setKey<<<1, range>>>(d_key, d_ofst, d_bucket);
cudaDeviceSynchronize();
cudaMemcpy(&h_key[0], d_key, sizeof(int)*n, cudaMemcpyDeviceToHost);
for (int i=0; i<n; i++) {
printf("%d ",h_key[i]);
}
printf("\n");
cudaFree(d_key);
cudaFree(d_bucket);
cudaFree(d_ofst);
}
|
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <vector>
__global__ void initBucket(int *bucket)
{
int i = blockIdx.x*blockDim.x + threadIdx.x; // i = threadIdx.x in this code
bucket[i] = 0;
}
__global__ void setBucket(int *bucket, int *key)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
atomicAdd(&(bucket[key[i]]), 1);
}
__global__ void scan(int *ofst, int *bucket, int n)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
extern __shared__ int tmp[];
ofst[i] = bucket[i];
__syncthreads();
for (int j=1; j<n; j<<=1) {
tmp[i] = ofst[i];
__syncthreads();
if(i >= j) ofst[i] += tmp[i-j];
__syncthreads();
}
ofst[i] -= bucket[i];
}
__global__ void setKey(int *key, int *ofst, int *bucket)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
for (int j=0; j<bucket[i]; j++) {
key[ofst[i]+j] = i;
}
}
int main() {
int n = 50;
int range = 5;
std::vector<int> h_key(n);
int *d_key; hipMalloc((void**) &d_key, sizeof(int)*n);
for (int i=0; i<n; i++) {
h_key[i] = rand() % range;
printf("%d ",h_key[i]);
}
printf("\n");
hipMemcpy(d_key, &h_key[0], sizeof(int)*n, hipMemcpyHostToDevice);
int *d_bucket; hipMalloc((void**) &d_bucket, sizeof(int)*range);
// Initialize buckets
initBucket<<<1, range>>>(d_bucket);
hipDeviceSynchronize();
// Count the occurrences of each number [0:4]
setBucket<<<1, n>>>(d_bucket, d_key);
hipDeviceSynchronize();
// Calculate offsets
int *d_ofst; hipMalloc((void**) &d_ofst, sizeof(int)*range);
scan<<<1, range, range>>>(d_ofst, d_bucket, range);
hipDeviceSynchronize();
// Sort the input key
setKey<<<1, range>>>(d_key, d_ofst, d_bucket);
hipDeviceSynchronize();
hipMemcpy(&h_key[0], d_key, sizeof(int)*n, hipMemcpyDeviceToHost);
for (int i=0; i<n; i++) {
printf("%d ",h_key[i]);
}
printf("\n");
hipFree(d_key);
hipFree(d_bucket);
hipFree(d_ofst);
}
|
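One note on the pair above, with a hedged sketch: in both CUDA and HIP the third kernel-launch argument is the dynamic shared-memory size in bytes, and the scan kernel declares extern __shared__ int tmp[]. The conversion correctly leaves the launch untouched, but if the intent is to give the kernel room for range ints, the launch would normally be written as below (assuming that intent; the original passes range bytes).
// Sketch: dynamic shared memory sized in bytes for `range` ints.
scan<<<1, range, range * sizeof(int)>>>(d_ofst, d_bucket, range);
hipDeviceSynchronize();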
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void assignColIds(int* colIds, const int* colOffsets) {
int myId = blockIdx.x;
int start = colOffsets[myId];
int end = colOffsets[myId + 1];
for (int id = start + threadIdx.x; id < end; id += blockDim.x) {
colIds[id] = myId;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void assignColIds(int* colIds, const int* colOffsets) {
int myId = blockIdx.x;
int start = colOffsets[myId];
int end = colOffsets[myId + 1];
for (int id = start + threadIdx.x; id < end; id += blockDim.x) {
colIds[id] = myId;
}
}
|
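The pair above converts only the kernel, so the launch site is an assumption. As a hedged sketch, the same HIP kernel can be launched either with the familiar triple-chevron syntax or with the hipLaunchKernelGGL macro; numCols, d_colIds, and d_colOffsets below are illustrative names, not identifiers from the original.
// Sketch: one block per column, 256 threads per block (assumed configuration).
assignColIds<<<numCols, 256>>>(d_colIds, d_colOffsets);
// Equivalent launch written with the HIP macro:
hipLaunchKernelGGL(assignColIds, dim3(numCols), dim3(256), 0, 0, d_colIds, d_colOffsets);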
Convert the following CUDA code to AMD GPU code:
cuda
extern "C"
__global__ void sconv_bprop_C128_N128 (
float* param_test,
float* param_O,
const float* param_I,
const float* param_F,
float param_alpha,
int param_N,
int param_K,
int param_D,
int param_H,
int param_W,
int param_WN,
int param_HWN,
int param_DHWN,
int param_C,
int param_CRST,
int param_RST,
int param_RS,
int param_magic_RS,
int param_shift_RS,
int param_S,
int param_magic_S,
int param_shift_S,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_d,
int param_str_h,
int param_str_w,
int param_Q,
int param_PQ,
int param_QN,
int param_PQN,
int param_MPQN,
int param_magic_Q,
int param_shift_Q,
int param_magic_PQ,
int param_shift_PQ,
int param_R,
int param_T,
int param_magic_str_w,
int param_shift_str_w,
int param_magic_str_h,
int param_shift_str_h,
int param_magic_str_d,
int param_shift_str_d) {
__shared__ float share[128 * 8 * 4 + 8];
int tid = threadIdx.x;
share[tid] = 1;
*param_O = share[127-tid];
*param_test = share[127-tid];
}
|
#include <hip/hip_runtime.h>
extern "C"
__global__ void sconv_bprop_C128_N128 (
float* param_test,
float* param_O,
const float* param_I,
const float* param_F,
float param_alpha,
int param_N,
int param_K,
int param_D,
int param_H,
int param_W,
int param_WN,
int param_HWN,
int param_DHWN,
int param_C,
int param_CRST,
int param_RST,
int param_RS,
int param_magic_RS,
int param_shift_RS,
int param_S,
int param_magic_S,
int param_shift_S,
int param_pad_d,
int param_pad_h,
int param_pad_w,
int param_str_d,
int param_str_h,
int param_str_w,
int param_Q,
int param_PQ,
int param_QN,
int param_PQN,
int param_MPQN,
int param_magic_Q,
int param_shift_Q,
int param_magic_PQ,
int param_shift_PQ,
int param_R,
int param_T,
int param_magic_str_w,
int param_shift_str_w,
int param_magic_str_h,
int param_shift_str_h,
int param_magic_str_d,
int param_shift_str_d) {
__shared__ float share[128 * 8 * 4 + 8];
int tid = threadIdx.x;
share[tid] = 1;
*param_O = share[127-tid];
*param_test = share[127-tid];
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 512
#define ARRAY_SIZE 512*512
__global__ void getMin(int* array, int* results, int n){
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= n){
array[i] = INT_MAX;
}
__syncthreads();
for(int s = blockDim.x/2; s > 0; s>>=1){
if(i < ARRAY_SIZE){
if(threadIdx.x < s){
if(array[i] > array[i + s]){
array[i] = array[i + s];
}
}
}
__syncthreads();
}
if(threadIdx.x == 0){
results[blockIdx.x] = array[i];
}
}
__global__ void getMin2(int* array){
int i = threadIdx.x + blockIdx.x * blockDim.x;
for(int s = blockDim.x/2; s > 0; s>>=1){
if(i < s){
if(threadIdx.x < s){
if(array[i] > array[i + s]){
array[i] = array[i + s];
}
}
}
__syncthreads();
}
}
/* Part B */
__global__ void last_digit(int n, int *A, int *B){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
B[i] = A[i] % 10;
}
int main(){
FILE* fp;
int temp;
char buff[256];
fp = fopen("inp.txt", "r");
//int size = 256;
int count = 0;
int numBlocks = (ARRAY_SIZE/THREADS_PER_BLOCK);
int* array = (int*)malloc(ARRAY_SIZE * sizeof(int));
int* A = array;
int* B = (int*)malloc(ARRAY_SIZE*sizeof(int));
int* d_A;
cudaMalloc((void**)&d_A, ARRAY_SIZE*sizeof(int));
int* d_B;
cudaMalloc((void**)&d_B, ARRAY_SIZE*sizeof(int));
while(fscanf(fp, "%d", &temp) != EOF){
array[count] = temp;
count++;
fscanf(fp, "%s", buff);
}
cudaMemcpy(d_A, A, ARRAY_SIZE*sizeof(int), cudaMemcpyHostToDevice);
/* Kernel B */
int blockSize = 256;
int numBlocks2 = (count + blockSize - 1) / blockSize;
last_digit<<<numBlocks2, blockSize>>>(count, d_A, d_B);
int tempC = count;
while(tempC < ARRAY_SIZE){
array[tempC] = INT_MAX;
tempC++;
}
int* d_array;
cudaMalloc((void **)&d_array, ARRAY_SIZE*sizeof(int));
cudaMemcpy(d_array, array, ARRAY_SIZE*sizeof(int), cudaMemcpyHostToDevice);
int* mid;
cudaMalloc((void **)&mid, numBlocks*sizeof(int));
getMin<<<numBlocks, THREADS_PER_BLOCK>>>(d_array, mid, count);
getMin2<<<16, 32>>>(mid);
cudaMemcpy(array, d_array, ARRAY_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
int* h_mid = (int*)malloc(numBlocks*sizeof(int));
cudaMemcpy(h_mid, mid, numBlocks*sizeof(int), cudaMemcpyDeviceToHost);
for(int i = numBlocks - 1; i >= 0; i--){
printf("%d\n", h_mid[i]);
}
// for(int i = ARRAY_SIZE - 1; i >= 0; i--){
// printf("%d\n", array[i]);
// }
/* Part A to File */
FILE *f = fopen("q1a.txt", "w");
fprintf(f, "%d", h_mid[0]);
fclose(f);
// int* newA = (int*)malloc(ARRAY_SIZE*sizeof(int));
cudaMemcpy(B, d_B, ARRAY_SIZE*sizeof(int), cudaMemcpyDeviceToHost);
/* Part B to File */
FILE *f2 = fopen("q1b.txt", "w");
for (int i = 0; i < count; i++) {
fprintf(f2, "%d", B[i]);
if (i + 1 != count) { fprintf(f2, ", "); }
} fclose(f2);
return 0;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 512
#define ARRAY_SIZE 512*512
__global__ void getMin(int* array, int* results, int n){
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= n){
array[i] = INT_MAX;
}
__syncthreads();
for(int s = blockDim.x/2; s > 0; s>>=1){
if(i < ARRAY_SIZE){
if(threadIdx.x < s){
if(array[i] > array[i + s]){
array[i] = array[i + s];
}
}
}
__syncthreads();
}
if(threadIdx.x == 0){
results[blockIdx.x] = array[i];
}
}
__global__ void getMin2(int* array){
int i = threadIdx.x + blockIdx.x * blockDim.x;
for(int s = blockDim.x/2; s > 0; s>>=1){
if(i < s){
if(threadIdx.x < s){
if(array[i] > array[i + s]){
array[i] = array[i + s];
}
}
}
__syncthreads();
}
}
/* Part B */
__global__ void last_digit(int n, int *A, int *B){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
B[i] = A[i] % 10;
}
int main(){
FILE* fp;
int temp;
char buff[256];
fp = fopen("inp.txt", "r");
//int size = 256;
int count = 0;
int numBlocks = (ARRAY_SIZE/THREADS_PER_BLOCK);
int* array = (int*)malloc(ARRAY_SIZE * sizeof(int));
int* A = array;
int* B = (int*)malloc(ARRAY_SIZE*sizeof(int));
int* d_A;
hipMalloc((void**)&d_A, ARRAY_SIZE*sizeof(int));
int* d_B;
hipMalloc((void**)&d_B, ARRAY_SIZE*sizeof(int));
while(fscanf(fp, "%d", &temp) != EOF){
array[count] = temp;
count++;
fscanf(fp, "%s", buff);
}
hipMemcpy(d_A, A, ARRAY_SIZE*sizeof(int), hipMemcpyHostToDevice);
/* Kernel B */
int blockSize = 256;
int numBlocks2 = (count + blockSize - 1) / blockSize;
last_digit<<<numBlocks2, blockSize>>>(count, d_A, d_B);
int tempC = count;
while(tempC < ARRAY_SIZE){
array[tempC] = INT_MAX;
tempC++;
}
int* d_array;
hipMalloc((void **)&d_array, ARRAY_SIZE*sizeof(int));
hipMemcpy(d_array, array, ARRAY_SIZE*sizeof(int), hipMemcpyHostToDevice);
int* mid;
hipMalloc((void **)&mid, numBlocks*sizeof(int));
getMin<<<numBlocks, THREADS_PER_BLOCK>>>(d_array, mid, count);
getMin2<<<16, 32>>>(mid);
hipMemcpy(array, d_array, ARRAY_SIZE*sizeof(int), hipMemcpyDeviceToHost);
int* h_mid = (int*)malloc(numBlocks*sizeof(int));
hipMemcpy(h_mid, mid, numBlocks*sizeof(int), hipMemcpyDeviceToHost);
for(int i = numBlocks - 1; i >= 0; i--){
printf("%d\n", h_mid[i]);
}
// for(int i = ARRAY_SIZE - 1; i >= 0; i--){
// printf("%d\n", array[i]);
// }
/* Part A to File */
FILE *f = fopen("q1a.txt", "w");
fprintf(f, "%d", h_mid[0]);
fclose(f);
// int* newA = (int*)malloc(ARRAY_SIZE*sizeof(int));
hipMemcpy(B, d_B, ARRAY_SIZE*sizeof(int), hipMemcpyDeviceToHost);
/* Part B to File */
FILE *f2 = fopen("q1b.txt", "w");
for (int i = 0; i < count; i++) {
fprintf(f2, "%d", B[i]);
if (i + 1 != count) { fprintf(f2, ", "); }
} fclose(f2);
return 0;
}
|
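Neither side of the pair above checks launch or runtime errors. A minimal sketch of the usual HIP pattern, reusing the launch from the pair (the error handling itself is an addition, not part of the original code):
getMin<<<numBlocks, THREADS_PER_BLOCK>>>(d_array, mid, count);
hipError_t err = hipGetLastError();            // reports launch-configuration errors
if (err != hipSuccess) {
    fprintf(stderr, "getMin launch failed: %s\n", hipGetErrorString(err));
}
err = hipDeviceSynchronize();                  // reports errors raised during execution
if (err != hipSuccess) {
    fprintf(stderr, "getMin execution failed: %s\n", hipGetErrorString(err));
}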
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void StarRadKernel (double *Qbase2, double *Vrad, double *QStar, double dt, int nrad, int nsec, double *invdiffRmed, double *Rmed, double *dq)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dqm, dqp;
if (i<nrad && j<nsec){
if ((i == 0 || i == nrad-1)) dq[i + j*nrad] = 0.0;
else {
dqm = (Qbase2[i*nsec + j] - Qbase2[(i-1)*nsec + j])*invdiffRmed[i];
dqp = (Qbase2[(i+1)*nsec + j] - Qbase2[i*nsec + j])*invdiffRmed[i+1];
if (dqp * dqm > 0.0)
dq[i+j*nrad] = 2.0*dqp*dqm/(dqp+dqm);
else
dq[i+j*nrad] = 0.0;
}
}
}
|
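The pair above converts a 2D-indexed kernel without its launch site, so the launch below is a hedged sketch only: the kernel maps j (the nsec index) to x and i (the nrad index) to y, and the block shape and d_* pointer names are assumptions.
// Sketch of a matching 2D launch.
dim3 block(32, 4);
dim3 grid((nsec + block.x - 1) / block.x, (nrad + block.y - 1) / block.y);
StarRadKernel<<<grid, block>>>(d_Qbase2, d_Vrad, d_QStar, dt, nrad, nsec,
                               d_invdiffRmed, d_Rmed, d_dq);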
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
#define N 100000000
__global__ void daxpy_simple(int n, double alpha, double *x, double *y) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
y[idx] += alpha * x[idx];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
#define N 100000000
__global__ void daxpy_simple(int n, double alpha, double *x, double *y) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
y[idx] += alpha * x[idx];
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include<ctime>
using namespace std;
__global__ void mini(int *a,int *b,int n)
{
int tid = threadIdx.x;
int minn = INT_MAX;
for(int i=0;i<min(tid+256,n);i++)
{
if(minn>a[i])
minn = a[i];
}
b[tid] = minn;
}
int main()
{
int *a,*b,size,n;
int *d_a,*d_b;
cin>>n;
size = n*sizeof(int);
a = (int*)malloc(size);
b = (int *)malloc(sizeof(int));
cudaMalloc(&d_a,size);
cudaMalloc(&d_b,sizeof(int));
for(int i=0;i<n;i++)
{
a[i] = rand()%100;
}
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
clock_t start = clock();
mini<<<1,n>>>(d_a,d_b,n);
cout<<"time: "<<(float)(clock()-start)/CLOCKS_PER_SEC;
cudaMemcpy(b,d_b,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"min is:"<<b[0];
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include<ctime>
using namespace std;
__global__ void mini(int *a,int *b,int n)
{
int tid = threadIdx.x;
int minn = INT_MAX;
for(int i=0;i<min(tid+256,n);i++)
{
if(minn>a[i])
minn = a[i];
}
b[tid] = minn;
}
int main()
{
int *a,*b,size,n;
int *d_a,*d_b;
cin>>n;
size = n*sizeof(int);
a = (int*)malloc(size);
b = (int *)malloc(sizeof(int));
hipMalloc(&d_a,size);
hipMalloc(&d_b,sizeof(int));
for(int i=0;i<n;i++)
{
a[i] = rand()%100;
}
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
clock_t start = clock();
mini<<<1,n>>>(d_a,d_b,n);
cout<<"time: "<<(float)(clock()-start)/CLOCKS_PER_SEC;
hipMemcpy(b,d_b,sizeof(int),hipMemcpyDeviceToHost);
cout<<"min is:"<<b[0];
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
// Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// common.cu
namespace nbla {
__global__ void cuda_increment_vector(float *a) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
a[i] = a[i] + 1;
}
void increment_vector(cudaStream_t stream, float *vec, size_t size) {
if (size <= 512) {
cuda_increment_vector<<<1, size, 0, stream>>>(vec);
} else {
cuda_increment_vector<<<size / 512, 512, 0, stream>>>(vec);
}
}
} // namespace nbla
|
#include <hip/hip_runtime.h>
// Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// common.cu
namespace nbla {
__global__ void cuda_increment_vector(float *a) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
a[i] = a[i] + 1;
}
void increment_vector(hipStream_t stream, float *vec, size_t size) {
if (size <= 512) {
cuda_increment_vector<<<1, size, 0, stream>>>(vec);
} else {
cuda_increment_vector<<<size / 512, 512, 0, stream>>>(vec);
}
}
} // namespace nbla
|
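The pair above converts cudaStream_t to hipStream_t but, being a library file, never creates the stream itself. A hedged sketch of a caller (vec is assumed to be a device allocation of size floats; none of this is part of the original file):
hipStream_t stream;
hipStreamCreate(&stream);
nbla::increment_vector(stream, vec, size);   // launches on the given stream
hipStreamSynchronize(stream);                // wait for the increment to finish
hipStreamDestroy(stream);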
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
#include "cuda_kernel.cuh"
int solveProblem(const int argc, const char* argv[]){
cudaError_t return_value;
if(argc == 2){
cudaEvent_t start, stop;
float time;
int threads = atoi(argv[1]);
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Everything runs on stream 0
cudaEventRecord(start);
// Start kernel
kernel<<<1,threads>>>();
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
return_value = cudaGetLastError();
if(return_value != cudaSuccess){
printf("Error in Kernel\n");
printf("%s\n",cudaGetErrorString(return_value));
return -1;
}
cudaEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} else {
printf("parameter required\n");
return -1;
}
}
int main(const int argc, const char* argv[]){
int devices;
cudaError_t return_value;
return_value = cudaGetDeviceCount(&devices);
if(return_value != cudaSuccess){
printf("Could not get device count\n");
return -1;
}
if(devices > 0){
printf("%d devices found\n",devices);
for(int device = 0; device < devices; device++){
cudaDeviceProp device_info;
cudaGetDeviceProperties(&device_info, device);
printf("Name: %s\n",device_info.name);
printf("max. Memory: %.0f MB\n",(double)device_info.totalGlobalMem/(double)(1024*1024));
printf("max. Threads per Block: %d\n", device_info.maxThreadsPerBlock);
printf("max. Blocks per Grid: %d,%d,%d\n", device_info.maxGridSize[0], device_info.maxGridSize[1],device_info.maxGridSize[2]);
printf("\n");
}
return solveProblem(argc, argv);
}
else{
printf("No CUDA cards found\n");
return -1;
}
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
#include "cuda_kernel.cuh"
int solveProblem(const int argc, const char* argv[]){
hipError_t return_value;
if(argc == 2){
hipEvent_t start, stop;
float time;
int threads = atoi(argv[1]);
hipEventCreate(&start);
hipEventCreate(&stop);
// Everything runs on stream 0
hipEventRecord(start);
// Start kernel
kernel<<<1,threads>>>();
hipDeviceSynchronize();
hipEventRecord(stop);
hipEventSynchronize(stop);
return_value = hipGetLastError();
if(return_value != hipSuccess){
printf("Error in Kernel\n");
printf("%s\n",hipGetErrorString(return_value));
return -1;
}
hipEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} else {
printf("parameter required\n");
return -1;
}
}
int main(const int argc, const char* argv[]){
int devices;
hipError_t return_value;
return_value = hipGetDeviceCount(&devices);
if(return_value != hipSuccess){
printf("Could not get device count\n");
return -1;
}
if(devices > 0){
printf("%d devices found\n",devices);
for(int device = 0; device < devices; device++){
hipDeviceProp_t device_info;
hipGetDeviceProperties(&device_info, device);
printf("Name: %s\n",device_info.name);
printf("max. Memory: %.0f MB\n",(double)device_info.totalGlobalMem/(double)(1024*1024));
printf("max. Threads per Block: %d\n", device_info.maxThreadsPerBlock);
printf("max. Blocks per Grid: %d,%d,%d\n", device_info.maxGridSize[0], device_info.maxGridSize[1],device_info.maxGridSize[2]);
printf("\n");
}
return solveProblem(argc, argv);
}
else{
printf("No CUDA cards found\n");
return -1;
}
}
|
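Besides the include, the only non-mechanical rename in this pair is the properties struct (cudaDeviceProp becomes hipDeviceProp_t); the query calls keep their signatures. A small sketch with the return value checked (variable names are assumptions):
hipDeviceProp_t props;
if (hipGetDeviceProperties(&props, 0) == hipSuccess) {
  printf("Device 0: %s, %d multiprocessors\n", props.name, props.multiProcessorCount);
}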
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__device__ float explicitLocalStepHeat( float unjpo, float unjmo, float unj, float r)
{
return (1 - 2 * r)*unj + r*unjmo + r * unjpo;
}
__global__ void explicitTimestepHeat( int size, float *d_currentVal, float *d_nextVal, float r )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (i < 2)
{
d_nextVal[i] = 0;
}
else if (i > size - 2)
{
d_nextVal[i] = 0;
}
else
{
d_nextVal[i] = explicitLocalStepHeat(
d_currentVal[i + 1],
d_currentVal[i - 1],
d_currentVal[i],
r);
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__device__ float explicitLocalStepHeat( float unjpo, float unjmo, float unj, float r)
{
return (1 - 2 * r)*unj + r*unjmo + r * unjpo;
}
__global__ void explicitTimestepHeat( int size, float *d_currentVal, float *d_nextVal, float r )
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < size)
{
if (i < 2)
{
d_nextVal[i] = 0;
}
else if (i > size - 2)
{
d_nextVal[i] = 0;
}
else
{
d_nextVal[i] = explicitLocalStepHeat(
d_currentVal[i + 1],
d_currentVal[i - 1],
d_currentVal[i],
r);
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
#define N 100
__global__ void kernel_1() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_2() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_3() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
int n_streams = 3;
cudaStream_t *streams = (cudaStream_t *)malloc(n_streams * sizeof(cudaStream_t));
for (int i = 0 ; i < n_streams; i++) {
cudaStreamCreate(&streams[i]);
}
dim3 block(1);
dim3 grid(1);
for (int i = 0; i < n_streams; i++) {
kernel_1<<<grid, block, 0, streams[i]>>>();
//}
//for (int i = 0; i < n_streams; i++) {
kernel_2<<<grid, block, 0, streams[i]>>>();
//}
//for (int i = 0; i < n_streams; i++) {
kernel_3<<<grid, block, 0, streams[i]>>>();
}
printf("done\n");
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
// CUDA_CHECK_RETURN(cudaGetLastError());
CUDA_CHECK_RETURN(cudaDeviceReset());
return 0;
}
|
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
#define N 100
__global__ void kernel_1() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_2() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
__global__ void kernel_3() { double sum = 0.0;
for (int i = 0; i < N; i++) {
sum = sum + tan(0.1) * tan(0.1); }
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
int main(void) {
int n_streams = 3;
hipStream_t *streams = (hipStream_t *)malloc(n_streams * sizeof(hipStream_t));
for (int i = 0 ; i < n_streams; i++) {
hipStreamCreate(&streams[i]);
}
dim3 block(1);
dim3 grid(1);
for (int i = 0; i < n_streams; i++) {
kernel_1<<<grid, block, 0, streams[i]>>>();
//}
//for (int i = 0; i < n_streams; i++) {
kernel_2<<<grid, block, 0, streams[i]>>>();
//}
//for (int i = 0; i < n_streams; i++) {
kernel_3<<<grid, block, 0, streams[i]>>>();
}
printf("done\n");
CUDA_CHECK_RETURN(hipDeviceSynchronize()); // Wait for the GPU launched work to complete
// CUDA_CHECK_RETURN(hipGetLastError());
CUDA_CHECK_RETURN(hipDeviceReset());
return 0;
}
|
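The converted demo leaves stream cleanup to hipDeviceReset and never frees the host-side streams array. A short teardown sketch that could precede the reset (an assumed addition, not in the original):
for (int i = 0; i < n_streams; i++) {
  CUDA_CHECK_RETURN(hipStreamDestroy(streams[i]));
}
free(streams);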
Convert the following CUDA code to AMD GPU code:
cuda
#include <cstdlib>
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512
#define KERNEL_Q_THREADS_PER_BLOCK 256
#define KERNEL_Q_K_ELEMS_PER_GRID 1024
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
__constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID];
__global__ void
ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) {
int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x;
if (indexK < numK) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
__global__ void
ComputeQ_GPU(int numK, int kGlobalIndex,
float* x, float* y, float* z, float* Qr , float* Qi)
{
__shared__ float sx,sy,sz,sQr,sQi;
int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x;
sx = x[xIndex];
sy = y[xIndex];
sz = z[xIndex];
sQr = Qr[xIndex];
sQi = Qi[xIndex];
for (int kIndex=0 ; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK);
kIndex ++, kGlobalIndex ++) {
float expArg = PIx2 * (ck[kIndex].Kx * sx +
ck[kIndex].Ky * sy +
ck[kIndex].Kz * sz);
sQr += ck[kIndex].PhiMag * cos(expArg);
sQi += ck[kIndex].PhiMag * sin(expArg);
}
Qr[xIndex] = sQr;
Qi[xIndex] = sQi;
}
void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d)
{
int phiMagBlocks = (numK-1)/ KERNEL_PHI_MAG_THREADS_PER_BLOCK+1;
dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1);
dim3 DimPhiMagGrid(phiMagBlocks, 1);
ComputePhiMag_GPU <<< DimPhiMagGrid, DimPhiMagBlock >>>
(phiR_d, phiI_d, phiMag_d, numK);
}
void computeQ_GPU(int numK, int numX,
float* x_d, float* y_d, float* z_d,
kValues* kVals,
float* Qr_d, float* Qi_d)
{
int QGrids = (numK-1)/ KERNEL_Q_K_ELEMS_PER_GRID+1;
int QBlocks =(numX-1)/ KERNEL_Q_THREADS_PER_BLOCK+1;
dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1);
dim3 DimQGrid(QBlocks, 1);
for (int QGrid = 0; QGrid < QGrids; QGrid++) {
int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase);
cudaMemcpyToSymbol(ck, kValsTile, numElems * sizeof(kValues), 0);
ComputeQ_GPU <<< DimQGrid, DimQBlock >>>
(numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) malloc(numK * sizeof(float));
*Qr = (float*) malloc(numX * sizeof (float));
*Qi = (float*) malloc(numX * sizeof (float));
}
|
#include <hip/hip_runtime.h>
#include <cstdlib>
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define KERNEL_PHI_MAG_THREADS_PER_BLOCK 512
#define KERNEL_Q_THREADS_PER_BLOCK 256
#define KERNEL_Q_K_ELEMS_PER_GRID 1024
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
__constant__ __device__ kValues ck[KERNEL_Q_K_ELEMS_PER_GRID];
__global__ void
ComputePhiMag_GPU(float* phiR, float* phiI, float* phiMag, int numK) {
int indexK = blockIdx.x*KERNEL_PHI_MAG_THREADS_PER_BLOCK + threadIdx.x;
if (indexK < numK) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
__global__ void
ComputeQ_GPU(int numK, int kGlobalIndex,
float* x, float* y, float* z, float* Qr , float* Qi)
{
__shared__ float sx,sy,sz,sQr,sQi;
int xIndex = blockIdx.x*KERNEL_Q_THREADS_PER_BLOCK + threadIdx.x;
sx = x[xIndex];
sy = y[xIndex];
sz = z[xIndex];
sQr = Qr[xIndex];
sQi = Qi[xIndex];
for (int kIndex=0 ; (kIndex < KERNEL_Q_K_ELEMS_PER_GRID) && (kGlobalIndex < numK);
kIndex ++, kGlobalIndex ++) {
float expArg = PIx2 * (ck[kIndex].Kx * sx +
ck[kIndex].Ky * sy +
ck[kIndex].Kz * sz);
sQr += ck[kIndex].PhiMag * cos(expArg);
sQi += ck[kIndex].PhiMag * sin(expArg);
}
Qr[xIndex] = sQr;
Qi[xIndex] = sQi;
}
void computePhiMag_GPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d)
{
int phiMagBlocks = (numK-1)/ KERNEL_PHI_MAG_THREADS_PER_BLOCK+1;
dim3 DimPhiMagBlock(KERNEL_PHI_MAG_THREADS_PER_BLOCK, 1);
dim3 DimPhiMagGrid(phiMagBlocks, 1);
ComputePhiMag_GPU <<< DimPhiMagGrid, DimPhiMagBlock >>>
(phiR_d, phiI_d, phiMag_d, numK);
}
void computeQ_GPU(int numK, int numX,
float* x_d, float* y_d, float* z_d,
kValues* kVals,
float* Qr_d, float* Qi_d)
{
int QGrids = (numK-1)/ KERNEL_Q_K_ELEMS_PER_GRID+1;
int QBlocks =(numX-1)/ KERNEL_Q_THREADS_PER_BLOCK+1;
dim3 DimQBlock(KERNEL_Q_THREADS_PER_BLOCK, 1);
dim3 DimQGrid(QBlocks, 1);
for (int QGrid = 0; QGrid < QGrids; QGrid++) {
int QGridBase = QGrid * KERNEL_Q_K_ELEMS_PER_GRID;
kValues* kValsTile = kVals + QGridBase;
int numElems = MIN(KERNEL_Q_K_ELEMS_PER_GRID, numK - QGridBase);
hipMemcpyToSymbol(HIP_SYMBOL(ck), kValsTile, numElems * sizeof(kValues), 0);
ComputeQ_GPU <<< DimQGrid, DimQBlock >>>
(numK, QGridBase, x_d, y_d, z_d, Qr_d, Qi_d);
}
}
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) malloc(numK * sizeof(float));
*Qr = (float*) malloc(numX * sizeof (float));
*Qi = (float*) malloc(numX * sizeof (float));
}
|
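__constant__ memory is the one spot in this pair where the port is not a pure rename: hipMemcpyToSymbol takes the symbol through the HIP_SYMBOL() macro, as in the answer above. A minimal sketch (coeffs and h_coeffs are illustrative names):
__constant__ float coeffs[16];
float h_coeffs[16] = {1.0f};
hipMemcpyToSymbol(HIP_SYMBOL(coeffs), h_coeffs, sizeof(h_coeffs), 0, hipMemcpyHostToDevice);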
Convert the following CUDA code to AMD GPU code:
cuda
#include <cstdio>
#include <math.h>
#include <ctime>
#include <iostream>
using namespace std;
int performanceMeasure1();
int performanceMeasure2();
int performanceMeasure3();
int performanceMeasure4();
int performanceMeasure5();
int countSortSerial1();
int countSortSerial2();
int countSortSerial3();
int countSortSerial4();
int countSortSerial5();
//calculate the countArray or histogram of number of times a key appears
__global__ void histogram(int * c, int * a, int K, int n)
{
//for inputArray of size n
int entry = (blockIdx.x + blockIdx.y * gridDim.x ) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
c[entry] = 0;
//if out of range then return
if (entry < 0 || entry >= n) return;
//Get the value at the index
int value = a[entry];
//update the counterArray at the value index by 1
int *valueCount = &c[value];
atomicAdd(valueCount, 1);
}
//calculate the prefix sum using a naive stride method
__global__ void naivePrefixSum(int *b, int *c, int k)
{
int entry = threadIdx.x;
if (entry < 0 || entry >= k) return;
b[entry] = c[entry];
//printf("c %d\n", b[entry]);
__syncthreads();
//naive parallel stride prefix sum
for(int i = 1; i < k; i *= 2)
{
if(entry > i-1)
{
b[entry] = b[entry] + b[entry - i];
}
__syncthreads();
}
//printf("\nb %d", b[entry]);
}
//from the prefix sum, place the numbers in the correct postion in the array
__global__ void copyToArray(int * c, int * a, int * b, int Kp, int n)
{
extern __shared__ int temp[];
int entry = (blockIdx.x + blockIdx.y * gridDim.x ) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (entry < 0 || entry >= n) return;
//get value at the inputArray at an index
int value = a[entry];
//get the index for the value
int index = atomicAdd(&c[value], -1);
b[index-1] = value;
}
int main() {
//Start Debug Test
printf("\nDebug Start\n");
///Test n elements with certain number of keys
const int n = 1024;
const int keys = 257;
//Setup Array on host and device
int i_h[n] = {0};
printf("\nInput:\n ");
//An input array i_h (input array on host) with n elements with in the range of 0 to 256 and is a power of 2.
for(int i = 0; i < n; i++){
i_h[i] = pow(2,(std::rand() % 9));
printf("%d ", i_h[i]);
}
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
cudaMalloc((void **)&i_d, sizeof(int)*n);
cudaMalloc((void **)&o_d, sizeof(int)*n);
cudaMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
cudaMemcpy(i_d, i_h, sizeof(int)*n, cudaMemcpyHostToDevice);
cudaMemcpy(o_d, o_d, sizeof(int)*n, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, sizeof(int)*keys, cudaMemcpyHostToDevice);
//CountSortFunction
//Get histogram
histogram <<<6, n>>>(c_d,i_d,keys,n);
cudaMemcpy(c_h, c_d, sizeof(int)*keys, cudaMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,n>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,n>>>(c_d,i_d,o_d,keys,n);
//Get answer
cudaMemcpy(o_h, o_d, sizeof(int)*n, cudaMemcpyDeviceToHost);
//print answer
printf("\nOutput:\n ");
for (int i = 0; i < n; ++i) printf("%d ", o_h[i]);
//free memory
cudaFree(i_d);
cudaFree(o_d);
cudaFree(c_d);
//Finish Debug Test
printf("\nFinish debug\n");
//Performance test
printf("Parallel function doesn't work on 2^21 and larger\n");
printf("tried using clock but doesn't seem to work on serial function\n");
printf("Debug test works");
countSortSerial1();
countSortSerial2();
countSortSerial3();
countSortSerial4();
countSortSerial5();
performanceMeasure1();
//performanceMeasure2();
//performanceMeasure3();
//performanceMeasure4();
//performanceMeasure5();
return 0;
}
///////////////////////////////////////////////////////////Performance Function/////////////////////////////////////////////////////////////////
int countSortSerial1()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^20 : " << duration << endl;
}
int countSortSerial2()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*2;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^21 : " << duration << endl;
}
int countSortSerial3()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*4;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^22 : " << duration << endl;
}
int countSortSerial4()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*8;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^23 : " << duration << endl;
}
int countSortSerial5()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*16;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^24 : " << duration << endl;
}
//Same function, but had trouble initalizing array from function parameter.
//so made copies of different performanceMeasure function 1 to 5 with different number of elements 2^20 to 2^24
int performanceMeasure1()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 1048576;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
cudaMalloc((void **)&i_d, sizeof(int)*elements);
cudaMalloc((void **)&o_d, sizeof(int)*elements);
cudaMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
cudaMemcpy(i_d, i_h, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(o_d, o_d, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, sizeof(int)*keys, cudaMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
cudaMemcpy(c_h, c_d, sizeof(int)*keys, cudaMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
cudaMemcpy(o_h, o_d, sizeof(int)*elements, cudaMemcpyDeviceToHost);
//free memory
cudaFree(i_d);
cudaFree(o_d);
cudaFree(c_d);
duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
cout << "parallel counting 2^20 : " << duration << endl;
return 0;
}
int performanceMeasure2()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 2097152;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
cudaMalloc((void **)&i_d, sizeof(int)*elements);
cudaMalloc((void **)&o_d, sizeof(int)*elements);
cudaMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
cudaMemcpy(i_d, i_h, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(o_d, o_d, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, sizeof(int)*keys, cudaMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
cudaMemcpy(c_h, c_d, sizeof(int)*keys, cudaMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
cudaMemcpy(o_h, o_d, sizeof(int)*elements, cudaMemcpyDeviceToHost);
//free memory
cudaFree(i_d);
cudaFree(o_d);
cudaFree(c_d);
cout << "parallel counting 2^21 : " << duration << endl;
return 0;
}
int performanceMeasure3()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 4194304;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
cudaMalloc((void **)&i_d, sizeof(int)*elements);
cudaMalloc((void **)&o_d, sizeof(int)*elements);
cudaMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
cudaMemcpy(i_d, i_h, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(o_d, o_d, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, sizeof(int)*keys, cudaMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
cudaMemcpy(c_h, c_d, sizeof(int)*keys, cudaMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
cudaMemcpy(o_h, o_d, sizeof(int)*elements, cudaMemcpyDeviceToHost);
//free memory
cudaFree(i_d);
cudaFree(o_d);
cudaFree(c_d);
cout << "parallel counting 2^22 : " << duration << endl;
return 0;
}
int performanceMeasure4()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 8388608;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
cudaMalloc((void **)&i_d, sizeof(int)*elements);
cudaMalloc((void **)&o_d, sizeof(int)*elements);
cudaMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
cudaMemcpy(i_d, i_h, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(o_d, o_d, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, sizeof(int)*keys, cudaMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
cudaMemcpy(c_h, c_d, sizeof(int)*keys, cudaMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
cudaMemcpy(o_h, o_d, sizeof(int)*elements, cudaMemcpyDeviceToHost);
//free memory
cudaFree(i_d);
cudaFree(o_d);
cudaFree(c_d);
cout << "parallel counting 2^23 : " << duration << endl;
return 0;
}
int performanceMeasure5()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 16777216;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
cudaMalloc((void **)&i_d, sizeof(int)*elements);
cudaMalloc((void **)&o_d, sizeof(int)*elements);
cudaMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
cudaMemcpy(i_d, i_h, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(o_d, o_d, sizeof(int)*elements, cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c_h, sizeof(int)*keys, cudaMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
cudaMemcpy(c_h, c_d, sizeof(int)*keys, cudaMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
cudaMemcpy(o_h, o_d, sizeof(int)*elements, cudaMemcpyDeviceToHost);
//free memory
cudaFree(i_d);
cudaFree(o_d);
cudaFree(c_d);
cout << "parallel counting 2^24 : " << duration << endl;
return 0;
}
|
#include <hip/hip_runtime.h>
#include <cstdio>
#include <math.h>
#include <ctime>
#include <iostream>
using namespace std;
int performanceMeasure1();
int performanceMeasure2();
int performanceMeasure3();
int performanceMeasure4();
int performanceMeasure5();
int countSortSerial1();
int countSortSerial2();
int countSortSerial3();
int countSortSerial4();
int countSortSerial5();
//calculate the countArray or histogram of number of times a key appears
__global__ void histogram(int * c, int * a, int K, int n)
{
//for inputArray of size n
int entry = (blockIdx.x + blockIdx.y * gridDim.x ) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
c[entry] = 0;
//if out of range then return
if (entry < 0 || entry >= n) return;
//Get the value at the index
int value = a[entry];
//update the counterArray at the value index by 1
int *valueCount = &c[value];
atomicAdd(valueCount, 1);
}
//calculate the prefix sum using a naive stride method
__global__ void naivePrefixSum(int *b, int *c, int k)
{
int entry = threadIdx.x;
if (entry < 0 || entry >= k) return;
b[entry] = c[entry];
//printf("c %d\n", b[entry]);
__syncthreads();
//naive parallel stride prefix sum
for(int i = 1; i < k; i *= 2)
{
if(entry > i-1)
{
b[entry] = b[entry] + b[entry - i];
}
__syncthreads();
}
//printf("\nb %d", b[entry]);
}
//from the prefix sum, place the numbers in the correct postion in the array
__global__ void copyToArray(int * c, int * a, int * b, int Kp, int n)
{
extern __shared__ int temp[];
int entry = (blockIdx.x + blockIdx.y * gridDim.x ) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (entry < 0 || entry >= n) return;
//get value at the inputArray at an index
int value = a[entry];
//get the index for the value
int index = atomicAdd(&c[value], -1);
b[index-1] = value;
}
int main() {
//Start Debug Test
printf("\nDebug Start\n");
///Test n elements with certain number of keys
const int n = 1024;
const int keys = 257;
//Setup Array on host and device
int i_h[n] = {0};
printf("\nInput:\n ");
//An input array i_h (input array on host) with n elements with in the range of 0 to 256 and is a power of 2.
for(int i = 0; i < n; i++){
i_h[i] = pow(2,(std::rand() % 9));
printf("%d ", i_h[i]);
}
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
hipMalloc((void **)&i_d, sizeof(int)*n);
hipMalloc((void **)&o_d, sizeof(int)*n);
hipMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
hipMemcpy(i_d, i_h, sizeof(int)*n, hipMemcpyHostToDevice);
hipMemcpy(o_d, o_d, sizeof(int)*n, hipMemcpyHostToDevice);
hipMemcpy(c_d, c_h, sizeof(int)*keys, hipMemcpyHostToDevice);
//CountSortFunction
//Get histogram
histogram <<<6, n>>>(c_d,i_d,keys,n);
hipMemcpy(c_h, c_d, sizeof(int)*keys, hipMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,n>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,n>>>(c_d,i_d,o_d,keys,n);
//Get answer
hipMemcpy(o_h, o_d, sizeof(int)*n, hipMemcpyDeviceToHost);
//print answer
printf("\nOutput:\n ");
for (int i = 0; i < n; ++i) printf("%d ", o_h[i]);
//free memory
hipFree(i_d);
hipFree(o_d);
hipFree(c_d);
//Finish Debug Test
printf("\nFinish debug\n");
//Performance test
printf("Parallel function doesn't work on 2^21 and larger\n");
printf("tried using clock but doesn't seem to work on serial function\n");
printf("Debug test works");
countSortSerial1();
countSortSerial2();
countSortSerial3();
countSortSerial4();
countSortSerial5();
performanceMeasure1();
//performanceMeasure2();
//performanceMeasure3();
//performanceMeasure4();
//performanceMeasure5();
return 0;
}
///////////////////////////////////////////////////////////Performance Function/////////////////////////////////////////////////////////////////
int countSortSerial1()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^20 : " << duration << endl;
}
int countSortSerial2()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*2;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^21 : " << duration << endl;
}
int countSortSerial3()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*4;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^22 : " << duration << endl;
}
int countSortSerial4()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*8;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^23 : " << duration << endl;
}
int countSortSerial5()
{
std::clock_t start;
double duration;
start = std::clock();
const int elements = 1048576*16;
const int keys = 257;
int inputArray[elements] = {0};
int output[elements] = {0};
for(int i = 0; i < elements; i++)
inputArray[i] = pow(2,(std::rand() % 9));
int count[elements + 1] = {0};
//Initalize the count array and count the number of keys
for(int i = 0; inputArray[i]; ++i)
++count[inputArray[i]];
//calculate the starting index for each key
int total = 0;
int oldCount;
for (int i = 0; i <= keys; ++i)
{
oldCount = count[i];
count[i] = total;
total += oldCount;
}
// Build the output character array
for (int i = 0; inputArray[i]; ++i)
{
output[count[inputArray[i]]-1] = inputArray[i];
--count[inputArray[i]];
}
cout << "serial counting 2^24 : " << duration << endl;
}
//Same function, but had trouble initalizing array from function parameter.
//so made copies of different performanceMeasure function 1 to 5 with different number of elements 2^20 to 2^24
int performanceMeasure1()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 1048576;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
hipMalloc((void **)&i_d, sizeof(int)*elements);
hipMalloc((void **)&o_d, sizeof(int)*elements);
hipMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
hipMemcpy(i_d, i_h, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(o_d, o_d, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(c_d, c_h, sizeof(int)*keys, hipMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
hipMemcpy(c_h, c_d, sizeof(int)*keys, hipMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
hipMemcpy(o_h, o_d, sizeof(int)*elements, hipMemcpyDeviceToHost);
//free memory
hipFree(i_d);
hipFree(o_d);
hipFree(c_d);
duration = ( std::clock() - start ) / (double) CLOCKS_PER_SEC;
cout << "parallel counting 2^20 : " << duration << endl;
return 0;
}
int performanceMeasure2()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 2097152;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
hipMalloc((void **)&i_d, sizeof(int)*elements);
hipMalloc((void **)&o_d, sizeof(int)*elements);
hipMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
hipMemcpy(i_d, i_h, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(o_d, o_d, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(c_d, c_h, sizeof(int)*keys, hipMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
hipMemcpy(c_h, c_d, sizeof(int)*keys, hipMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
hipMemcpy(o_h, o_d, sizeof(int)*elements, hipMemcpyDeviceToHost);
//free memory
hipFree(i_d);
hipFree(o_d);
hipFree(c_d);
cout << "parallel counting 2^21 : " << duration << endl;
return 0;
}
int performanceMeasure3()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 4194304;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
hipMalloc((void **)&i_d, sizeof(int)*elements);
hipMalloc((void **)&o_d, sizeof(int)*elements);
hipMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
hipMemcpy(i_d, i_h, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(o_d, o_d, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(c_d, c_h, sizeof(int)*keys, hipMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
hipMemcpy(c_h, c_d, sizeof(int)*keys, hipMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
hipMemcpy(o_h, o_d, sizeof(int)*elements, hipMemcpyDeviceToHost);
//free memory
hipFree(i_d);
hipFree(o_d);
hipFree(c_d);
cout << "parallel counting 2^22 : " << duration << endl;
return 0;
}
int performanceMeasure4()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 8388608;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
hipMalloc((void **)&i_d, sizeof(int)*elements);
hipMalloc((void **)&o_d, sizeof(int)*elements);
hipMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
hipMemcpy(i_d, i_h, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(o_d, o_d, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(c_d, c_h, sizeof(int)*keys, hipMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
hipMemcpy(c_h, c_d, sizeof(int)*keys, hipMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
hipMemcpy(o_h, o_d, sizeof(int)*elements, hipMemcpyDeviceToHost);
//free memory
hipFree(i_d);
hipFree(o_d);
hipFree(c_d);
cout << "parallel counting 2^23 : " << duration << endl;
return 0;
}
int performanceMeasure5()
{
std::clock_t start;
double duration;
start = std::clock();
//number of elements and number of keys
const int elements = 16777216;
const int keys = 257;
//setup device and host array variables
int i_h[elements] = {0};
for(int i = 0; i < elements; i++)
i_h[i] = pow(2,(std::rand() % 9));
int o_h[keys] = {0};
int c_h[keys] = {0};
int *i_d, *o_d, *c_d;
//setup array on gpu
hipMalloc((void **)&i_d, sizeof(int)*elements);
hipMalloc((void **)&o_d, sizeof(int)*elements);
hipMalloc((void **)&c_d, sizeof(int)*keys);
//copy values from input,etc..
hipMemcpy(i_d, i_h, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(o_d, o_d, sizeof(int)*elements, hipMemcpyHostToDevice);
hipMemcpy(c_d, c_h, sizeof(int)*keys, hipMemcpyHostToDevice);
//countsort
//Get histogram
histogram <<<6, elements>>>(c_d,i_d,keys,elements);
hipMemcpy(c_h, c_d, sizeof(int)*keys, hipMemcpyDeviceToHost);
//Calculate Prefix sum
naivePrefixSum<<<1,elements>>>(c_d,c_d,keys);
//Fill in array
copyToArray<<<6,elements>>>(c_d,i_d,o_d,keys,elements);
//Get answer
hipMemcpy(o_h, o_d, sizeof(int)*elements, hipMemcpyDeviceToHost);
//free memory
hipFree(i_d);
hipFree(o_d);
hipFree(c_d);
cout << "parallel counting 2^24 : " << duration << endl;
return 0;
}
|
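The serial counting passes in this pair terminate on the first zero element (for(int i = 0; inputArray[i]; ++i)); since every generated value is a nonzero power of two, that condition is only met once the loop reads past the array. A bounded sketch of the counting loop, assuming the elements constant from the enclosing function; the same bound applies to the output-building loop:
for (int i = 0; i < elements; ++i)
  ++count[inputArray[i]];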
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void cube_select(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
temp_dist[j] = 1e8;
idx_out[i * 8 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _z = (tz > z);
int temp_idx = _x * 4 + _y * 2 + _z;
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cube_select(int b, int n,float radius, const float* xyz, int* idx_out) {
int batch_idx = blockIdx.x;
xyz += batch_idx * n * 3;
idx_out += batch_idx * n * 8;
float temp_dist[8];
float judge_dist = radius * radius;
for(int i = threadIdx.x; i < n;i += blockDim.x) {
float x = xyz[i * 3];
float y = xyz[i * 3 + 1];
float z = xyz[i * 3 + 2];
for(int j = 0;j < 8;j ++) {
temp_dist[j] = 1e8;
idx_out[i * 8 + j] = i; // if not found, just return itself..
}
for(int j = 0;j < n;j ++) {
if(i == j) continue;
float tx = xyz[j * 3];
float ty = xyz[j * 3 + 1];
float tz = xyz[j * 3 + 2];
float dist = (x - tx) * (x - tx) + (y - ty) * (y - ty) + (z - tz) * (z - tz);
if(dist > judge_dist) continue;
int _x = (tx > x);
int _y = (ty > y);
int _z = (tz > z);
int temp_idx = _x * 4 + _y * 2 + _z;
if(dist < temp_dist[temp_idx]) {
idx_out[i * 8 + temp_idx] = j;
temp_dist[temp_idx] = dist;
}
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda_runtime.h>
__global__ void matmult_gpu1Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C);
extern "C" {
void matmult_gpu1(int m, int n, int k, double * A, double * B, double * C){
double * d_A, * d_B, * d_C;
cudaMalloc((void **)&d_A, m * k * sizeof(double *));
cudaMalloc((void **)&d_B, k * n * sizeof(double *));
cudaMalloc((void **)&d_C, m * n * sizeof(double *));
cudaMemcpy(d_A, A, m * k * sizeof(double *), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k * n * sizeof(double *), cudaMemcpyHostToDevice);
matmult_gpu1Kernel<<<1,1>>>(m, n, k, d_A, d_B, d_C);
cudaMemcpy(C, d_C, m * n * sizeof(double *), cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
}
__global__ void matmult_gpu1Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C){
int i, j, l;
double x;
for(i=0;i < m; i++){
for(j=0;j<n;j++){
d_C[i*n + j]=0;
}
for(l=0;l < k;l++){
x = d_A[i*k + l];
for(j=0;j < n; j++){
d_C[i*n + j] += x * d_B[l*n + j];
}
}
}
}
|
#include <hip/hip_runtime.h>
__global__ void matmult_gpu1Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C);
extern "C" {
void matmult_gpu1(int m, int n, int k, double * A, double * B, double * C){
double * d_A, * d_B, * d_C;
hipMalloc((void **)&d_A, m * k * sizeof(double *));
hipMalloc((void **)&d_B, k * n * sizeof(double *));
hipMalloc((void **)&d_C, m * n * sizeof(double *));
hipMemcpy(d_A, A, m * k * sizeof(double *), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k * n * sizeof(double *), hipMemcpyHostToDevice);
matmult_gpu1Kernel<<<1,1>>>(m, n, k, d_A, d_B, d_C);
hipMemcpy(C, d_C, m * n * sizeof(double *), hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
}
__global__ void matmult_gpu1Kernel(int m, int n, int k, double * d_A, double * d_B, double * d_C){
int i, j, l;
double x;
for(i=0;i < m; i++){
for(j=0;j<n;j++){
d_C[i*n + j]=0;
}
for(l=0;l < k;l++){
x = d_A[i*k + l];
for(j=0;j < n; j++){
d_C[i*n + j] += x * d_B[l*n + j];
}
}
}
}
|
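The byte counts in this pair use sizeof(double *) where the element size sizeof(double) is presumably intended; the two coincide on common 64-bit ABIs, so the code still works. A sketch of the allocations written with the element size:
hipMalloc((void **)&d_A, m * k * sizeof(double));
hipMalloc((void **)&d_B, k * n * sizeof(double));
hipMalloc((void **)&d_C, m * n * sizeof(double));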
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// CUDA example: finds row sums of an integer matrix m
// find1elt() finds the row sum of one row of the nxn matrix m,
// storing the result in the corresponding position in the
// rowsum array rs; matrix is in 1-dimensional, row-major order
// this is the "kernel", which each thread on the GPU executes
__global__ void find1elt(int *m, int *rs, int n)
{
// this thread will handle row # rownum
int rownum = blockIdx.x;
int sum = 0;
for (int k = 0; k < n; k++)
sum += m[rownum*n+k];
rs[rownum] = sum;
}
// the remaining code is executed on the CPU
int main(int argc, char **argv)
{
int n = atoi(argv[1]); // number of matrix rows/cols
int *hm, // host matrix
*dm, // device matrix
*hrs, // host rowsums
*drs; // device rowsums
// size of matrix in bytes
int msize = n * n * sizeof(int);
// allocate space for host matrix
hm = (int *) malloc(msize);
// as a test, fill matrix with consecutive integers
int t = 0,i,j;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
hm[i*n+j] = t++;
}
}
// allocate matrix space at device
cudaMalloc((void **)&dm,msize);
// copy host matrix to device matrix
cudaMemcpy(dm,hm,msize,cudaMemcpyHostToDevice);
// allocate host, device rowsum arrays
int rssize = n * sizeof(int);
hrs = (int *) malloc(rssize);
cudaMalloc((void **)&drs,rssize);
// set up threads structure parameters
dim3 dimGrid(n,1); // n blocks in the grid
dim3 dimBlock(1,1,1); // 1 thread per block
// launch the kernel
find1elt<<<dimGrid,dimBlock>>>(dm,drs,n);
// wait until kernel finishes
cudaThreadSynchronize();
// copy row vector from device to host
cudaMemcpy(hrs,drs,rssize,cudaMemcpyDeviceToHost);
// check results
if (n < 10) for(int i=0; i<n; i++) printf("%d\n",hrs[i]);
// clean up, very important
free(hm);
cudaFree(dm);
free(hrs);
cudaFree(drs);
}
|
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
// CUDA example: finds row sums of an integer matrix m
// find1elt() finds the row sum of one row of the nxn matrix m,
// storing the result in the corresponding position in the
// rowsum array rs; matrix is in 1-dimensional, row-major order
// this is the "kernel", which each thread on the GPU executes
__global__ void find1elt(int *m, int *rs, int n)
{
// this thread will handle row # rownum
int rownum = blockIdx.x;
int sum = 0;
for (int k = 0; k < n; k++)
sum += m[rownum*n+k];
rs[rownum] = sum;
}
// the remaining code is executed on the CPU
int main(int argc, char **argv)
{
int n = atoi(argv[1]); // number of matrix rows/cols
int *hm, // host matrix
*dm, // device matrix
*hrs, // host rowsums
*drs; // device rowsums
// size of matrix in bytes
int msize = n * n * sizeof(int);
// allocate space for host matrix
hm = (int *) malloc(msize);
// as a test, fill matrix with consecutive integers
int t = 0,i,j;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
hm[i*n+j] = t++;
}
}
// allocate matrix space at device
hipMalloc((void **)&dm,msize);
// copy host matrix to device matrix
hipMemcpy(dm,hm,msize,hipMemcpyHostToDevice);
// allocate host, device rowsum arrays
int rssize = n * sizeof(int);
hrs = (int *) malloc(rssize);
hipMalloc((void **)&drs,rssize);
// set up threads structure parameters
dim3 dimGrid(n,1); // n blocks in the grid
dim3 dimBlock(1,1,1); // 1 thread per block
// launch the kernel
find1elt<<<dimGrid,dimBlock>>>(dm,drs,n);
// wait until kernel finishes
hipDeviceSynchronize();
// copy row vector from device to host
hipMemcpy(hrs,drs,rssize,hipMemcpyDeviceToHost);
// check results
if (n < 10) for(int i=0; i<n; i++) printf("%d\n",hrs[i]);
// clean up, very important
free(hm);
hipFree(dm);
free(hrs);
hipFree(drs);
}
|
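cudaThreadSynchronize is deprecated in CUDA and maps to hipDeviceSynchronize, as the answer above does. A sketch of the same wait with its return value checked (an assumed addition, not in the original):
hipError_t err = hipDeviceSynchronize();
if (err != hipSuccess)
  printf("kernel failed: %s\n", hipGetErrorString(err));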
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void gpu_stencil37_hack2_cp_slices(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch,int d_ypitch,int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices, int tile_x,int tile_y, int tile_z){
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices: begin!\n");
printf("copy slices: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy slices: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy slices: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy slices: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x * blockIdx.x;
//int area = n_rows*n_cols;
//int base_global_idx = base_global_slice*area + base_global_row * n_cols + base_global_col;
//int d_area = n_rows*d_xpitch;
//int s_area = n_rows*n_cols;
int d_area = d_ypitch*d_xpitch;
int s_area = s_ypitch*s_xpitch;
int base_global_idx = base_global_slice*d_area + base_global_row * d_xpitch + base_global_col;
int nextSlice = base_global_slice+1;
bool legalNextSlice = (nextSlice<n_slices);
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
for(int ty=0;ty<tile_y;++ty){
bool legalCurRow = (base_global_row + ty)<n_rows;
//int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*n_cols + base_global_col+tx ;
//int dst_idx = base_global_idx + ty*n_cols+tx;
int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*s_xpitch + base_global_col+tx ;
int d_idx = base_global_idx + ty*d_xpitch+tx;
if(legalCurCol&&legalCurRow){
shared_slices[s_idx] = dst[d_idx];
}
if(legalNextSlice&&legalCurCol&&legalCurRow){
shared_slices[s_idx+s_area] = dst[d_idx+d_area];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 ){
// printf("shared_slices: addr:%d, val = %f\n",n_cols*n_rows + threadIdx.x,shared_slices[n_cols*n_rows+threadIdx.x]);
if(threadIdx.x==0||threadIdx.x==1||threadIdx.x==2){
int addr = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int addr1 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch;
int addr2 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch*2;
int daddr = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int daddr1 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch;
int daddr2 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch*2;
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr,dst[daddr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr1,dst[daddr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr2,dst[daddr2]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr,shared_slices[addr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr1,shared_slices[addr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr2,shared_slices[addr2]);
}
}
#endif
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices end!\n");
}
#endif
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_stencil37_hack2_cp_slices(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch,int d_ypitch,int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices, int tile_x,int tile_y, int tile_z){
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices: begin!\n");
printf("copy slices: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy slices: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy slices: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy slices: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x * blockIdx.x;
//int area = n_rows*n_cols;
//int base_global_idx = base_global_slice*area + base_global_row * n_cols + base_global_col;
//int d_area = n_rows*d_xpitch;
//int s_area = n_rows*n_cols;
int d_area = d_ypitch*d_xpitch;
int s_area = s_ypitch*s_xpitch;
int base_global_idx = base_global_slice*d_area + base_global_row * d_xpitch + base_global_col;
int nextSlice = base_global_slice+1;
bool legalNextSlice = (nextSlice<n_slices);
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
for(int ty=0;ty<tile_y;++ty){
bool legalCurRow = (base_global_row + ty)<n_rows;
//int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*n_cols + base_global_col+tx ;
//int dst_idx = base_global_idx + ty*n_cols+tx;
int s_idx = blockIdx.z*s_area*2 + (base_global_row+ty)*s_xpitch + base_global_col+tx ;
int d_idx = base_global_idx + ty*d_xpitch+tx;
if(legalCurCol&&legalCurRow){
shared_slices[s_idx] = dst[d_idx];
}
if(legalNextSlice&&legalCurCol&&legalCurRow){
shared_slices[s_idx+s_area] = dst[d_idx+d_area];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 ){
// printf("shared_slices: addr:%d, val = %f\n",n_cols*n_rows + threadIdx.x,shared_slices[n_cols*n_rows+threadIdx.x]);
if(threadIdx.x==0||threadIdx.x==1||threadIdx.x==2){
int addr = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int addr1 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch;
int addr2 = s_xpitch*s_ypitch + blockDim.x*blockIdx.x+threadIdx.x+s_xpitch*2;
int daddr = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x;
int daddr1 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch;
int daddr2 = d_xpitch*d_ypitch + blockDim.x*blockIdx.x+threadIdx.x+d_xpitch*2;
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr,dst[daddr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr1,dst[daddr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,dst: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, daddr2,dst[daddr2]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr,shared_slices[addr]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr1,shared_slices[addr1]);
printf("copy slices: blockIdx.x=%d, blockIdx.y=%d, blockIdx.z=%d,shared_slices: addr= %d, val= %f\n",blockIdx.x, blockIdx.y, blockIdx.z, addr2,shared_slices[addr2]);
}
}
#endif
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy slices end!\n");
}
#endif
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void x_avpb_py_f32 (float* x, float a, float* v, float b, float* y, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
y[idx] += x[idx] * (a * v[idx] + b);
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void x_avpb_py_f32 (float* x, float a, float* v, float b, float* y, int len) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < len) {
y[idx] += x[idx] * (a * v[idx] + b);
}
}
|
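For this pair the only change is the explicit <hip/hip_runtime.h> include; the triple-chevron launch syntax carries over unchanged. As a hedged illustration (the wrapper name, buffer names and block size below are assumptions, not part of the pair above), the same launch can also be written with the hipLaunchKernelGGL macro:

#include <hip/hip_runtime.h>
// Hypothetical host-side launcher; d_x, d_v, d_y are assumed to be valid device allocations
// and the kernel definition above is assumed to be visible in this translation unit.
void launch_x_avpb_py_f32(float* d_x, float a, float* d_v, float b, float* d_y, int len) {
    const int block = 256;                         // a common block size choice
    const int grid  = (len + block - 1) / block;   // round up so every element is covered
    // Equivalent to: x_avpb_py_f32<<<grid, block>>>(d_x, a, d_v, b, d_y, len);
    hipLaunchKernelGGL(x_avpb_py_f32, dim3(grid), dim3(block), 0, 0, d_x, a, d_v, b, d_y, len);
}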
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void reduction(int* input, int* output) {
__shared__ int tmp[TPB];
tmp[threadIdx.x] = input[threadIdx.x + blockIdx.x * blockDim.x];
__syncthreads();
if(threadIdx.x < blockDim.x / 2)
tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 2];
__syncthreads();
if(threadIdx.x < blockDim.x / 4)
tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 4];
__syncthreads();
if(threadIdx.x < blockDim.x / 8)
tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 8];
__syncthreads();
if(threadIdx.x == 0) {
tmp[threadIdx.x] += tmp[threadIdx.x + 1];
output[blockIdx.x] = tmp[threadIdx.x];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void reduction(int* input, int* output) {
__shared__ int tmp[TPB];
tmp[threadIdx.x] = input[threadIdx.x + blockIdx.x * blockDim.x];
__syncthreads();
if(threadIdx.x < blockDim.x / 2)
tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 2];
__syncthreads();
if(threadIdx.x < blockDim.x / 4)
tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 4];
__syncthreads();
if(threadIdx.x < blockDim.x / 8)
tmp[threadIdx.x] += tmp[threadIdx.x + blockDim.x / 8];
__syncthreads();
if(threadIdx.x == 0) {
tmp[threadIdx.x] += tmp[threadIdx.x + 1];
output[blockIdx.x] = tmp[threadIdx.x];
}
}
|
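TPB is expected to come from includes.h. The fixed /2, /4, /8 halvings followed by the final tmp[1] add appear to sum the whole block only when blockDim.x is exactly 16, so the sketch below (an assumption, not taken from the pair) pins TPB to 16 and shows a matching launch:

#include <hip/hip_runtime.h>
#define TPB 16   // assumption: three halving steps plus the final pair-add cover exactly 16 threads
// Hypothetical launcher; d_in must hold nBlocks*TPB ints on the device, d_out one int per block,
// and the reduction kernel above is assumed to be visible here.
void launch_reduction(int* d_in, int* d_out, int nBlocks) {
    reduction<<<nBlocks, TPB>>>(d_in, d_out);
    hipDeviceSynchronize();                     // wait so d_out can be copied back safely
}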
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Kernel that executes on the CUDA device
__global__ void square_array(float *a, float *b, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
a[idx] = a[idx] * a[idx];
}
}
// main routine that executes on the host
int main(void)
{
float elapsedTime;
float *a, *a_d,*b,*b_d; // Pointer to host & device arrays
const int N = 12; // Number of elements in arrays
int ind=0,iteraciones=10;
size_t size = N * sizeof(float);
a = (float *)malloc(size); // Allocate array on host
b = (float *)malloc(size); // Allocate array on host
cudaMalloc((void **) &a_d, size); // Allocate array on device
cudaMalloc((void **) &b_d, size); // Allocate array on device
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++)
{
a[i] = (float)i;
b[i] = (float)i+1;
}
cudaMemcpy(a_d, a, size,cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, size,cudaMemcpyHostToDevice);
// Do calculation on device:
int block_size = 4;
int n_blocks = N/block_size;
cudaEventRecord(start,0);
while(ind<iteraciones)
{
square_array <<< n_blocks, block_size >>> (a_d,b_d, N);
ind++;
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("El tiempo tomado para %d iteraciones fue de %3.3f ms\n",iteraciones,elapsedTime/10);
// Retrieve result from device and store it in host array
cudaMemcpy(a, a_d, sizeof(float)*N,cudaMemcpyDeviceToHost);
/*// Print results
for (int i=0; i<N; i++)
printf("%d %f\n", i, a[i]);
*/
// Cleanup
free(a);
free(b);
cudaFree(a_d);
cudaFree(b_d);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Kernel that executes on the CUDA device
__global__ void square_array(float *a, float *b, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N)
{
a[idx] = a[idx] * a[idx];
}
}
// main routine that executes on the host
int main(void)
{
float elapsedTime;
float *a, *a_d,*b,*b_d; // Pointer to host & device arrays
const int N = 12; // Number of elements in arrays
int ind=0,iteraciones=10;
size_t size = N * sizeof(float);
a = (float *)malloc(size); // Allocate array on host
b = (float *)malloc(size); // Allocate array on host
hipMalloc((void **) &a_d, size); // Allocate array on device
hipMalloc((void **) &b_d, size); // Allocate array on device
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++)
{
a[i] = (float)i;
b[i] = (float)i+1;
}
hipMemcpy(a_d, a, size,hipMemcpyHostToDevice);
hipMemcpy(b_d, b, size,hipMemcpyHostToDevice);
// Do calculation on device:
int block_size = 4;
int n_blocks = N/block_size;
hipEventRecord(start,0);
while(ind<iteraciones)
{
square_array <<< n_blocks, block_size >>> (a_d,b_d, N);
ind++;
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("El tiempo tomado para %d iteraciones fue de %3.3f ms\n",iteraciones,elapsedTime/10);
// Retrieve result from device and store it in host array
hipMemcpy(a, a_d, sizeof(float)*N,hipMemcpyDeviceToHost);
/*// Print results
for (int i=0; i<N; i++)
printf("%d %f\n", i, a[i]);
*/
// Cleanup
free(a);
free(b);
hipFree(a_d);
hipFree(b_d);
}
|
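None of the runtime calls in this pair check their return codes, so failures such as a rejected allocation pass silently. A minimal hedged helper for that, not part of the original program, could look like this:

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
// Hypothetical error-checking macro; wrap any HIP runtime call to surface failures immediately.
#define HIP_CHECK(call)                                                     \
    do {                                                                    \
        hipError_t err_ = (call);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",                    \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)
// Usage sketch: HIP_CHECK(hipMalloc((void**)&a_d, size));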
Convert the following CUDA code to AMD GPU code:
cuda
//nvcc -ptx EM4.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
__device__ void EM1( double * x,
double * y,
double * z,
double * vx,
double * vy,
double * vz,
double * r,
double * phi,
double * vr,
double * Er,
double * Ez,
double * Hphi,
double * charge,
double * m,
double * E,
int *parDelete,
const int parNum,
const double Rp,
const double Lp,
const double PHIp,
const int gridR,
const int gridZ,
const double mu,
const double c,
const double dr,
const double dz,
const double dt ) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum || parDelete[n]==1){
return;
}
double r0 = r[n];
double z0 = z[n];
int ar,az,a1,a2,a3,a4;
double Er0,Ez0,Hphi0;
// Er
ar = floor(r0/dr-0.5);
az = floor(z0/dz);
a1 = ar + az * gridR;
a2 = (ar+1) + az * gridR;
a3 = ar + (az+1) * gridR;
a4 = (ar+1) + (az+1) * gridR;
if( ar<0 ){
Er0 = ( Er[a2]*(az*dz+dz-z0) + Er[a4]*(z0-az*dz) )/dz;
}
else if( ar >= gridR-1 ){
Er0 = ( Er[a1]*(az*dz+dz-z0) + Er[a3]*(z0-az*dz) )/dz;
}
else{
Er0 = ( Er[a1]*((ar+1.5)*dr-r0)*((az+1)*dz-z0)
+ Er[a2]*(r0-(ar+0.5)*dr)*((az+1)*dz-z0)
+ Er[a3]*((ar+1.5)*dr-r0)*(z0-az*dz)
+ Er[a4]*(r0-(ar+0.5)*dr)*(z0-az*dz) )/(dr*dz);
}
// Ez
ar = floor(r0/dr);
az = floor(z0/dz-0.5);
a1 = ar + az * (gridR+1);
a2 = (ar+1) + az * (gridR+1);
a3 = ar + (az+1) * (gridR+1);
a4 = (ar+1) + (az+1) * (gridR+1);
if( az<0 ){
Ez0 = ( Ez[a3]*(ar*dr+dr-r0) + Ez[a4]*(r0-ar*dr) )/dr;
}
else if( az >= gridZ-1 ){
Ez0 = ( Ez[a1]*(ar*dr+dr-r0) + Ez[a2]*(r0-ar*dr) )/dr;
}
else{
Ez0 = ( Ez[a1]*((ar+1)*dr-r0)*((az+1.5)*dz-z0)
+ Ez[a2]*(r0-ar*dr)*((az+1.5)*dz-z0)
+ Ez[a3]*((ar+1)*dr-r0)*(z0-(az+0.5)*dz)
+ Ez[a4]*(r0-ar*dr)*(z0-(az+0.5)*dz) )/(dr*dz);
}
// Hphi
ar = floor(r0/dr-0.5);
az = floor(z0/dz-0.5);
a1 = ar + az * gridR;
a2 = (ar+1) + az * gridR;
a3 = ar + (az+1) * gridR;
a4 = (ar+1) + (az+1) * gridR;
if( ar<0 ){
if( az<0 ){
Hphi0 = Hphi[a4];
}
else if( az>=gridZ-1 ){
Hphi0 = Hphi[a2];
}
else{
Hphi0 = ( Hphi[a2]*((az+1.5)*dz-z0) + Hphi[a4]*(z0-(az+0.5)*dz) )/dz;
}
}
else if( ar>=gridR-1 ){
if( az<0 ){
Hphi0 = Hphi[a3];
}
else if( az>=gridZ-1 ){
Hphi0 = Hphi[a1];
}
else{
Hphi0 = ( Hphi[a1]*((az+1.5)*dz-z0) + Hphi[a3]*(z0-(az+0.5)*dz) )/dz;
}
}
else if( az<0 ){
Hphi0 = ( Hphi[a3]*((ar+1.5)*dr-r0) + Hphi[a4]*(r0-(ar+0.5)*dr) )/dr;
}
else if( az>=gridZ-1 ){
Hphi0 = ( Hphi[a1]*((ar+1.5)*dr-r0) + Hphi[a2]*(r0-(ar+0.5)*dr) )/dr;
}
else{
Hphi0 = ( Hphi[a1]*((ar+1.5)*dr-r0)*((az+1.5)*dz-z0)
+ Hphi[a2]*(r0-(ar+0.5)*dr)*((az+1.5)*dz-z0)
+ Hphi[a3]*((ar+1.5)*dr-r0)*(z0-(az+0.5)*dz)
+ Hphi[a4]*(r0-(ar+0.5)*dr)*(z0-(az+0.5)*dz) )/(dr*dz);
}
//F
double Fx,Fy,Fz,Fr;
Fr = charge[n] * (Er0 + (-vz[n]*Hphi0*mu));
Fz = charge[n] * (Ez0 + (vr[n]*Hphi0*mu));
Fx = Fr*cos(phi[n]);
Fy = Fr*sin(phi[n]);
//v
double gamma,ux,uy,uz;
gamma = 1/sqrt( 1-( vx[n]*vx[n] + vy[n]*vy[n] + vz[n]*vz[n] )/(c*c) );
ux = gamma * vx[n] + Fx/m[n]*dt;
uy = gamma * vy[n] + Fy/m[n]*dt;
uz = gamma * vz[n] + Fz/m[n]*dt;
gamma = sqrt( 1+ (ux*ux + uy*uy + uz*uz)/(c*c) );
E[n] = (gamma-1)*m[n]*c*c;
vx[n] = ux/gamma;
vy[n] = uy/gamma;
vz[n] = uz/gamma;
x[n] = x[n] + 0.5*dt*vx[n];
y[n] = y[n] + 0.5*dt*vy[n];
z[n] = z[n] + 0.5*dt*vz[n];
r[n] = sqrt( x[n]*x[n] + y[n]*y[n] );
phi[n] = atan( y[n]/x[n] );
if (r[n] > Rp){
parDelete[n] = 1;
}
double vx1;
while(phi[n]<0){
phi[n] = phi[n] + PHIp;
vx1 = vx[n] * cos(PHIp) - vy[n] * sin(PHIp);
vy[n] = vx[n] * sin(PHIp) + vy[n] * cos(PHIp);
vx[n] = vx1;
}
while(phi[n]>PHIp){
phi[n] = phi[n] - PHIp;
vx1 = vx[n] * cos(PHIp) + vy[n] * sin(PHIp);
vy[n] = -vx[n] * sin(PHIp) + vy[n] * cos(PHIp);
vx[n] = vx1;
}
x[n] = r[n] * cos(phi[n]);
y[n] = r[n] * sin(phi[n]);
if (z[n]>Lp){
z[n] = 2*Lp - z[n];
vz[n] = -vz[n];
}
if (z[n]<0){
z[n] = -z[n];
vz[n] = -vz[n];
}
}
__global__ void processMandelbrotElement(
double * x,
double * y,
double * z,
double * vx,
double * vy,
double * vz,
double * r,
double * phi,
double * vr,
double * Er,
double * Ez,
double * Hphi,
double * charge,
double * m,
double * E,
int *parDelete,
const int parNum,
const double Rp,
const double Lp,
const double PHIp,
const int gridR,
const int gridZ,
const double mu,
const double c,
const double dr,
const double dz,
const double dt ) {
EM1(x, y, z, vx, vy, vz, r, phi, vr, Er, Ez, Hphi, charge, m, E, parDelete, parNum, Rp, Lp, PHIp, gridR, gridZ, mu, c, dr, dz, dt);
}
|
#include <hip/hip_runtime.h>
//nvcc -ptx EM4.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
__device__ void EM1( double * x,
double * y,
double * z,
double * vx,
double * vy,
double * vz,
double * r,
double * phi,
double * vr,
double * Er,
double * Ez,
double * Hphi,
double * charge,
double * m,
double * E,
int *parDelete,
const int parNum,
const double Rp,
const double Lp,
const double PHIp,
const int gridR,
const int gridZ,
const double mu,
const double c,
const double dr,
const double dz,
const double dt ) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum || parDelete[n]==1){
return;
}
double r0 = r[n];
double z0 = z[n];
int ar,az,a1,a2,a3,a4;
double Er0,Ez0,Hphi0;
// Er
ar = floor(r0/dr-0.5);
az = floor(z0/dz);
a1 = ar + az * gridR;
a2 = (ar+1) + az * gridR;
a3 = ar + (az+1) * gridR;
a4 = (ar+1) + (az+1) * gridR;
if( ar<0 ){
Er0 = ( Er[a2]*(az*dz+dz-z0) + Er[a4]*(z0-az*dz) )/dz;
}
else if( ar >= gridR-1 ){
Er0 = ( Er[a1]*(az*dz+dz-z0) + Er[a3]*(z0-az*dz) )/dz;
}
else{
Er0 = ( Er[a1]*((ar+1.5)*dr-r0)*((az+1)*dz-z0)
+ Er[a2]*(r0-(ar+0.5)*dr)*((az+1)*dz-z0)
+ Er[a3]*((ar+1.5)*dr-r0)*(z0-az*dz)
+ Er[a4]*(r0-(ar+0.5)*dr)*(z0-az*dz) )/(dr*dz);
}
// Ez
ar = floor(r0/dr);
az = floor(z0/dz-0.5);
a1 = ar + az * (gridR+1);
a2 = (ar+1) + az * (gridR+1);
a3 = ar + (az+1) * (gridR+1);
a4 = (ar+1) + (az+1) * (gridR+1);
if( az<0 ){
Ez0 = ( Ez[a3]*(ar*dr+dr-r0) + Ez[a4]*(r0-ar*dr) )/dr;
}
else if( az >= gridZ-1 ){
Ez0 = ( Ez[a1]*(ar*dr+dr-r0) + Ez[a2]*(r0-ar*dr) )/dr;
}
else{
Ez0 = ( Ez[a1]*((ar+1)*dr-r0)*((az+1.5)*dz-z0)
+ Ez[a2]*(r0-ar*dr)*((az+1.5)*dz-z0)
+ Ez[a3]*((ar+1)*dr-r0)*(z0-(az+0.5)*dz)
+ Ez[a4]*(r0-ar*dr)*(z0-(az+0.5)*dz) )/(dr*dz);
}
// Hphi
ar = floor(r0/dr-0.5);
az = floor(z0/dz-0.5);
a1 = ar + az * gridR;
a2 = (ar+1) + az * gridR;
a3 = ar + (az+1) * gridR;
a4 = (ar+1) + (az+1) * gridR;
if( ar<0 ){
if( az<0 ){
Hphi0 = Hphi[a4];
}
else if( az>=gridZ-1 ){
Hphi0 = Hphi[a2];
}
else{
Hphi0 = ( Hphi[a2]*((az+1.5)*dz-z0) + Hphi[a4]*(z0-(az+0.5)*dz) )/dz;
}
}
else if( ar>=gridR-1 ){
if( az<0 ){
Hphi0 = Hphi[a3];
}
else if( az>=gridZ-1 ){
Hphi0 = Hphi[a1];
}
else{
Hphi0 = ( Hphi[a1]*((az+1.5)*dz-z0) + Hphi[a3]*(z0-(az+0.5)*dz) )/dz;
}
}
else if( az<0 ){
Hphi0 = ( Hphi[a3]*((ar+1.5)*dr-r0) + Hphi[a4]*(r0-(ar+0.5)*dr) )/dr;
}
else if( az>=gridZ-1 ){
Hphi0 = ( Hphi[a1]*((ar+1.5)*dr-r0) + Hphi[a2]*(r0-(ar+0.5)*dr) )/dr;
}
else{
Hphi0 = ( Hphi[a1]*((ar+1.5)*dr-r0)*((az+1.5)*dz-z0)
+ Hphi[a2]*(r0-(ar+0.5)*dr)*((az+1.5)*dz-z0)
+ Hphi[a3]*((ar+1.5)*dr-r0)*(z0-(az+0.5)*dz)
+ Hphi[a4]*(r0-(ar+0.5)*dr)*(z0-(az+0.5)*dz) )/(dr*dz);
}
//F
double Fx,Fy,Fz,Fr;
Fr = charge[n] * (Er0 + (-vz[n]*Hphi0*mu));
Fz = charge[n] * (Ez0 + (vr[n]*Hphi0*mu));
Fx = Fr*cos(phi[n]);
Fy = Fr*sin(phi[n]);
//v
double gamma,ux,uy,uz;
gamma = 1/sqrt( 1-( vx[n]*vx[n] + vy[n]*vy[n] + vz[n]*vz[n] )/(c*c) );
ux = gamma * vx[n] + Fx/m[n]*dt;
uy = gamma * vy[n] + Fy/m[n]*dt;
uz = gamma * vz[n] + Fz/m[n]*dt;
gamma = sqrt( 1+ (ux*ux + uy*uy + uz*uz)/(c*c) );
E[n] = (gamma-1)*m[n]*c*c;
vx[n] = ux/gamma;
vy[n] = uy/gamma;
vz[n] = uz/gamma;
x[n] = x[n] + 0.5*dt*vx[n];
y[n] = y[n] + 0.5*dt*vy[n];
z[n] = z[n] + 0.5*dt*vz[n];
r[n] = sqrt( x[n]*x[n] + y[n]*y[n] );
phi[n] = atan( y[n]/x[n] );
if (r[n] > Rp){
parDelete[n] = 1;
}
double vx1;
while(phi[n]<0){
phi[n] = phi[n] + PHIp;
vx1 = vx[n] * cos(PHIp) - vy[n] * sin(PHIp);
vy[n] = vx[n] * sin(PHIp) + vy[n] * cos(PHIp);
vx[n] = vx1;
}
while(phi[n]>PHIp){
phi[n] = phi[n] - PHIp;
vx1 = vx[n] * cos(PHIp) + vy[n] * sin(PHIp);
vy[n] = -vx[n] * sin(PHIp) + vy[n] * cos(PHIp);
vx[n] = vx1;
}
x[n] = r[n] * cos(phi[n]);
y[n] = r[n] * sin(phi[n]);
if (z[n]>Lp){
z[n] = 2*Lp - z[n];
vz[n] = -vz[n];
}
if (z[n]<0){
z[n] = -z[n];
vz[n] = -vz[n];
}
}
__global__ void processMandelbrotElement(
double * x,
double * y,
double * z,
double * vx,
double * vy,
double * vz,
double * r,
double * phi,
double * vr,
double * Er,
double * Ez,
double * Hphi,
double * charge,
double * m,
double * E,
int *parDelete,
const int parNum,
const double Rp,
const double Lp,
const double PHIp,
const int gridR,
const int gridZ,
const double mu,
const double c,
const double dr,
const double dz,
const double dt ) {
EM1(x, y, z, vx, vy, vz, r, phi, vr, Er, Ez, Hphi, charge, m, E, parDelete, parNum, Rp, Lp, PHIp, gridR, gridZ, mu, c, dr, dz, dt);
}
|
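The device-side math (sin, cos, sqrt, atan, floor) carries over without changes; only the runtime header differs. Since the pair shows no host code, the launcher below is purely a hedged sketch: the function name, the block size, and the assumption that all pointer arguments are device allocations are not taken from the source.

#include <hip/hip_runtime.h>
// Hypothetical host-side launch of one particle-push step; every pointer is assumed to be a valid
// hipMalloc allocation (particle arrays of parNum elements, field arrays sized to the grid).
void launch_em_step(double* x, double* y, double* z, double* vx, double* vy, double* vz,
                    double* r, double* phi, double* vr, double* Er, double* Ez, double* Hphi,
                    double* charge, double* m, double* E, int* parDelete, int parNum,
                    double Rp, double Lp, double PHIp, int gridR, int gridZ,
                    double mu, double c, double dr, double dz, double dt) {
    dim3 block(256, 1);                        // matches the blockDim.x*blockDim.y flattening in EM1
    dim3 grid((parNum + 255) / 256, 1);        // enough blocks to cover all particles
    processMandelbrotElement<<<grid, block>>>(x, y, z, vx, vy, vz, r, phi, vr, Er, Ez, Hphi,
                                              charge, m, E, parDelete, parNum, Rp, Lp, PHIp,
                                              gridR, gridZ, mu, c, dr, dz, dt);
    hipDeviceSynchronize();                    // block until the step has finished
}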
Convert the following CUDA code to AMD GPU code:
cuda
#include <unistd.h>
#include <stdio.h>
#define THREADS 256
#define START 0
#define END 1000
__global__ void sum(int* result) {
__shared__ int partials[THREADS];
int start = ((END - START) / blockDim.x) * threadIdx.x;
int end = start + ((END - START) / blockDim.x) - 1;
if (threadIdx.x == (THREADS - 1)) {
end = END;
}
partials[threadIdx.x] = 0;
for (int i = start; i <= end; i++) {
partials[threadIdx.x] += i;
}
int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i) {
partials[threadIdx.x] += partials[threadIdx.x + i];
}
i /= 2;
}
if (threadIdx.x == 0) {
*result = partials[0];
}
}
int main(void) {
int result;
int* gpu_result;
cudaMalloc((void**) &gpu_result, sizeof(int));
sum<<<1, THREADS>>>(gpu_result);
cudaMemcpy(&result, gpu_result, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_result);
printf("GPU sum = %d.\n", result);
int sum = 0;
for (int i = START; i <= END; i++) {
sum += i;
}
printf("CPU sum = %d.\n", sum);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <stdio.h>
#define THREADS 256
#define START 0
#define END 1000
__global__ void sum(int* result) {
__shared__ int partials[THREADS];
int start = ((END - START) / blockDim.x) * threadIdx.x;
int end = start + ((END - START) / blockDim.x) - 1;
if (threadIdx.x == (THREADS - 1)) {
end = END;
}
partials[threadIdx.x] = 0;
for (int i = start; i <= end; i++) {
partials[threadIdx.x] += i;
}
int i = blockDim.x / 2;
while (i != 0) {
if (threadIdx.x < i) {
partials[threadIdx.x] += partials[threadIdx.x + i];
}
i /= 2;
}
if (threadIdx.x == 0) {
*result = partials[0];
}
}
int main(void) {
int result;
int* gpu_result;
hipMalloc((void**) &gpu_result, sizeof(int));
sum<<<1, THREADS>>>(gpu_result);
hipMemcpy(&result, gpu_result, sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_result);
printf("GPU sum = %d.\n", result);
int sum = 0;
for (int i = START; i <= END; i++) {
sum += i;
}
printf("CPU sum = %d.\n", sum);
return 0;
}
|
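One thing the conversion cannot fix is a race already present in the CUDA original: the tree reduction reads partials[threadIdx.x + i] without a barrier after the serial accumulation and between halving steps, so threads in different warps/wavefronts may read stale values. A hedged corrected fragment (same variable names as the kernel above, offered as a sketch rather than as the original code) would be:

    // drop-in replacement for the reduction loop inside sum()
    __syncthreads();                        // make every thread's serial partial sum visible first
    int i = blockDim.x / 2;
    while (i != 0) {
        if (threadIdx.x < i) {
            partials[threadIdx.x] += partials[threadIdx.x + i];
        }
        __syncthreads();                    // finish this level before the next halving reads it
        i /= 2;
    }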
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<bits/stdc++.h>
#include <numeric>
#include<math.h>
//#define 10 10
using namespace std;
__global__ void cuda_add(int *da,int *db,int *dc)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
//dc[i] = da[i]=db[i]=0;
dc[i] = da[i]+db[i];
//printf("IN GLOBAL %d > %d %d %d \n",i,da[i],db[i],dc[i]);
}
int main()
{
int a[10],b[10],c[10];
cout<<"INITIALIZE ARRAY A";
for(int i=0;i<10;i++)
{
cin>>a[i];
}
cout<<"INITIALIZE ARRAY B";
for(int i=0;i<10;i++)
{
cin>>b[i];
}
int *da, *db, *dc;
cudaMalloc(&da,10*sizeof(int));
cudaMalloc(&db,10*sizeof(int));
cudaMalloc(&dc,10*sizeof(int));
cudaMemcpy(da,a,10*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(db,b,10*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dc,c,10*sizeof(int),cudaMemcpyHostToDevice);
cuda_add<<<2,(10/2)>>>(da,db,dc);
cudaMemcpy(c,dc,10*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<10;i++)
{
cout<<c[i]<<"\t";
}
cudaFree(da);
cudaFree(db);
cudaFree(dc);
return 0;
}
|
#include "hip/hip_runtime.h"
#include<bits/stdc++.h>
#include <numeric>
#include<math.h>
//#define 10 10
using namespace std;
__global__ void cuda_add(int *da,int *db,int *dc)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
//dc[i] = da[i]=db[i]=0;
dc[i] = da[i]+db[i];
//printf("IN GLOBAL %d > %d %d %d \n",i,da[i],db[i],dc[i]);
}
int main()
{
int a[10],b[10],c[10];
cout<<"INITIALIZE ARRAY A";
for(int i=0;i<10;i++)
{
cin>>a[i];
}
cout<<"INITIALIZE ARRAY B";
for(int i=0;i<10;i++)
{
cin>>b[i];
}
int *da, *db, *dc;
hipMalloc(&da,10*sizeof(int));
hipMalloc(&db,10*sizeof(int));
hipMalloc(&dc,10*sizeof(int));
hipMemcpy(da,a,10*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(db,b,10*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(dc,c,10*sizeof(int),hipMemcpyHostToDevice);
cuda_add<<<2,(10/2)>>>(da,db,dc);
hipMemcpy(c,dc,10*sizeof(int),hipMemcpyDeviceToHost);
for(int i=0;i<10;i++)
{
cout<<c[i]<<"\t";
}
hipFree(da);
hipFree(db);
hipFree(dc);
return 0;
}
|
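Both CUDA-specific headers (cuda_runtime.h and device_launch_parameters.h) collapse into the single hip/hip_runtime.h include. Note also that copying the uninitialized host array c to the device is unnecessary, since the kernel overwrites every element of dc. Launch errors are reported asynchronously, so a hedged post-launch check (not part of the pair) looks like:

#include <hip/hip_runtime.h>
#include <iostream>
// Hypothetical check to run right after the <<<...>>> launch; launch-configuration failures
// surface here rather than inside the following hipMemcpy.
inline void check_last_launch(const char* what) {
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        std::cerr << what << " failed: " << hipGetErrorString(err) << std::endl;
    }
}
// Usage sketch: cuda_add<<<2,(10/2)>>>(da,db,dc); check_last_launch("cuda_add");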
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <vector>
/**
* Several variations of a simple 3D 7-point Laplacian.
*
* Notes:
* - Since it is a very simple example which naturally fits the parallelization
 * model of CUDA, there are not many elaborate optimizations.
* - Using shared memory might help a lot once several stencils are fused to
* store intermediate values. In this example there is no/negligible performance
* improvement (depending on the GPU architecture).
*/
const size_t Nx = 128;
const size_t Ny = 512;
const size_t Nz = 512;
const size_t nrepeat = 100;
__host__ __device__ __forceinline__ int index(const int i, const int j,
const int k, const dim3 sizes) {
const int istride = 1;
const int jstride = sizes.x;
const int kstride = sizes.x * sizes.y;
return i * istride + j * jstride + k * kstride;
}
__host__ __device__ __forceinline__ int
index_strides(const int i, const int j, const int k, const dim3 strides) {
return i * strides.x + j * strides.y + k * strides.z;
}
/**
* Naive/non-optimized version.
*/
__global__ void laplace3d_no_ldg(double *d, double *n, const dim3 sizes,
const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index_strides(i, j, k, strides)] =
1. / 2. * ( //
(n[index_strides(i - 1, j, k, strides)]) //
+ (n[index_strides(i + 1, j, k, strides)]) //
+ (n[index_strides(i, j - 1, k, strides)]) //
+ (n[index_strides(i, j + 1, k, strides)]) //
+ (n[index_strides(i, j, k - 1, strides)]) //
+ (n[index_strides(i, j, k + 1, strides)]) //
- 6. * (n[index_strides(i, j, k, strides)]));
}
/**
 * Explicitly routing every read-only load through __ldg(), so the loads can use
 * the read-only data cache without relying on the compiler to infer it.
*/
__global__ void laplace3d_ldg(double *d, double *n, const dim3 sizes,
const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index_strides(i, j, k, strides)] =
1. / 2. *
( //
__ldg(&n[index_strides(i - 1, j, k, strides)]) //
+ __ldg(&n[index_strides(i + 1, j, k, strides)]) //
+ __ldg(&n[index_strides(i, j - 1, k, strides)]) //
+ __ldg(&n[index_strides(i, j + 1, k, strides)]) //
+ __ldg(&n[index_strides(i, j, k - 1, strides)]) //
+ __ldg(&n[index_strides(i, j, k + 1, strides)]) //
- 6. * __ldg(&n[index_strides(i, j, k, strides)]));
}
/**
* Relative indexing should reduce the number of integer computations which
* could have an impact.
*/
__global__ void laplace3d_relative_indexing(double *d, double *n,
const dim3 sizes,
const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
int index = index_strides(i, j, k, strides);
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index] = 1. / 2. * ( //
__ldg(&n[index - strides.x]) //
+ __ldg(&n[index + strides.x]) //
+ __ldg(&n[index - strides.y]) //
+ __ldg(&n[index + strides.y]) //
+ __ldg(&n[index - strides.z]) //
+ __ldg(&n[index + strides.z]) //
- 6. * __ldg(&n[index]));
}
/**
* Putting const __restrict__ on the read only pointers allows the compiler to
* automatically detect that the read-only data cache can be used (no need for
* explicit __ldg())
*/
__global__ void laplace3d_const_restrict(double *__restrict__ d,
const double *__restrict__ n,
const dim3 sizes, const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index_strides(i, j, k, strides)] =
1. / 2. * ( //
(n[index_strides(i - 1, j, k, strides)]) //
+ (n[index_strides(i + 1, j, k, strides)]) //
+ (n[index_strides(i, j - 1, k, strides)]) //
+ (n[index_strides(i, j + 1, k, strides)]) //
+ (n[index_strides(i, j, k - 1, strides)]) //
+ (n[index_strides(i, j, k + 1, strides)]) //
- 6. * (n[index_strides(i, j, k, strides)]));
}
__host__ __device__ __forceinline__ int index_smem(const int i, const int j,
const int k) {
return (i + 1) + (j + 1) * (blockDim.x + 2) +
(k + 1) * (blockDim.x + 2) * (blockDim.y + 2);
}
/**
* Shared memory is a per block scratch pad (user-managed cache), which usually
* is very beneficial for storing intermediate values.
*
* Here we just copy the local input of the stencil for each block into its
* buffer and read from the buffer.
* The halo region is filled by dedicated threads (first and last in all
* directions).
*
* Note: Another option would be to add extra threads for the halo points to
* each block and let them sleep for the actual computation.
*/
__global__ void laplace3d_smem(double *d, double *n, const dim3 sizes,
const dim3 strides) {
extern __shared__ double smem[];
// global indices
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// local indices
int ii = threadIdx.x;
int jj = threadIdx.y;
int kk = threadIdx.z;
// copy all elements of the compute domain into the shared mem buffer (on
// block level)
smem[index_smem(ii, jj, kk)] = __ldg(&n[index_strides(i, j, k, strides)]);
    // first and last threads (in all dimensions) copy the halo region into the
// shared mem buffer
if (ii == 0)
if (i > 0)
smem[index_smem(-1, jj, kk)] =
__ldg(&n[index_strides(i - 1, j, k, strides)]);
if (ii == blockDim.x - 1)
if (i < sizes.x - 1)
smem[index_smem(blockDim.x, jj, kk)] =
__ldg(&n[index_strides(i + 1, j, k, strides)]);
if (jj == 0)
if (j > 0)
smem[index_smem(ii, -1, kk)] =
__ldg(&n[index_strides(i, j - 1, k, strides)]);
if (jj == blockDim.y - 1)
if (j < sizes.y - 1)
smem[index_smem(ii, blockDim.y, kk)] =
__ldg(&n[index_strides(i, j + 1, k, strides)]);
if (kk == 0)
if (k > 0)
smem[index_smem(ii, jj, -1)] =
__ldg(&n[index_strides(i, j, k - 1, strides)]);
if (kk == blockDim.z - 1)
if (k < sizes.z - 1)
smem[index_smem(ii, jj, blockDim.z)] =
__ldg(&n[index_strides(i, j, k + 1, strides)]);
__syncthreads();
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
// read only from the shared mem buffer
d[index_strides(i, j, k, strides)] =
1. / 2. * ( //
smem[index_smem(ii - 1, jj, kk)] //
+ smem[index_smem(ii + 1, jj, kk)] //
+ smem[index_smem(ii, jj - 1, kk)] //
+ smem[index_smem(ii, jj + 1, kk)] //
+ smem[index_smem(ii, jj, kk - 1)] //
+ smem[index_smem(ii, jj, kk + 1)] //
- 6. * smem[index_smem(ii, jj, kk)]);
}
__global__ void laplace3d_smem_relative(double *__restrict__ d,
const double *__restrict__ n,
const dim3 sizes, const dim3 strides) {
extern __shared__ double smem[];
// global indices
const int i = threadIdx.x + blockIdx.x * blockDim.x;
const int j = threadIdx.y + blockIdx.y * blockDim.y;
const int k = threadIdx.z + blockIdx.z * blockDim.z;
// local indices
const int ii = threadIdx.x;
const int jj = threadIdx.y;
const int kk = threadIdx.z;
// local strides
const int is = blockDim.x;
const int js = blockDim.y;
const int ks = blockDim.z;
int glob_index = index_strides(i, j, k, strides);
int loc_index = index_smem(ii, jj, kk);
// copy all elements of the compute domain into the shared mem buffer (on
// block level)
smem[loc_index] = __ldg(&n[glob_index]);
    // first and last threads (in all dimensions) copy the halo region into the
// shared mem buffer
if (ii == 0)
if (i > 0)
smem[loc_index - is] = __ldg(&n[glob_index - strides.x]);
if (ii == blockDim.x - 1)
if (i < sizes.x - 1)
smem[loc_index + is] = __ldg(&n[glob_index + strides.x]);
if (jj == 0)
if (j > 0)
smem[loc_index - js] = __ldg(&n[glob_index - strides.y]);
if (jj == blockDim.y - 1)
if (j < sizes.y - 1)
smem[loc_index + js] = __ldg(&n[glob_index + strides.y]);
if (kk == 0)
if (k > 0)
smem[loc_index - ks] = __ldg(&n[glob_index - strides.z]);
if (kk == blockDim.z - 1)
if (k < sizes.z - 1)
smem[loc_index + ks] = __ldg(&n[glob_index + strides.z]);
__syncthreads();
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
// read only from the shared mem buffer
d[glob_index] = 1. / 2. * ( //
smem[loc_index - is] //
+ smem[loc_index + is] //
+ smem[loc_index - js] //
+ smem[loc_index + js] //
+ smem[loc_index - ks] //
+ smem[loc_index + ks] //
- 6. * smem[loc_index]);
}
__host__ __device__ __forceinline__ int index_smem2(const int i, const int j,
const int k) {
return i + j * (blockDim.x) + k * (blockDim.x) * (blockDim.y);
}
/**
* Using extra threads for copying halo points to
* each block and let them sleep for the actual computation.
*/
__global__ void laplace3d_smem2(double *d, double *n, const dim3 sizes,
const dim3 strides) {
extern __shared__ double smem[];
// global indices
int i = -1 + (int)(threadIdx.x + blockIdx.x * (blockDim.x - 2));
int j = -1 + (int)(threadIdx.y + blockIdx.y * (blockDim.y - 2));
int k = -1 + (int)(threadIdx.z + blockIdx.z * (blockDim.z - 2));
// local indices
int ii = threadIdx.x;
int jj = threadIdx.y;
int kk = threadIdx.z;
// copy all elements of the compute domain into the shared mem buffer
if (i >= 0 && i < sizes.x)
if (j >= 0 && j < sizes.y)
if (k >= 0 && k < sizes.z) {
smem[index_smem2(ii, jj, kk)] =
__ldg(&n[index_strides(i, j, k, strides)]);
}
__syncthreads();
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
if (ii > 0 && ii < blockDim.x - 1)
if (jj > 0 && jj < blockDim.y - 1)
if (kk > 0 && kk < blockDim.z - 1) {
d[index_strides(i, j, k, strides)] =
1. / 2. *
( //
smem[index_smem2(ii - 1, jj, kk)] //
+ smem[index_smem2(ii + 1, jj, kk)] //
+ smem[index_smem2(ii, jj - 1, kk)] //
+ smem[index_smem2(ii, jj + 1, kk)] //
+ smem[index_smem2(ii, jj, kk - 1)] //
+ smem[index_smem2(ii, jj, kk + 1)] //
- 6. * smem[index_smem2(ii, jj, kk)]);
}
}
void init(double *n, const dim3 sizes) {
for (size_t i = 0; i < sizes.x; ++i)
for (size_t j = 0; j < sizes.y; ++j)
for (size_t k = 0; k < sizes.z; ++k) {
n[index(i, j, k, sizes)] =
sin((double)i / ((double)sizes.x - 1.) * M_PI) *
sin((double)j / ((double)sizes.y - 1.) * M_PI) *
sin((double)k / ((double)sizes.z - 1.) * M_PI);
}
}
void print(double *n, const dim3 sizes) {
for (size_t i = 0; i < sizes.x; ++i) {
std::cout << (double)i / (double)(sizes.x - 1) << " \t"
<< -1. * n[index(i, sizes.y / 2, sizes.z / 2, sizes)] /
pow(2. * M_PI / sizes.x, 3)
<< std::endl;
}
}
float elapsed(cudaEvent_t &start, cudaEvent_t &stop) {
float result;
cudaEventElapsedTime(&result, start, stop);
return result;
}
enum class Variation {
LDG,
SHARED_MEM,
SHARED_MEM_REL,
NO_LDG,
RELATIVE,
CONST_RESTRICT,
SHARED_MEM2
};
std::ostream &operator<<(std::ostream &s, Variation const &var) {
switch (var) {
case Variation::NO_LDG:
s << "no optimization, ";
break;
case Variation::LDG:
s << "__ldg, ";
break;
case Variation::SHARED_MEM:
s << "shared memory, ";
break;
case Variation::SHARED_MEM_REL:
s << "shared memory relative, ";
break;
case Variation::SHARED_MEM2:
s << "shared memory v2, ";
break;
case Variation::RELATIVE:
s << "relative indexing, ";
break;
case Variation::CONST_RESTRICT:
s << "const __restrict__, ";
break;
default:
s << "n/a";
}
return s;
}
/**
* Warning: one of the stencils is modifying the threadsPerBlock.
*/
template <Variation Var>
void execute(dim3 threadsPerBlock, double *dd, double *dn) {
const dim3 sizes(Nx, Ny, Nz);
const dim3 strides(1, Nx, Nx * Ny);
cudaEvent_t start_;
cudaEvent_t stop_;
cudaEventCreate(&start_);
cudaEventCreate(&stop_);
int nBlocksX = Nx / threadsPerBlock.x;
int nBlocksY = Ny / threadsPerBlock.y;
int nBlocksZ = Nz / threadsPerBlock.z;
if (Nx % threadsPerBlock.x != 0) {
nBlocksX++;
throw std::runtime_error("there is a bug for non divisible sizes");
}
if (Ny % threadsPerBlock.y != 0) {
nBlocksY++;
throw std::runtime_error("there is a bug for non divisible sizes");
}
if (Nz % threadsPerBlock.z != 0) {
nBlocksZ++;
throw std::runtime_error("there is a bug for non divisible sizes");
}
dim3 nBlocks(nBlocksX, nBlocksY, nBlocksZ);
cudaEventRecord(start_, 0);
for (size_t i = 0; i < nrepeat; ++i) {
if (Var == Variation::SHARED_MEM) {
size_t smem_size = (threadsPerBlock.x + 2) *
(threadsPerBlock.y + 2) *
(threadsPerBlock.z + 2);
laplace3d_smem<<<nBlocks, threadsPerBlock,
smem_size * sizeof(double)>>>(dd, dn, sizes,
strides);
} else if (Var == Variation::SHARED_MEM_REL) {
size_t smem_size = (threadsPerBlock.x + 2) *
(threadsPerBlock.y + 2) *
(threadsPerBlock.z + 2);
laplace3d_smem_relative<<<nBlocks, threadsPerBlock,
smem_size * sizeof(double)>>>(
dd, dn, sizes, strides);
} else if (Var == Variation::SHARED_MEM2) {
size_t smem_size = (threadsPerBlock.x + 2) *
(threadsPerBlock.y + 2) *
(threadsPerBlock.z + 2);
if (smem_size <= 1024) {
dim3 enlargedBlock(threadsPerBlock.x + 2, threadsPerBlock.y + 2,
threadsPerBlock.z + 2);
laplace3d_smem2<<<nBlocks, enlargedBlock,
smem_size * sizeof(double)>>>(dd, dn, sizes,
strides);
}
} else if (Var == Variation::LDG)
laplace3d_ldg<<<nBlocks, threadsPerBlock>>>(dd, dn, sizes, strides);
else if (Var == Variation::NO_LDG)
laplace3d_no_ldg<<<nBlocks, threadsPerBlock>>>(dd, dn, sizes,
strides);
else if (Var == Variation::CONST_RESTRICT)
laplace3d_const_restrict<<<nBlocks, threadsPerBlock>>>(
dd, dn, sizes, strides);
else if (Var == Variation::RELATIVE)
laplace3d_relative_indexing<<<nBlocks, threadsPerBlock>>>(
dd, dn, sizes, strides);
}
cudaEventRecord(stop_, 0);
cudaEventSynchronize(stop_);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: %s in %s at line %d\n",
cudaGetErrorString(error), __FILE__, __LINE__);
exit(-1);
}
std::cout << "# Variation: " << Var;
std::cout << "threads/block = (" << threadsPerBlock.x << "/"
<< threadsPerBlock.y << "/" << threadsPerBlock.z << "), \t";
std::cout << "blocks = (" << nBlocks.x << "/" << nBlocks.y << "/"
<< nBlocks.z << "), \t";
std::cout << "time = " << elapsed(start_, stop_) / (float)nrepeat << "ms"
<< std::endl;
cudaEventDestroy(start_);
cudaEventDestroy(stop_);
}
bool verify(double *ref, double *out, dim3 sizes) {
for (size_t i = 1; i < sizes.x - 1; ++i)
for (size_t j = 1; j < sizes.y - 1; ++j)
for (size_t k = 1; k < sizes.z - 1; ++k) {
if (ref[i] != out[i]) {
std::cout << "in index " << i << " val=" << out[i]
<< ", ref=" << ref[i] << std::endl;
return false;
}
}
return true;
}
int main() {
dim3 sizes(Nx, Ny, Nz);
size_t total_size = Nx * Ny * Nz;
double *d = new double[total_size];
double *n = new double[total_size];
double *ref = new double[total_size];
init(n, sizes);
double *dd;
cudaMalloc(&dd, sizeof(double) * total_size);
double *dn;
cudaMalloc(&dn, sizeof(double) * total_size);
cudaMemcpy(dn, n, sizeof(double) * total_size, cudaMemcpyHostToDevice);
// execute one of variations to be used as a reference
execute<Variation::NO_LDG>(dim3(8, 8, 8), dd, dn);
cudaMemcpy(ref, dd, sizeof(double) * total_size, cudaMemcpyDeviceToHost);
std::vector<dim3> threadsPerBlock;
// the smem version does not work for non-full blocks, difficult exercise:
// threadsPerBlock.emplace_back(14, 6, 6);
threadsPerBlock.emplace_back(32, 4, 4);
threadsPerBlock.emplace_back(8, 8, 8);
threadsPerBlock.emplace_back(16, 8, 8);
threadsPerBlock.emplace_back(16, 4, 4);
for (auto dim : threadsPerBlock) {
execute<Variation::NO_LDG>(dim, dd, dn);
execute<Variation::LDG>(dim, dd, dn);
execute<Variation::CONST_RESTRICT>(dim, dd, dn);
execute<Variation::RELATIVE>(dim, dd, dn);
execute<Variation::SHARED_MEM>(dim, dd, dn);
execute<Variation::SHARED_MEM2>(dim, dd, dn);
execute<Variation::SHARED_MEM_REL>(dim, dd, dn);
}
cudaMemcpy(d, dd, sizeof(double) * total_size, cudaMemcpyDeviceToHost);
if (!verify(ref, d, sizes)) {
std::cout << "ERROR: the last executed variant didn't validate"
<< std::endl;
} else {
std::cout << "OK: last variation verified against the non-optimized "
"CUDA version"
<< std::endl;
}
// print(d, sizes);
delete[] d;
delete[] n;
delete[] ref;
cudaFree(dd);
cudaFree(dn);
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <vector>
/**
* Several variations of a simple 3D 7-point Laplacian.
*
* Notes:
* - Since it is a very simple example which naturally fits the parallelization
 * model of CUDA, there are not many elaborate optimizations.
* - Using shared memory might help a lot once several stencils are fused to
* store intermediate values. In this example there is no/negligible performance
* improvement (depending on the GPU architecture).
*/
const size_t Nx = 128;
const size_t Ny = 512;
const size_t Nz = 512;
const size_t nrepeat = 100;
__host__ __device__ __forceinline__ int index(const int i, const int j,
const int k, const dim3 sizes) {
const int istride = 1;
const int jstride = sizes.x;
const int kstride = sizes.x * sizes.y;
return i * istride + j * jstride + k * kstride;
}
__host__ __device__ __forceinline__ int
index_strides(const int i, const int j, const int k, const dim3 strides) {
return i * strides.x + j * strides.y + k * strides.z;
}
/**
* Naive/non-optimized version.
*/
__global__ void laplace3d_no_ldg(double *d, double *n, const dim3 sizes,
const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index_strides(i, j, k, strides)] =
1. / 2. * ( //
(n[index_strides(i - 1, j, k, strides)]) //
+ (n[index_strides(i + 1, j, k, strides)]) //
+ (n[index_strides(i, j - 1, k, strides)]) //
+ (n[index_strides(i, j + 1, k, strides)]) //
+ (n[index_strides(i, j, k - 1, strides)]) //
+ (n[index_strides(i, j, k + 1, strides)]) //
- 6. * (n[index_strides(i, j, k, strides)]));
}
/**
 * Explicitly routing every read-only load through __ldg(), so the loads can use
 * the read-only data cache without relying on the compiler to infer it.
*/
__global__ void laplace3d_ldg(double *d, double *n, const dim3 sizes,
const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index_strides(i, j, k, strides)] =
1. / 2. *
( //
__ldg(&n[index_strides(i - 1, j, k, strides)]) //
+ __ldg(&n[index_strides(i + 1, j, k, strides)]) //
+ __ldg(&n[index_strides(i, j - 1, k, strides)]) //
+ __ldg(&n[index_strides(i, j + 1, k, strides)]) //
+ __ldg(&n[index_strides(i, j, k - 1, strides)]) //
+ __ldg(&n[index_strides(i, j, k + 1, strides)]) //
- 6. * __ldg(&n[index_strides(i, j, k, strides)]));
}
/**
* Relative indexing should reduce the number of integer computations which
* could have an impact.
*/
__global__ void laplace3d_relative_indexing(double *d, double *n,
const dim3 sizes,
const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
int index = index_strides(i, j, k, strides);
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index] = 1. / 2. * ( //
__ldg(&n[index - strides.x]) //
+ __ldg(&n[index + strides.x]) //
+ __ldg(&n[index - strides.y]) //
+ __ldg(&n[index + strides.y]) //
+ __ldg(&n[index - strides.z]) //
+ __ldg(&n[index + strides.z]) //
- 6. * __ldg(&n[index]));
}
/**
* Putting const __restrict__ on the read only pointers allows the compiler to
* automatically detect that the read-only data cache can be used (no need for
* explicit __ldg())
*/
__global__ void laplace3d_const_restrict(double *__restrict__ d,
const double *__restrict__ n,
const dim3 sizes, const dim3 strides) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
d[index_strides(i, j, k, strides)] =
1. / 2. * ( //
(n[index_strides(i - 1, j, k, strides)]) //
+ (n[index_strides(i + 1, j, k, strides)]) //
+ (n[index_strides(i, j - 1, k, strides)]) //
+ (n[index_strides(i, j + 1, k, strides)]) //
+ (n[index_strides(i, j, k - 1, strides)]) //
+ (n[index_strides(i, j, k + 1, strides)]) //
- 6. * (n[index_strides(i, j, k, strides)]));
}
__host__ __device__ __forceinline__ int index_smem(const int i, const int j,
const int k) {
return (i + 1) + (j + 1) * (blockDim.x + 2) +
(k + 1) * (blockDim.x + 2) * (blockDim.y + 2);
}
/**
* Shared memory is a per block scratch pad (user-managed cache), which usually
* is very beneficial for storing intermediate values.
*
* Here we just copy the local input of the stencil for each block into its
* buffer and read from the buffer.
* The halo region is filled by dedicated threads (first and last in all
* directions).
*
* Note: Another option would be to add extra threads for the halo points to
* each block and let them sleep for the actual computation.
*/
__global__ void laplace3d_smem(double *d, double *n, const dim3 sizes,
const dim3 strides) {
extern __shared__ double smem[];
// global indices
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
int k = threadIdx.z + blockIdx.z * blockDim.z;
// local indices
int ii = threadIdx.x;
int jj = threadIdx.y;
int kk = threadIdx.z;
// copy all elements of the compute domain into the shared mem buffer (on
// block level)
smem[index_smem(ii, jj, kk)] = __ldg(&n[index_strides(i, j, k, strides)]);
    // first and last threads (in all dimensions) copy the halo region into the
// shared mem buffer
if (ii == 0)
if (i > 0)
smem[index_smem(-1, jj, kk)] =
__ldg(&n[index_strides(i - 1, j, k, strides)]);
if (ii == blockDim.x - 1)
if (i < sizes.x - 1)
smem[index_smem(blockDim.x, jj, kk)] =
__ldg(&n[index_strides(i + 1, j, k, strides)]);
if (jj == 0)
if (j > 0)
smem[index_smem(ii, -1, kk)] =
__ldg(&n[index_strides(i, j - 1, k, strides)]);
if (jj == blockDim.y - 1)
if (j < sizes.y - 1)
smem[index_smem(ii, blockDim.y, kk)] =
__ldg(&n[index_strides(i, j + 1, k, strides)]);
if (kk == 0)
if (k > 0)
smem[index_smem(ii, jj, -1)] =
__ldg(&n[index_strides(i, j, k - 1, strides)]);
if (kk == blockDim.z - 1)
if (k < sizes.z - 1)
smem[index_smem(ii, jj, blockDim.z)] =
__ldg(&n[index_strides(i, j, k + 1, strides)]);
__syncthreads();
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
// read only from the shared mem buffer
d[index_strides(i, j, k, strides)] =
1. / 2. * ( //
smem[index_smem(ii - 1, jj, kk)] //
+ smem[index_smem(ii + 1, jj, kk)] //
+ smem[index_smem(ii, jj - 1, kk)] //
+ smem[index_smem(ii, jj + 1, kk)] //
+ smem[index_smem(ii, jj, kk - 1)] //
+ smem[index_smem(ii, jj, kk + 1)] //
- 6. * smem[index_smem(ii, jj, kk)]);
}
__global__ void laplace3d_smem_relative(double *__restrict__ d,
const double *__restrict__ n,
const dim3 sizes, const dim3 strides) {
extern __shared__ double smem[];
// global indices
const int i = threadIdx.x + blockIdx.x * blockDim.x;
const int j = threadIdx.y + blockIdx.y * blockDim.y;
const int k = threadIdx.z + blockIdx.z * blockDim.z;
// local indices
const int ii = threadIdx.x;
const int jj = threadIdx.y;
const int kk = threadIdx.z;
// local strides
const int is = blockDim.x;
const int js = blockDim.y;
const int ks = blockDim.z;
int glob_index = index_strides(i, j, k, strides);
int loc_index = index_smem(ii, jj, kk);
// copy all elements of the compute domain into the shared mem buffer (on
// block level)
smem[loc_index] = __ldg(&n[glob_index]);
    // first and last threads (in all dimensions) copy the halo region into the
// shared mem buffer
if (ii == 0)
if (i > 0)
smem[loc_index - is] = __ldg(&n[glob_index - strides.x]);
if (ii == blockDim.x - 1)
if (i < sizes.x - 1)
smem[loc_index + is] = __ldg(&n[glob_index + strides.x]);
if (jj == 0)
if (j > 0)
smem[loc_index - js] = __ldg(&n[glob_index - strides.y]);
if (jj == blockDim.y - 1)
if (j < sizes.y - 1)
smem[loc_index + js] = __ldg(&n[glob_index + strides.y]);
if (kk == 0)
if (k > 0)
smem[loc_index - ks] = __ldg(&n[glob_index - strides.z]);
if (kk == blockDim.z - 1)
if (k < sizes.z - 1)
smem[loc_index + ks] = __ldg(&n[glob_index + strides.z]);
__syncthreads();
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
// read only from the shared mem buffer
d[glob_index] = 1. / 2. * ( //
smem[loc_index - is] //
+ smem[loc_index + is] //
+ smem[loc_index - js] //
+ smem[loc_index + js] //
+ smem[loc_index - ks] //
+ smem[loc_index + ks] //
- 6. * smem[loc_index]);
}
__host__ __device__ __forceinline__ int index_smem2(const int i, const int j,
const int k) {
return i + j * (blockDim.x) + k * (blockDim.x) * (blockDim.y);
}
/**
* Using extra threads for copying halo points to
* each block and let them sleep for the actual computation.
*/
__global__ void laplace3d_smem2(double *d, double *n, const dim3 sizes,
const dim3 strides) {
extern __shared__ double smem[];
// global indices
int i = -1 + (int)(threadIdx.x + blockIdx.x * (blockDim.x - 2));
int j = -1 + (int)(threadIdx.y + blockIdx.y * (blockDim.y - 2));
int k = -1 + (int)(threadIdx.z + blockIdx.z * (blockDim.z - 2));
// local indices
int ii = threadIdx.x;
int jj = threadIdx.y;
int kk = threadIdx.z;
// copy all elements of the compute domain into the shared mem buffer
if (i >= 0 && i < sizes.x)
if (j >= 0 && j < sizes.y)
if (k >= 0 && k < sizes.z) {
smem[index_smem2(ii, jj, kk)] =
__ldg(&n[index_strides(i, j, k, strides)]);
}
__syncthreads();
if (i > 0 && i < sizes.x - 1)
if (j > 0 && j < sizes.y - 1)
if (k > 0 && k < sizes.z - 1)
if (ii > 0 && ii < blockDim.x - 1)
if (jj > 0 && jj < blockDim.y - 1)
if (kk > 0 && kk < blockDim.z - 1) {
d[index_strides(i, j, k, strides)] =
1. / 2. *
( //
smem[index_smem2(ii - 1, jj, kk)] //
+ smem[index_smem2(ii + 1, jj, kk)] //
+ smem[index_smem2(ii, jj - 1, kk)] //
+ smem[index_smem2(ii, jj + 1, kk)] //
+ smem[index_smem2(ii, jj, kk - 1)] //
+ smem[index_smem2(ii, jj, kk + 1)] //
- 6. * smem[index_smem2(ii, jj, kk)]);
}
}
void init(double *n, const dim3 sizes) {
for (size_t i = 0; i < sizes.x; ++i)
for (size_t j = 0; j < sizes.y; ++j)
for (size_t k = 0; k < sizes.z; ++k) {
n[index(i, j, k, sizes)] =
sin((double)i / ((double)sizes.x - 1.) * M_PI) *
sin((double)j / ((double)sizes.y - 1.) * M_PI) *
sin((double)k / ((double)sizes.z - 1.) * M_PI);
}
}
void print(double *n, const dim3 sizes) {
for (size_t i = 0; i < sizes.x; ++i) {
std::cout << (double)i / (double)(sizes.x - 1) << " \t"
<< -1. * n[index(i, sizes.y / 2, sizes.z / 2, sizes)] /
pow(2. * M_PI / sizes.x, 3)
<< std::endl;
}
}
float elapsed(hipEvent_t &start, hipEvent_t &stop) {
float result;
hipEventElapsedTime(&result, start, stop);
return result;
}
enum class Variation {
LDG,
SHARED_MEM,
SHARED_MEM_REL,
NO_LDG,
RELATIVE,
CONST_RESTRICT,
SHARED_MEM2
};
std::ostream &operator<<(std::ostream &s, Variation const &var) {
switch (var) {
case Variation::NO_LDG:
s << "no optimization, ";
break;
case Variation::LDG:
s << "__ldg, ";
break;
case Variation::SHARED_MEM:
s << "shared memory, ";
break;
case Variation::SHARED_MEM_REL:
s << "shared memory relative, ";
break;
case Variation::SHARED_MEM2:
s << "shared memory v2, ";
break;
case Variation::RELATIVE:
s << "relative indexing, ";
break;
case Variation::CONST_RESTRICT:
s << "const __restrict__, ";
break;
default:
s << "n/a";
}
return s;
}
/**
* Warning: one of the stencils is modifying the threadsPerBlock.
*/
template <Variation Var>
void execute(dim3 threadsPerBlock, double *dd, double *dn) {
const dim3 sizes(Nx, Ny, Nz);
const dim3 strides(1, Nx, Nx * Ny);
hipEvent_t start_;
hipEvent_t stop_;
hipEventCreate(&start_);
hipEventCreate(&stop_);
int nBlocksX = Nx / threadsPerBlock.x;
int nBlocksY = Ny / threadsPerBlock.y;
int nBlocksZ = Nz / threadsPerBlock.z;
if (Nx % threadsPerBlock.x != 0) {
nBlocksX++;
throw std::runtime_error("there is a bug for non divisible sizes");
}
if (Ny % threadsPerBlock.y != 0) {
nBlocksY++;
throw std::runtime_error("there is a bug for non divisible sizes");
}
if (Nz % threadsPerBlock.z != 0) {
nBlocksZ++;
throw std::runtime_error("there is a bug for non divisible sizes");
}
dim3 nBlocks(nBlocksX, nBlocksY, nBlocksZ);
hipEventRecord(start_, 0);
for (size_t i = 0; i < nrepeat; ++i) {
if (Var == Variation::SHARED_MEM) {
size_t smem_size = (threadsPerBlock.x + 2) *
(threadsPerBlock.y + 2) *
(threadsPerBlock.z + 2);
laplace3d_smem<<<nBlocks, threadsPerBlock,
smem_size * sizeof(double)>>>(dd, dn, sizes,
strides);
} else if (Var == Variation::SHARED_MEM_REL) {
size_t smem_size = (threadsPerBlock.x + 2) *
(threadsPerBlock.y + 2) *
(threadsPerBlock.z + 2);
laplace3d_smem_relative<<<nBlocks, threadsPerBlock,
smem_size * sizeof(double)>>>(
dd, dn, sizes, strides);
} else if (Var == Variation::SHARED_MEM2) {
size_t smem_size = (threadsPerBlock.x + 2) *
(threadsPerBlock.y + 2) *
(threadsPerBlock.z + 2);
if (smem_size <= 1024) {
dim3 enlargedBlock(threadsPerBlock.x + 2, threadsPerBlock.y + 2,
threadsPerBlock.z + 2);
laplace3d_smem2<<<nBlocks, enlargedBlock,
smem_size * sizeof(double)>>>(dd, dn, sizes,
strides);
}
} else if (Var == Variation::LDG)
laplace3d_ldg<<<nBlocks, threadsPerBlock>>>(dd, dn, sizes, strides);
else if (Var == Variation::NO_LDG)
laplace3d_no_ldg<<<nBlocks, threadsPerBlock>>>(dd, dn, sizes,
strides);
else if (Var == Variation::CONST_RESTRICT)
laplace3d_const_restrict<<<nBlocks, threadsPerBlock>>>(
dd, dn, sizes, strides);
else if (Var == Variation::RELATIVE)
laplace3d_relative_indexing<<<nBlocks, threadsPerBlock>>>(
dd, dn, sizes, strides);
}
hipEventRecord(stop_, 0);
hipEventSynchronize(stop_);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: %s in %s at line %d\n",
hipGetErrorString(error), __FILE__, __LINE__);
exit(-1);
}
std::cout << "# Variation: " << Var;
std::cout << "threads/block = (" << threadsPerBlock.x << "/"
<< threadsPerBlock.y << "/" << threadsPerBlock.z << "), \t";
std::cout << "blocks = (" << nBlocks.x << "/" << nBlocks.y << "/"
<< nBlocks.z << "), \t";
std::cout << "time = " << elapsed(start_, stop_) / (float)nrepeat << "ms"
<< std::endl;
hipEventDestroy(start_);
hipEventDestroy(stop_);
}
bool verify(double *ref, double *out, dim3 sizes) {
for (size_t i = 1; i < sizes.x - 1; ++i)
for (size_t j = 1; j < sizes.y - 1; ++j)
for (size_t k = 1; k < sizes.z - 1; ++k) {
if (ref[i] != out[i]) {
std::cout << "in index " << i << " val=" << out[i]
<< ", ref=" << ref[i] << std::endl;
return false;
}
}
return true;
}
int main() {
dim3 sizes(Nx, Ny, Nz);
size_t total_size = Nx * Ny * Nz;
double *d = new double[total_size];
double *n = new double[total_size];
double *ref = new double[total_size];
init(n, sizes);
double *dd;
hipMalloc(&dd, sizeof(double) * total_size);
double *dn;
hipMalloc(&dn, sizeof(double) * total_size);
hipMemcpy(dn, n, sizeof(double) * total_size, hipMemcpyHostToDevice);
// execute one of variations to be used as a reference
execute<Variation::NO_LDG>(dim3(8, 8, 8), dd, dn);
hipMemcpy(ref, dd, sizeof(double) * total_size, hipMemcpyDeviceToHost);
std::vector<dim3> threadsPerBlock;
// the smem version does not work for non-full blocks, difficult exercise:
// threadsPerBlock.emplace_back(14, 6, 6);
threadsPerBlock.emplace_back(32, 4, 4);
threadsPerBlock.emplace_back(8, 8, 8);
threadsPerBlock.emplace_back(16, 8, 8);
threadsPerBlock.emplace_back(16, 4, 4);
for (auto dim : threadsPerBlock) {
execute<Variation::NO_LDG>(dim, dd, dn);
execute<Variation::LDG>(dim, dd, dn);
execute<Variation::CONST_RESTRICT>(dim, dd, dn);
execute<Variation::RELATIVE>(dim, dd, dn);
execute<Variation::SHARED_MEM>(dim, dd, dn);
execute<Variation::SHARED_MEM2>(dim, dd, dn);
execute<Variation::SHARED_MEM_REL>(dim, dd, dn);
}
hipMemcpy(d, dd, sizeof(double) * total_size, hipMemcpyDeviceToHost);
if (!verify(ref, d, sizes)) {
std::cout << "ERROR: the last executed variant didn't validate"
<< std::endl;
} else {
std::cout << "OK: last variation verified against the non-optimized "
"CUDA version"
<< std::endl;
}
// print(d, sizes);
delete[] d;
delete[] n;
delete[] ref;
hipFree(dd);
hipFree(dn);
}
|
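The dynamic shared-memory argument in the launch configuration works the same way under HIP, but the per-block limit differs between architectures, so the (threads+2)^3 buffers of the smem variants can exceed it for larger blocks. A hedged pre-flight check (the helper name and the printf are assumptions, not part of the pair) could be:

#include <hip/hip_runtime.h>
#include <cstdio>
// Hypothetical guard: verify the requested dynamic shared memory fits the device limit.
bool smem_fits(size_t bytes_requested, int device = 0) {
    int max_smem = 0;
    hipDeviceGetAttribute(&max_smem, hipDeviceAttributeMaxSharedMemoryPerBlock, device);
    if (bytes_requested > (size_t)max_smem) {
        printf("requested %zu bytes of shared memory, device allows %d\n",
               bytes_requested, max_smem);
        return false;
    }
    return true;
}
// Usage sketch: if (smem_fits(smem_size * sizeof(double))) { /* launch laplace3d_smem */ }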
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <cmath>
/*
Ryan McDonald
CSUF Spring 2021
CPSC 479 - Dr. Bein
*/
__global__
void squareMatrix(int* matrix, int* result, int N) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if (row < N && col < N)
{
for (int i = 0; i < N; i++)
{
sum += matrix[row * N + i] * matrix[i * N + col];
}
result[row * N + col] = sum;
}
}
__global__ void findLowestVal(int* matrix, int* lowestVal, int N) {
__shared__ int c[256];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int temporary = matrix[0];
while (index + offset < N)
{
temporary = min(temporary, matrix[index + offset]);
offset += stride;
}
c[threadIdx.x] = temporary;
int u = blockDim.x / 2;
while (u != 0) {
if (threadIdx.x < u) {
c[threadIdx.x] = min(c[threadIdx.x], c[threadIdx.x + u]);
}
__syncthreads();
u = u/2;
}
if (threadIdx.x == 0)
{
*lowestVal = min(*lowestVal, c[0]);
}
}
//Function to fill matrix with random values
void fill_matrix(int* matrix, int N) {
for (int i = 0; i < N; i++)
{
matrix[i] = rand() % 100;
}
}
int main(int argc, char* argv[]) {
//Set size of matrix [16x16] Square Matrix
int N = 16 * 16;
int* myMatrix;
int* myMatrixSquared;
int* lowestVal;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&lowestVal, sizeof(int));
cudaMallocManaged(&myMatrix, N * sizeof(int));
cudaMallocManaged(&myMatrixSquared, N * sizeof(int));
//Populate Matrix with random values
fill_matrix(myMatrix, N);
int blockSize = 16;
int numBlocks = (N + blockSize - 1) / blockSize;
findLowestVal <<<numBlocks, blockSize >>> (myMatrix, lowestVal, N);
dim3 threadsPerBlock(blockSize, blockSize);
dim3 blocksPerGrid(numBlocks, numBlocks);
squareMatrix<<<blocksPerGrid, threadsPerBlock >>> (myMatrix, myMatrixSquared, N);
//Waits for GPU to finish before accessing data on the host
cudaDeviceSynchronize();
//Free memory
cudaFree(lowestVal);
cudaFree(myMatrix);
cudaFree(myMatrixSquared);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <cmath>
/*
Ryan McDonald
CSUF Spring 2021
CPSC 479 - Dr. Bein
*/
__global__
void squareMatrix(int* matrix, int* result, int N) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
if (row < N && col < N)
{
for (int i = 0; i < N; i++)
{
sum += matrix[row * N + i] * matrix[i * N + col];
}
result[row * N + col] = sum;
}
}
__global__ void findLowestVal(int* matrix, int* lowestVal, int N) {
__shared__ int c[256];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int offset = 0;
int temporary = matrix[0];
while (index + offset < N)
{
temporary = min(temporary, matrix[index + offset]);
offset += stride;
}
c[threadIdx.x] = temporary;
int u = blockDim.x / 2;
while (u != 0) {
if (threadIdx.x < u) {
c[threadIdx.x] = min(c[threadIdx.x], c[threadIdx.x + u]);
}
__syncthreads();
u = u/2;
}
if (threadIdx.x == 0)
{
*lowestVal = min(*lowestVal, c[0]);
}
}
//Function to fill matrix with random values
void fill_matrix(int* matrix, int N) {
for (int i = 0; i < N; i++)
{
matrix[i] = rand() % 100;
}
}
int main(int argc, char* argv[]) {
//Set size of matrix [16x16] Square Matrix
int N = 16 * 16;
int* myMatrix;
int* myMatrixSquared;
int* lowestVal;
// Allocate Unified Memory – accessible from CPU or GPU
hipMallocManaged(&lowestVal, sizeof(int));
hipMallocManaged(&myMatrix, N * sizeof(int));
hipMallocManaged(&myMatrixSquared, N * sizeof(int));
//Populate Matrix with random values
fill_matrix(myMatrix, N);
int blockSize = 16;
int numBlocks = (N + blockSize - 1) / blockSize;
findLowestVal <<<numBlocks, blockSize >>> (myMatrix, lowestVal, N);
dim3 threadsPerBlock(blockSize, blockSize);
dim3 blocksPerGrid(numBlocks, numBlocks);
squareMatrix<<<blocksPerGrid, threadsPerBlock >>> (myMatrix, myMatrixSquared, N);
//Waits for GPU to finish before accessing data on the host
hipDeviceSynchronize();
//Free memory
hipFree(lowestVal);
hipFree(myMatrix);
hipFree(myMatrixSquared);
return 0;
}
|
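hipMallocManaged gives the same host-visible allocations as cudaMallocManaged, which also makes it easy to patch a weakness the conversion carries over: *lowestVal is folded into the block minima with min() but is never initialized. A hedged fix-up (an addition, not part of the pair) is to seed it from the host before the launch:

#include <hip/hip_runtime.h>
#include <climits>
// Hypothetical initialization; with managed memory the host can write the value directly.
void init_lowest(int* lowestVal) {
    *lowestVal = INT_MAX;   // identity element for min(), so the first comparison wins
}
// Usage sketch: hipMallocManaged(&lowestVal, sizeof(int)); init_lowest(lowestVal);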
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include <cuda.h>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <vector>
#include "device_launch_parameters.h"
using namespace std;
const int MAX_STRING_LENGTH = 256;
const int THREADS = 3;
const string DATA_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_dat_1.txt"; // 1, 2, 3
const string REZ_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_rez.txt"; // 1, 2, 3
struct BenchmarkGPU {
char Name[MAX_STRING_LENGTH];
int MSRP = -1;
double Score = -1;
};
void readGPUFile(BenchmarkGPU *data);
__global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, char* results);
__device__ void gpu_memset(char* dest, int add);
__device__ int gpu_strcat(char* dest, char* src, int offset, bool nLine);
int main() {
// Host
int n = 25;
BenchmarkGPU data[n];
readGPUFile(data);
char sresults[n * MAX_STRING_LENGTH];
int chunk_size = n / THREADS;
int count = 0;
// GPU
BenchmarkGPU* d_all_gpus;
int* d_count;
int* d_n;
int* d_chunk_size;
char* d_sresults;
// Memory allocation for GPU
cudaMalloc((void**)&d_all_gpus, n * sizeof(BenchmarkGPU));
cudaMalloc((void**)&d_sresults, n * sizeof(char) * MAX_STRING_LENGTH);
cudaMalloc((void**)&d_count, sizeof(int));
cudaMalloc((void**)&d_n, sizeof(int));
cudaMalloc((void**)&d_chunk_size, sizeof(int));
// Copies memory from CPU to GPU
cudaMemcpy(d_all_gpus, data, n * sizeof(BenchmarkGPU), cudaMemcpyHostToDevice);
cudaMemcpy(d_count, &count, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_chunk_size, &chunk_size, sizeof(int), cudaMemcpyHostToDevice);
sum_on_gpu<<<1,THREADS>>>(d_all_gpus, d_count, d_n, d_chunk_size, d_sresults);
cudaDeviceSynchronize();
// Copies memory from GPU to CPU
cudaMemcpy(&sresults, d_sresults, n * MAX_STRING_LENGTH * sizeof(char), cudaMemcpyDeviceToHost);
cudaMemcpy(&count, d_count, 1, cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_all_gpus);
cudaFree(d_count);
cudaFree(d_n);
cudaFree(d_chunk_size);
cudaFree(d_sresults);
cout << "Printing" << endl;
ofstream file;
file.open(REZ_FILE);
file << "Resultatai" << endl;
file << "" << endl;
if(count == 0)
file << "Neivienas objektas nepraejo filtro" << endl;
else
file << sresults << endl;
cout << "Finished" << endl;
return 0;
}
/**
* GPU
 * Grades each GPU in its chunk by MSRP / Score and appends the names that pass the filter to results
 * @param gpus BenchmarkGPUs list
 * @param count running character offset into results (advanced atomically)
 * @param n BenchmarkGPUs list size
 * @param chunk_size Items processed per thread
 * @param results Concatenated result strings
*/
__global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, char* results) {
int start_index = threadIdx.x * *chunk_size;
int end_index = start_index + 1 * *chunk_size;
if (threadIdx.x == blockDim.x -1)
end_index = *n;
printf("Thread: %d Start Index: %d End Index: %d\n", threadIdx.x, start_index, end_index);
for (int i = start_index; i < end_index; ++i) {
double my_number = gpus[i].MSRP / gpus[i].Score;
char tmp_res[256];
gpu_memset(tmp_res, 0);
tmp_res[0] = 'F';
tmp_res[1] = '-';
if(my_number < 70)
tmp_res[0] = 'E';
if(my_number < 60)
tmp_res[0] = 'D';
if(my_number < 50)
tmp_res[0] = 'C';
if(my_number < 40)
tmp_res[0] = 'B';
if(my_number < 30)
tmp_res[0] = 'A';
int cou = 2;
cou += gpu_strcat(tmp_res, gpus[i].Name, 2, true);
if(tmp_res[0] < 'E')
{
int index = atomicAdd(count, cou);
gpu_strcat(results, tmp_res,index, false);
// printf("Thread: %d Index: %d Result: %s ", threadIdx.x, cou, tmp.result);
}
}
}
__device__ int gpu_strcat(char* dest, char* src, int offset, bool nLine) {
int i = 0;
do {
if(src[i] == 0 )
{
if(nLine)
{
dest[offset + i] = '\n';
return i+1;
}
return i;
}
else
dest[offset + i] = src[i];
i++;
} while (i != MAX_STRING_LENGTH);
return i; // reached only when src has no terminator within MAX_STRING_LENGTH
}
__device__ void gpu_memset(char* dest, int add) {
for (int i = 0; i < MAX_STRING_LENGTH + add; ++i) {
dest[i] = 0;
}
}
void readGPUFile(BenchmarkGPU *data)
{
string line;
ifstream myfile;
myfile.open(DATA_FILE);
if(!myfile.is_open()) {
perror("Error open");
exit(EXIT_FAILURE);
}
int ch = 0;
int count = 0;
while(getline(myfile, line)) {
string::size_type pos;
pos=line.find(' ',0);
line = line.substr(pos+1);
switch (ch) {
case 0:
strcpy(data[count].Name, line.c_str());
break;
case 1:
data[count].MSRP = stoi(line);
break;
case 2:
data[count].Score = stoi(line);
count++;
ch = -1;
break;
}
ch++;
}
}
|
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <vector>
using namespace std;
const int MAX_STRING_LENGTH = 256;
const int THREADS = 3;
const string DATA_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_dat_1.txt"; // 1, 2, 3
const string REZ_FILE = "/home/lukasz/Documents/GitHub/Lygretus_Programavimas/lab3_cuda/IFF-8-8_ZumarasLukas_L1_rez.txt"; // 1, 2, 3
struct BenchmarkGPU {
char Name[MAX_STRING_LENGTH];
int MSRP = -1;
double Score = -1;
};
void readGPUFile(BenchmarkGPU *data);
__global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, char* results);
__device__ void gpu_memset(char* dest, int add);
__device__ int gpu_strcat(char* dest, char* src, int offset, bool nLine);
int main() {
// Host
int n = 25;
BenchmarkGPU data[n];
readGPUFile(data);
char sresults[n * MAX_STRING_LENGTH];
int chunk_size = n / THREADS;
int count = 0;
// GPU
BenchmarkGPU* d_all_gpus;
int* d_count;
int* d_n;
int* d_chunk_size;
char* d_sresults;
// Memory allocation for GPU
hipMalloc((void**)&d_all_gpus, n * sizeof(BenchmarkGPU));
hipMalloc((void**)&d_sresults, n * sizeof(char) * MAX_STRING_LENGTH);
hipMalloc((void**)&d_count, sizeof(int));
hipMalloc((void**)&d_n, sizeof(int));
hipMalloc((void**)&d_chunk_size, sizeof(int));
// Copies memory from CPU to GPU
hipMemcpy(d_all_gpus, data, n * sizeof(BenchmarkGPU), hipMemcpyHostToDevice);
hipMemcpy(d_count, &count, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_n, &n, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_chunk_size, &chunk_size, sizeof(int), hipMemcpyHostToDevice);
sum_on_gpu<<<1,THREADS>>>(d_all_gpus, d_count, d_n, d_chunk_size, d_sresults);
hipDeviceSynchronize();
// Copies memory from GPU to CPU
hipMemcpy(&sresults, d_sresults, n * MAX_STRING_LENGTH * sizeof(char), hipMemcpyDeviceToHost);
hipMemcpy(&count, d_count, sizeof(int), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_all_gpus);
hipFree(d_count);
hipFree(d_n);
hipFree(d_chunk_size);
hipFree(d_sresults);
cout << "Printing" << endl;
ofstream file;
file.open(REZ_FILE);
file << "Resultatai" << endl;
file << "" << endl;
if(count == 0)
file << "Neivienas objektas nepraejo filtro" << endl;
else
file << sresults << endl;
cout << "Finished" << endl;
return 0;
}
/**
* GPU
 * Grades each GPU in its chunk by MSRP / Score and appends the names that pass the filter to results
 * @param gpus BenchmarkGPUs list
 * @param count running character offset into results (advanced atomically)
 * @param n BenchmarkGPUs list size
 * @param chunk_size Items processed per thread
 * @param results Concatenated result strings
*/
__global__ void sum_on_gpu(BenchmarkGPU* gpus, int* count, int* n, int* chunk_size, char* results) {
int start_index = threadIdx.x * *chunk_size;
int end_index = start_index + 1 * *chunk_size;
if (threadIdx.x == blockDim.x -1)
end_index = *n;
printf("Thread: %d Start Index: %d End Index: %d\n", threadIdx.x, start_index, end_index);
for (int i = start_index; i < end_index; ++i) {
double my_number = gpus[i].MSRP / gpus[i].Score;
char tmp_res[256];
gpu_memset(tmp_res, 0);
tmp_res[0] = 'F';
tmp_res[1] = '-';
if(my_number < 70)
tmp_res[0] = 'E';
if(my_number < 60)
tmp_res[0] = 'D';
if(my_number < 50)
tmp_res[0] = 'C';
if(my_number < 40)
tmp_res[0] = 'B';
if(my_number < 30)
tmp_res[0] = 'A';
int cou = 2;
cou += gpu_strcat(tmp_res, gpus[i].Name, 2, true);
if(tmp_res[0] < 'E')
{
int index = atomicAdd(count, cou);
gpu_strcat(results, tmp_res,index, false);
// printf("Thread: %d Index: %d Result: %s ", threadIdx.x, cou, tmp.result);
}
}
}
__device__ int gpu_strcat(char* dest, char* src, int offset, bool nLine) {
int i = 0;
do {
if(src[i] == 0 )
{
if(nLine)
{
dest[offset + i] = '\n';
return i+1;
}
return i;
}
else
dest[offset + i] = src[i];
i++;
} while (i != MAX_STRING_LENGTH);
return i; // reached only when src has no terminator within MAX_STRING_LENGTH
}
__device__ void gpu_memset(char* dest, int add) {
for (int i = 0; i < MAX_STRING_LENGTH + add; ++i) {
dest[i] = 0;
}
}
void readGPUFile(BenchmarkGPU *data)
{
string line;
ifstream myfile;
myfile.open(DATA_FILE);
if(!myfile.is_open()) {
perror("Error open");
exit(EXIT_FAILURE);
}
int ch = 0;
int count = 0;
while(getline(myfile, line)) {
string::size_type pos;
pos=line.find(' ',0);
line = line.substr(pos+1);
switch (ch) {
case 0:
strcpy(data[count].Name, line.c_str());
break;
case 1:
data[count].MSRP = stoi(line);
break;
case 2:
data[count].Score = stoi(line);
count++;
ch = -1;
break;
}
ch++;
}
}
|
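A note on the pair above: neither side checks API return codes. As a minimal, illustrative sketch (an assumption on my part, not part of the original problem or answer), the HIP calls can be wrapped with a small helper built on the standard hipGetErrorString/hipSuccess symbols:
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
// Hypothetical helper: abort with a readable message when a HIP runtime call fails.
static void checkHip(hipError_t err, const char* what) {
    if (err != hipSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// usage (names taken from the answer above): checkHip(hipMalloc((void**)&d_count, sizeof(int)), "hipMalloc d_count");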
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/fill.h>
// examples
// [1] https://github.com/thrust/thrust/wiki/Quick-Start-Guide
void thrust_reduce_by_key(thrust::device_vector<int> &d_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len);
void reduce_by_key_v1(thrust::device_vector<int> &d_keys,
thrust::device_vector<int> &d_norep_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len);
__global__ void reduce_by_key_kernel_v1 (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output);
__global__ void reduce_by_key_kernel_more (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output);
int main(void)
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("Device name: %s\n", prop.name);
cudaSetDevice(0);
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl;
//-----------------------------------------------------------------------//
std::cout << "initialize keys on the host\n";
const int Len = 2047;
const int Ksize = 124;
int REAP = Len / Ksize;
thrust::host_vector<int> h_keys(Len);
thrust::host_vector<float> h_array(Len);
for(int i=0; i<Len; i++) {
//int ii = i / 7;
int ii = i / REAP;
int k = Ksize - (ii + 1); // repeat the value
if(k <0) k =0;
h_keys[i] = k;
h_array[i] = (float) k;
//std::cout << h_keys[i] << " ";
}
//std::cout << std::endl;
//-----------------------------------------------------------------------//
std::cout << "no-repeat keys in order\n";
thrust::host_vector<int> h_norep_keys;
int prev, curr;
for(int i=0; i<Len; i++) {
if(i==0) {
prev = h_keys[i];
h_norep_keys.push_back(prev);
continue;
}
curr = h_keys[i];
if(curr != prev) {
h_norep_keys.push_back(curr);
prev = curr;
}
}
std::cout << "no repeat keys size = " << h_norep_keys.size() << std::endl;
if(h_norep_keys.size() != Ksize) {
std::cerr << "Ksize is not equal to norep_keys.size()\n";
return -1;
}
for(int i=0; i<h_norep_keys.size(); i++) {
std::cout << h_norep_keys[i] << " ";
}
std::cout << std::endl;
//-----------------------------------------------------------------------//
std::cout << "\ncopy host to device\n";
thrust::device_vector<int> d_keys=h_keys;
thrust::device_vector<int> d_norep_keys=h_norep_keys;
thrust::device_vector<float> d_array=h_array;
//-----------------------------------------------------------------------//
std::cout << "\ntesting thrust::reduce_by_key\n";
thrust_reduce_by_key(d_keys, d_array, Ksize, Len);
//-----------------------------------------------------------------------//
std::cout << "\ntesting customized reduce_by_key (v1)\n";
reduce_by_key_v1(d_keys, d_norep_keys, d_array, Ksize, Len);
return 0;
}
void thrust_reduce_by_key(thrust::device_vector<int> &d_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float local_ms;
float gputime_ms = 0.f;
// initialize output
thrust::device_vector<int> d_out_keys(Ksize, 0);
thrust::device_vector<float> d_out_vals(Ksize, 0.f);
for(int reps=0; reps<100; reps++)
{
local_ms = 0.f;
cudaEventRecord(start, 0);
thrust::reduce_by_key(d_keys.begin(),
d_keys.end(),
d_array.begin(),
d_out_keys.begin(),
d_out_vals.begin()
);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&local_ms, start, stop);
gputime_ms += local_ms;
}
printf("(thrust::reduce_by_key) runtime = %lf (ms)\n", gputime_ms * 0.01);
thrust::host_vector<int> h_out_keys;
thrust::host_vector<float> h_out_vals;
h_out_keys = d_out_keys;
h_out_vals = d_out_vals;
//---------------------//
// check output
//---------------------//
std::cout << "\noutput keys\n";
for(int i=0; i<h_out_keys.size(); i++) {
std::cout << h_out_keys[i] << " ";
}
std::cout << std::endl;
std::cout << "\noutput vals\n";
for(int i=0; i<h_out_vals.size(); i++) {
std::cout << h_out_vals[i] << " ";
}
std::cout << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
__global__ void reduce_by_key_kernel_v1 (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output)
{
__shared__ int sm_keys[256];
__shared__ float sm_vals[256];
int lid = threadIdx.x;
if(lid < Ksize) { // 124
sm_keys[lid] = g_norep_keys[lid];
sm_vals[lid] = 0.f;
}
__syncthreads();
if(lid < Len) {
int my_key = g_keys[lid];
float my_val = g_input[lid];
for(int i=0; i<Ksize; i++) {
if(my_key == sm_keys[i]) {
atomicAdd(&sm_vals[i], my_val);
break;
}
}
}
__syncthreads();
if(lid < Ksize) {
g_output[lid] = sm_vals[lid];
}
}
__global__ void reduce_by_key_kernel_more (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output)
{
__shared__ int sm_keys[1024];
__shared__ float sm_vals[1024];
int lid = threadIdx.x;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
// norep_keys should be smaller than 1024, and the size of shared memory
if(lid < Ksize) {
sm_keys[lid] = g_norep_keys[lid];
sm_vals[lid] = 0.f;
}
__syncthreads();
if(gid < Len) {
int my_key = g_keys[gid];
float my_val = g_input[gid];
for(int i=0; i<Ksize; i++) {
if(my_key == sm_keys[i]) {
atomicAdd(&sm_vals[i], my_val);
break;
}
}
}
__syncthreads();
if(lid < Ksize) {
atomicAdd(&g_output[lid], sm_vals[lid]);
}
}
void reduce_by_key_v1(thrust::device_vector<int> &d_keys,
thrust::device_vector<int> &d_norep_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float local_ms;
float gputime_ms = 0.f;
// initialize output
thrust::device_vector<float> d_out_vals(Ksize, 0.f);
int *g_keys = thrust::raw_pointer_cast(d_keys.data());
int *g_norep_keys = thrust::raw_pointer_cast(d_norep_keys.data());
float *g_input = thrust::raw_pointer_cast(d_array.data());
float *g_output = thrust::raw_pointer_cast(d_out_vals.data());
if(Len > 1024) {
dim3 Grds(1,1,1);
dim3 Blks(1024,1,1);
Grds.x = (int) (Len + 1023) / 1024;
// Measure the runtime
for(int reps=0; reps<100; reps++)
{
thrust::fill(d_out_vals.begin(), d_out_vals.end(), 0.f);
local_ms = 0.f;
cudaEventRecord(start, 0);
reduce_by_key_kernel_more <<< Grds, Blks >>> (g_keys,
g_norep_keys,
g_input,
Ksize,
Len,
g_output // this needs to be initialized to zeros
);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&local_ms, start, stop);
gputime_ms += local_ms;
}
printf("(reduce_by_key_more) runtime = %lf (ms)\n", gputime_ms * 0.01);
}else{
dim3 Grds(1,1,1);
dim3 Blks(Len,1,1);
// Measure the runtime
for(int reps=0; reps<100; reps++)
{
local_ms = 0.f;
cudaEventRecord(start, 0);
// NOTE: need to write a template for customization
reduce_by_key_kernel_v1 <<< Grds, Blks >>> (g_keys,
g_norep_keys,
g_input,
Ksize,
Len,
g_output
);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&local_ms, start, stop);
gputime_ms += local_ms;
}
printf("(reduce_by_key_v1) runtime = %lf (ms)\n", gputime_ms * 0.01);
}
thrust::host_vector<float> h_out_vals;
h_out_vals = d_out_vals;
//---------------------//
// check output
//---------------------//
std::cout << "\noutput vals\n";
for(int i=0; i<h_out_vals.size(); i++) {
std::cout << h_out_vals[i] << " ";
}
std::cout << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/fill.h>
// examples
// [1] https://github.com/thrust/thrust/wiki/Quick-Start-Guide
void thrust_reduce_by_key(thrust::device_vector<int> &d_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len);
void reduce_by_key_v1(thrust::device_vector<int> &d_keys,
thrust::device_vector<int> &d_norep_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len);
__global__ void reduce_by_key_kernel_v1 (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output);
__global__ void reduce_by_key_kernel_more (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output);
int main(void)
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("Device name: %s\n", prop.name);
hipSetDevice(0);
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl;
//-----------------------------------------------------------------------//
std::cout << "initialize keys on the host\n";
const int Len = 2047;
const int Ksize = 124;
int REAP = Len / Ksize;
thrust::host_vector<int> h_keys(Len);
thrust::host_vector<float> h_array(Len);
for(int i=0; i<Len; i++) {
//int ii = i / 7;
int ii = i / REAP;
int k = Ksize - (ii + 1); // repeat the value
if(k <0) k =0;
h_keys[i] = k;
h_array[i] = (float) k;
//std::cout << h_keys[i] << " ";
}
//std::cout << std::endl;
//-----------------------------------------------------------------------//
std::cout << "no-repeat keys in order\n";
thrust::host_vector<int> h_norep_keys;
int prev, curr;
for(int i=0; i<Len; i++) {
if(i==0) {
prev = h_keys[i];
h_norep_keys.push_back(prev);
continue;
}
curr = h_keys[i];
if(curr != prev) {
h_norep_keys.push_back(curr);
prev = curr;
}
}
std::cout << "no repeat keys size = " << h_norep_keys.size() << std::endl;
if(h_norep_keys.size() != Ksize) {
std::cerr << "Ksize is not equal to norep_keys.size()\n";
return -1;
}
for(int i=0; i<h_norep_keys.size(); i++) {
std::cout << h_norep_keys[i] << " ";
}
std::cout << std::endl;
//-----------------------------------------------------------------------//
std::cout << "\ncopy host to device\n";
thrust::device_vector<int> d_keys=h_keys;
thrust::device_vector<int> d_norep_keys=h_norep_keys;
thrust::device_vector<float> d_array=h_array;
//-----------------------------------------------------------------------//
std::cout << "\ntesting thrust::reduce_by_key\n";
thrust_reduce_by_key(d_keys, d_array, Ksize, Len);
//-----------------------------------------------------------------------//
std::cout << "\ntesting customized reduce_by_key (v1)\n";
reduce_by_key_v1(d_keys, d_norep_keys, d_array, Ksize, Len);
return 0;
}
void thrust_reduce_by_key(thrust::device_vector<int> &d_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float local_ms;
float gputime_ms = 0.f;
// initialize output
thrust::device_vector<int> d_out_keys(Ksize, 0);
thrust::device_vector<float> d_out_vals(Ksize, 0.f);
for(int reps=0; reps<100; reps++)
{
local_ms = 0.f;
hipEventRecord(start, 0);
thrust::reduce_by_key(d_keys.begin(),
d_keys.end(),
d_array.begin(),
d_out_keys.begin(),
d_out_vals.begin()
);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&local_ms, start, stop);
gputime_ms += local_ms;
}
printf("(thrust::reduce_by_key) runtime = %lf (ms)\n", gputime_ms * 0.01);
thrust::host_vector<int> h_out_keys;
thrust::host_vector<float> h_out_vals;
h_out_keys = d_out_keys;
h_out_vals = d_out_vals;
//---------------------//
// check output
//---------------------//
std::cout << "\noutput keys\n";
for(int i=0; i<h_out_keys.size(); i++) {
std::cout << h_out_keys[i] << " ";
}
std::cout << std::endl;
std::cout << "\noutput vals\n";
for(int i=0; i<h_out_vals.size(); i++) {
std::cout << h_out_vals[i] << " ";
}
std::cout << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
}
__global__ void reduce_by_key_kernel_v1 (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output)
{
__shared__ int sm_keys[256];
__shared__ float sm_vals[256];
int lid = threadIdx.x;
if(lid < Ksize) { // 124
sm_keys[lid] = g_norep_keys[lid];
sm_vals[lid] = 0.f;
}
__syncthreads();
if(lid < Len) {
int my_key = g_keys[lid];
float my_val = g_input[lid];
for(int i=0; i<Ksize; i++) {
if(my_key == sm_keys[i]) {
atomicAdd(&sm_vals[i], my_val);
break;
}
}
}
__syncthreads();
if(lid < Ksize) {
g_output[lid] = sm_vals[lid];
}
}
__global__ void reduce_by_key_kernel_more (int* g_keys,
int* g_norep_keys,
float* g_input,
const int Ksize,
const int Len,
float* g_output)
{
__shared__ int sm_keys[1024];
__shared__ float sm_vals[1024];
int lid = threadIdx.x;
int gid = threadIdx.x + blockIdx.x * blockDim.x;
// norep_keys should be smaller than 1024, and the size of shared memory
if(lid < Ksize) {
sm_keys[lid] = g_norep_keys[lid];
sm_vals[lid] = 0.f;
}
__syncthreads();
if(gid < Len) {
int my_key = g_keys[gid];
float my_val = g_input[gid];
for(int i=0; i<Ksize; i++) {
if(my_key == sm_keys[i]) {
atomicAdd(&sm_vals[i], my_val);
break;
}
}
}
__syncthreads();
if(lid < Ksize) {
atomicAdd(&g_output[lid], sm_vals[lid]);
}
}
void reduce_by_key_v1(thrust::device_vector<int> &d_keys,
thrust::device_vector<int> &d_norep_keys,
thrust::device_vector<float> &d_array,
const int Ksize, const int Len)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float local_ms;
float gputime_ms = 0.f;
// initialize output
thrust::device_vector<float> d_out_vals(Ksize, 0.f);
int *g_keys = thrust::raw_pointer_cast(d_keys.data());
int *g_norep_keys = thrust::raw_pointer_cast(d_norep_keys.data());
float *g_input = thrust::raw_pointer_cast(d_array.data());
float *g_output = thrust::raw_pointer_cast(d_out_vals.data());
if(Len > 1024) {
dim3 Grds(1,1,1);
dim3 Blks(1024,1,1);
Grds.x = (int) (Len + 1023) / 1024;
// Measure the runtime
for(int reps=0; reps<100; reps++)
{
thrust::fill(d_out_vals.begin(), d_out_vals.end(), 0.f);
local_ms = 0.f;
hipEventRecord(start, 0);
reduce_by_key_kernel_more <<< Grds, Blks >>> (g_keys,
g_norep_keys,
g_input,
Ksize,
Len,
g_output // this needs to be initialized to zeros
);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&local_ms, start, stop);
gputime_ms += local_ms;
}
printf("(reduce_by_key_more) runtime = %lf (ms)\n", gputime_ms * 0.01);
}else{
dim3 Grds(1,1,1);
dim3 Blks(Len,1,1);
// Measure the runtime
for(int reps=0; reps<100; reps++)
{
local_ms = 0.f;
hipEventRecord(start, 0);
// NOTE: need to write a template for customization
reduce_by_key_kernel_v1 <<< Grds, Blks >>> (g_keys,
g_norep_keys,
g_input,
Ksize,
Len,
g_output
);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&local_ms, start, stop);
gputime_ms += local_ms;
}
printf("(reduce_by_key_v1) runtime = %lf (ms)\n", gputime_ms * 0.01);
}
thrust::host_vector<float> h_out_vals;
h_out_vals = d_out_vals;
//---------------------//
// check output
//---------------------//
std::cout << "\noutput vals\n";
for(int i=0; i<h_out_vals.size(); i++) {
std::cout << h_out_vals[i] << " ";
}
std::cout << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
}
|
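The event-based timing in the pair above maps one-to-one from cudaEvent* to hipEvent*. A minimal sketch of the same pattern gathered into a reusable helper (the struct and its names are my own illustration, not code from the pair):
#include <hip/hip_runtime.h>
// Illustrative timer built on the HIP event API used above.
struct GpuTimer {
    hipEvent_t start{}, stop{};
    GpuTimer()  { hipEventCreate(&start); hipEventCreate(&stop); }
    ~GpuTimer() { hipEventDestroy(start); hipEventDestroy(stop); }
    void begin() { hipEventRecord(start, 0); }
    float end_ms() {               // elapsed milliseconds since begin()
        hipEventRecord(stop, 0);
        hipEventSynchronize(stop);
        float ms = 0.f;
        hipEventElapsedTime(&ms, start, stop);
        return ms;
    }
};
// usage: GpuTimer t; t.begin(); kernel<<<grid, block>>>(...); float ms = t.end_ms();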
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#include <queue>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
using namespace std;
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
#define NUM_THREAD 512
__constant__ int _mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
unsigned char sig[8];
FILE* infile;
infile = fopen(filename, "rb");
fread(sig, 1, 8, infile);
if (!png_check_sig(sig, 8))
return 1; /* bad signature */
png_structp png_ptr;
png_infop info_ptr;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
return 4; /* out of memory */
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return 4; /* out of memory */
}
png_init_io(png_ptr, infile);
png_set_sig_bytes(png_ptr, 8);
png_read_info(png_ptr, info_ptr);
int bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
png_uint_32 i, rowbytes;
png_bytep row_pointers[*height];
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
*channels = (int) png_get_channels(png_ptr, info_ptr);
if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return 3;
}
for (i = 0; i < *height; ++i)
row_pointers[i] = *image + i * rowbytes;
png_read_image(png_ptr, row_pointers);
png_read_end(png_ptr, NULL);
return 0;
}
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
const unsigned channels) {
FILE* fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep row_ptr[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
}
png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
__global__ void sobel (unsigned char* s, unsigned char* t, unsigned height, unsigned width, unsigned channels, size_t pitch, int y) {
int x, i, v, u;
int R, G, B;
double val[MASK_N*3] = {0.0};
const int adjustX=1, adjustY=1, xBound=2, yBound=2;
x = (blockIdx.x * NUM_THREAD + threadIdx.x)%width;
__shared__ int mask[MASK_N][MASK_X][MASK_Y];
mask[(x/25)%2][(x/5)%5][x%5] = _mask[(x/25)%2][(x/5)%5][x%5];
__syncthreads();
for (i = 0; i < MASK_N; ++i) {
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
#pragma unroll 5
for (u = -xBound; u < xBound + adjustX; ++u) {
int valid = (x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height;
R = s[((pitch * (y+v) + (x+u)*channels) + 2)*valid]*valid;
G = s[((pitch * (y+v) + (x+u)*channels) + 1)*valid]*valid;
B = s[((pitch * (y+v) + (x+u)*channels) + 0)*valid]*valid;
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
#pragma unroll 5
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = min(255.0,totalR);
const unsigned char cG = min(255.0,totalG);
const unsigned char cB = min(255.0,totalB);
t[(pitch * y + x*channels) + 2] = cR;
t[(pitch * y + x*channels) + 1] = cG;
t[(pitch * y + x*channels) + 0] = cB;
}
queue<int> q;
mutex qLock;
mutex cvLock;
condition_variable cv;
void png_writer(int n,png_structp png_ptr,png_bytep* row_ptr,unsigned char* host_t,unsigned char* device_t,size_t pitch,int width,int height,int channels){
while(n--){
if(q.empty()){
unique_lock<mutex> ul(cvLock);
cv.wait(ul);
}
qLock.lock();
int i = q.front();
q.pop();
qLock.unlock();
// cudaMemcpy2D(host_t+sizeof(char)*width*channels*i,sizeof(char)*width*channels,device_t+pitch*i,pitch,width*channels,1,cudaMemcpyDeviceToHost);
png_write_row(png_ptr,host_t + i * width * channels * sizeof(unsigned char));
}
}
int main(int argc, char** argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char* host_s = NULL;
read_png(argv[1], &host_s, &height, &width, &channels);
unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
unsigned char* device_s;
unsigned char* device_t;
size_t pitch;
cudaMallocPitch(&device_s,&pitch,sizeof(unsigned char)*width*channels,height);
cudaMallocPitch(&device_t,&pitch,sizeof(unsigned char)*width*channels,height);
FILE* fp = fopen(argv[2], "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep* row_ptr = new png_bytep[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = host_t + i * width * channels * sizeof(unsigned char);
}
thread t(png_writer,height,png_ptr,row_ptr,host_t,device_t,pitch,width,height,channels);
cudaStream_t streams[height];
cudaMemcpy2D(device_s,pitch,host_s,sizeof(char)*width*channels,width*channels,height,cudaMemcpyHostToDevice);
for(int i=0;i<height;i++){
cudaStreamCreate(&streams[i]);
sobel<<<width/NUM_THREAD+1,NUM_THREAD,sizeof(int)*50,streams[i]>>> (device_s, device_t, height, width, channels, pitch, i);
cudaMemcpy2DAsync(host_t+sizeof(char)*width*channels*i,sizeof(char)*width*channels,device_t+pitch*i,pitch,width*channels,1,cudaMemcpyDeviceToHost,streams[i]); // lock_guard<mutex> lg(qLock);
// q.push(i);
// cv.notify_all();
}
for(int i=0;i<height;i++){
cudaStreamSynchronize(streams[i]);
q.push(i);
cv.notify_all();
}
t.join();
cudaFree(device_s);
cudaFree(device_t);
// png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#include <queue>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
using namespace std;
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
#define NUM_THREAD 512
__constant__ int _mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
unsigned char sig[8];
FILE* infile;
infile = fopen(filename, "rb");
fread(sig, 1, 8, infile);
if (!png_check_sig(sig, 8))
return 1; /* bad signature */
png_structp png_ptr;
png_infop info_ptr;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
if (!png_ptr)
return 4; /* out of memory */
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return 4; /* out of memory */
}
png_init_io(png_ptr, infile);
png_set_sig_bytes(png_ptr, 8);
png_read_info(png_ptr, info_ptr);
int bit_depth, color_type;
png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
png_uint_32 i, rowbytes;
png_bytep row_pointers[*height];
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
*channels = (int) png_get_channels(png_ptr, info_ptr);
if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return 3;
}
for (i = 0; i < *height; ++i)
row_pointers[i] = *image + i * rowbytes;
png_read_image(png_ptr, row_pointers);
png_read_end(png_ptr, NULL);
return 0;
}
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
const unsigned channels) {
FILE* fp = fopen(filename, "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep row_ptr[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
}
png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
}
__global__ void sobel (unsigned char* s, unsigned char* t, unsigned height, unsigned width, unsigned channels, size_t pitch, int y) {
int x, i, v, u;
int R, G, B;
double val[MASK_N*3] = {0.0};
const int adjustX=1, adjustY=1, xBound=2, yBound=2;
x = (blockIdx.x * NUM_THREAD + threadIdx.x)%width;
__shared__ int mask[MASK_N][MASK_X][MASK_Y];
mask[(x/25)%2][(x/5)%5][x%5] = _mask[(x/25)%2][(x/5)%5][x%5];
__syncthreads();
for (i = 0; i < MASK_N; ++i) {
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
#pragma unroll 5
for (u = -xBound; u < xBound + adjustX; ++u) {
int valid = (x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height;
R = s[((pitch * (y+v) + (x+u)*channels) + 2)*valid]*valid;
G = s[((pitch * (y+v) + (x+u)*channels) + 1)*valid]*valid;
B = s[((pitch * (y+v) + (x+u)*channels) + 0)*valid]*valid;
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
#pragma unroll 5
for (i = 0; i < MASK_N; ++i) {
totalR += val[i * 3 + 2] * val[i * 3 + 2];
totalG += val[i * 3 + 1] * val[i * 3 + 1];
totalB += val[i * 3 + 0] * val[i * 3 + 0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = min(255.0,totalR);
const unsigned char cG = min(255.0,totalG);
const unsigned char cB = min(255.0,totalB);
t[(pitch * y + x*channels) + 2] = cR;
t[(pitch * y + x*channels) + 1] = cG;
t[(pitch * y + x*channels) + 0] = cB;
}
queue<int> q;
mutex qLock;
mutex cvLock;
condition_variable cv;
void png_writer(int n,png_structp png_ptr,png_bytep* row_ptr,unsigned char* host_t,unsigned char* device_t,size_t pitch,int width,int height,int channels){
while(n--){
if(q.empty()){
unique_lock<mutex> ul(cvLock);
cv.wait(ul);
}
qLock.lock();
int i = q.front();
q.pop();
qLock.unlock();
// cudaMemcpy2D(host_t+sizeof(char)*width*channels*i,sizeof(char)*width*channels,device_t+pitch*i,pitch,width*channels,1,cudaMemcpyDeviceToHost);
png_write_row(png_ptr,host_t + i * width * channels * sizeof(unsigned char));
}
}
int main(int argc, char** argv) {
assert(argc == 3);
unsigned height, width, channels;
unsigned char* host_s = NULL;
read_png(argv[1], &host_s, &height, &width, &channels);
unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
unsigned char* device_s;
unsigned char* device_t;
size_t pitch;
hipMallocPitch(&device_s,&pitch,sizeof(unsigned char)*width*channels,height);
hipMallocPitch(&device_t,&pitch,sizeof(unsigned char)*width*channels,height);
FILE* fp = fopen(argv[2], "wb");
png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
png_infop info_ptr = png_create_info_struct(png_ptr);
png_init_io(png_ptr, fp);
png_set_IHDR(png_ptr, info_ptr, width, height, 8,
PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
png_set_filter(png_ptr, 0, PNG_NO_FILTERS);
png_write_info(png_ptr, info_ptr);
png_set_compression_level(png_ptr, 1);
png_bytep* row_ptr = new png_bytep[height];
for (int i = 0; i < height; ++ i) {
row_ptr[i] = host_t + i * width * channels * sizeof(unsigned char);
}
thread t(png_writer,height,png_ptr,row_ptr,host_t,device_t,pitch,width,height,channels);
hipStream_t streams[height];
hipMemcpy2D(device_s,pitch,host_s,sizeof(char)*width*channels,width*channels,height,hipMemcpyHostToDevice);
for(int i=0;i<height;i++){
hipStreamCreate(&streams[i]);
sobel<<<width/NUM_THREAD+1,NUM_THREAD,sizeof(int)*50,streams[i]>>> (device_s, device_t, height, width, channels, pitch, i);
hipMemcpy2DAsync(host_t+sizeof(char)*width*channels*i,sizeof(char)*width*channels,device_t+pitch*i,pitch,width*channels,1,hipMemcpyDeviceToHost,streams[i]); // lock_guard<mutex> lg(qLock);
// q.push(i);
// cv.notify_all();
}
for(int i=0;i<height;i++){
hipStreamSynchronize(streams[i]);
q.push(i);
cv.notify_all();
}
t.join();
hipFree(device_s);
hipFree(device_t);
// png_write_image(png_ptr, row_ptr);
png_write_end(png_ptr, NULL);
png_destroy_write_struct(&png_ptr, &info_ptr);
fclose(fp);
return 0;
}
|
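One detail carried over unchanged in the pair above: a stream is created per image row but never released. A fragment (an assumption, not from the original code) showing the matching cleanup with hipStreamDestroy:
// After the per-row work has been synchronized, release the streams.
for (int i = 0; i < height; i++) {
    hipStreamDestroy(streams[i]);
}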
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void ExactResampleKernel_1toN(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
if (id < size)
{
//output point coordinates
int px = id % outputWidth;
int py = id / outputWidth;
int xRatio = outputWidth / inputWidth;
int yRatio = outputHeight / inputHeight;
//corresponding coordinates in the original image
int x = px / xRatio;
int y = py / yRatio;
output[py * outputWidth + px] = input[y * inputWidth + x];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void ExactResampleKernel_1toN(float *input, float *output, int inputWidth, int inputHeight, int outputWidth, int outputHeight)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int size = outputWidth * outputHeight;
if (id < size)
{
//output point coordinates
int px = id % outputWidth;
int py = id / outputWidth;
int xRatio = outputWidth / inputWidth;
int yRatio = outputHeight / inputHeight;
//corresponding coordinates in the original image
int x = px / xRatio;
int y = py / yRatio;
output[py * outputWidth + px] = input[y * inputWidth + x];
}
}
|
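Since the pair above contains only the kernel, here is a minimal host driver sketch for the HIP side. Everything in it (sizes, buffer names, the printf) is an illustration assuming the kernel definition above is in the same translation unit; hipcc accepts both the CUDA-style <<<>>> launch kept in the answer and the hipLaunchKernelGGL form shown in the comment:
#include <hip/hip_runtime.h>
#include <vector>
#include <cstdio>
int main() {
    const int inW = 4, inH = 4, outW = 8, outH = 8;     // illustrative sizes
    std::vector<float> h_in(inW * inH, 1.0f), h_out(outW * outH, 0.0f);
    float *d_in = nullptr, *d_out = nullptr;
    hipMalloc(&d_in, h_in.size() * sizeof(float));
    hipMalloc(&d_out, h_out.size() * sizeof(float));
    hipMemcpy(d_in, h_in.data(), h_in.size() * sizeof(float), hipMemcpyHostToDevice);
    dim3 block(256);
    dim3 grid((outW * outH + block.x - 1) / block.x);
    ExactResampleKernel_1toN<<<grid, block>>>(d_in, d_out, inW, inH, outW, outH);
    // equivalent: hipLaunchKernelGGL(ExactResampleKernel_1toN, grid, block, 0, 0, d_in, d_out, inW, inH, outW, outH);
    hipMemcpy(h_out.data(), d_out, h_out.size() * sizeof(float), hipMemcpyDeviceToHost);
    printf("out[0] = %f\n", h_out[0]);
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}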
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
// questions:
// 1. if I had a bunch of vectors to add in succession, would there be any benefit in keeping the memory allocated
// and doing all the additions before freeing memory? What would be the impact?
// 2. given that thread creation has some overhead, would it make sense to do multiple additions per thread instead of one? What is optimal num_add?
// todo: modify this code and use timer to see which parameters are optimal for card
// 3. if i pass a device memory location into *result for cudaAdd, does it store the result on the card? (could probs google this, but... todo: try it out)
__global__ void addKernel(float *x1, float*x2, float *result, int length){
//get unique index on which to compute
int index = threadIdx.x + blockIdx.x * blockDim.x;
//add vectors on this index
if(index < length){
result[index] = x1[index] + x2[index];
}
}
void cudaAdd(const float *x1, const float *x2, float *result, int length){
//set block size, i.e. number of threads in each block
int block_size = 512;
//set grid size, i.e. number of blocks
int grid_size = length / block_size + 1;
//allocate some memory on the device
float *d_x1, *d_x2, *d_result;
size_t num_bytes = length*sizeof(float);
cudaMalloc(&d_x1, num_bytes);
cudaMalloc(&d_x2, num_bytes);
cudaMalloc(&d_result, num_bytes);
//copy data to the device
cudaMemcpy(d_x1, x1, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_x2, x2, num_bytes, cudaMemcpyHostToDevice);
//start the kernel
addKernel<<<grid_size, block_size>>>(d_x1, d_x2, d_result, length);
//copy data back from the device
cudaMemcpy(result, d_result, num_bytes, cudaMemcpyDeviceToHost);
//free memory
cudaFree(d_x1);
cudaFree(d_x2);
cudaFree(d_result);
}
void serialAdd(const float *x1, const float *x2, float *result, int length){
for(int i = 0; i < length; i++){
result[i] = x1[i] + x2[i];
}
}
int main(){
//create and initialize vectors
int size = 60;
int num_bytes = size * sizeof(float);
float *x1 = (float*) malloc(num_bytes);
float *x2 = (float*) malloc(num_bytes);
float *result = (float*) malloc(num_bytes);
for(int i = 0; i < size; i++){
x1[i] = i;
x2[i] = i*i;
}
//call cudaAdd
cudaAdd(x1, x2, result, size);
//print result
for(int i = 0; i < size; i++){
printf("%f", result[i]);
printf("\n");
}
//free allocated memory
free(x1);
free(x2);
free(result);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
// questions:
// 1. if I had a bunch of vectors to add in succession, would there be any benefit in keeping the memory allocated
// and doing all the additions before freeing memory? What would be the impact?
// 2. given that thread creation has some overhead, would it make sense to do multiple additions per thread instead of one? What is optimal num_add?
// todo: modify this code and use timer to see which parameters are optimal for card
// 3. if i pass a device memory location into *result for cudaAdd, does it store the result on the card? (could probs google this, but... todo: try it out)
__global__ void addKernel(float *x1, float*x2, float *result, int length){
//get unique index on which to compute
int index = threadIdx.x + blockIdx.x * blockDim.x;
//add vectors on this index
if(index < length){
result[index] = x1[index] + x2[index];
}
}
void cudaAdd(const float *x1, const float *x2, float *result, int length){
//set block size, i.e. number of threads in each block
int block_size = 512;
//set grid size, i.e. number of blocks
int grid_size = length / block_size + 1;
//allocate some memory on the device
float *d_x1, *d_x2, *d_result;
size_t num_bytes = length*sizeof(float);
hipMalloc(&d_x1, num_bytes);
hipMalloc(&d_x2, num_bytes);
hipMalloc(&d_result, num_bytes);
//copy data to the device
hipMemcpy(d_x1, x1, num_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_x2, x2, num_bytes, hipMemcpyHostToDevice);
//start the kernel
addKernel<<<grid_size, block_size>>>(d_x1, d_x2, d_result, length);
//copy data back from the device
hipMemcpy(result, d_result, num_bytes, hipMemcpyDeviceToHost);
//free memory
hipFree(d_x1);
hipFree(d_x2);
hipFree(d_result);
}
void serialAdd(const float *x1, const float *x2, float *result, int length){
for(int i = 0; i < length; i++){
result[i] = x1[i] + x2[i];
}
}
int main(){
//create and initialize vectors
int size = 60;
int num_bytes = size * sizeof(float);
float *x1 = (float*) malloc(num_bytes);
float *x2 = (float*) malloc(num_bytes);
float *result = (float*) malloc(num_bytes);
for(int i = 0; i < size; i++){
x1[i] = i;
x2[i] = i*i;
}
//call cudaAdd
cudaAdd(x1, x2, result, size);
//print result
for(int i = 0; i < size; i++){
printf("%f", result[i]);
printf("\n");
}
//free allocated memory
free(x1);
free(x2);
free(result);
}
|
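A small side note on the launch configuration in the pair above: grid_size = length / block_size + 1 launches one extra, fully idle block whenever length is an exact multiple of block_size (harmless here because the kernel bounds-checks). The usual ceiling division avoids it; the helper below is my own illustration, not part of the original code:
// Hypothetical helper: number of blocks needed to cover `length` elements.
static inline int blocks_for(int length, int block_size) {
    return (length + block_size - 1) / block_size;
}
// usage: int grid_size = blocks_for(length, block_size);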
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void addKernel(int *c, const int *a, const int *b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void addKernel(int *c, const int *a, const int *b, int size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size) {
c[i] = a[i] + b[i];
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
using namespace std;
void SimpleIntegerAdd();
void TestIntegerArrayAdd();
int main(int argc, char *argv[])
{
// SimpleIntegerAdd();
TestIntegerArrayAdd();
}
|
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
void SimpleIntegerAdd();
void TestIntegerArrayAdd();
int main(int argc, char *argv[])
{
// SimpleIntegerAdd();
TestIntegerArrayAdd();
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
//vector
int main(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << "\n";
// print contents of H
for (int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << "\n";
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << "\n";
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for (int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << "\n";
// H and D are automatically deleted when the function returns
return 0;
}
//*/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <iostream>
int main(void)
{
int i(0);
thrust::device_vector<int> D(10, 1); // initialize all ten integers of a device_vector to 1
thrust::fill(D.begin() + 3, D.begin() + 6, 0); // set elements D[3], D[4], D[5] to 0
thrust::host_vector<int> H(D.size());
thrust::copy(D.begin(), D.end(), H.begin()); // copy all of D into H
thrust::sequence(H.begin(), H.end()); // set the elements of H to 0, 1, 2, 3, ...
// print D
for (thrust::device_vector <int>::const_iterator it = D.begin(); it != D.end(); ++it)
{
std::cout << "D[" << i << "] = " << *it << "\t";
i++;
}
for (int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << "\t";
return 0;
}
|
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <iostream>
//vector
int main(void)
{
// H has storage for 4 integers
thrust::host_vector<int> H(4);
// initialize individual elements
H[0] = 14;
H[1] = 20;
H[2] = 38;
H[3] = 46;
// H.size() returns the size of vector H
std::cout << "H has size " << H.size() << "\n";
// print contents of H
for (int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << "\n";
// resize H
H.resize(2);
std::cout << "H now has size " << H.size() << "\n";
// Copy host_vector H to device_vector D
thrust::device_vector<int> D = H;
// elements of D can be modified
D[0] = 99;
D[1] = 88;
// print contents of D
for (int i = 0; i < D.size(); i++)
std::cout << "D[" << i << "] = " << D[i] << "\n";
// H and D are automatically deleted when the function returns
return 0;
}
//*/
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <iostream>
int main(void)
{
int i(0);
thrust::device_vector<int> D(10, 1); // initialize all ten integers of a device_vector to 1
thrust::fill(D.begin() + 3, D.begin() + 6, 0); // set elements D[3], D[4], D[5] to 0
thrust::host_vector<int> H(D.size());
thrust::copy(D.begin(), D.end(), H.begin()); // copy all of D into H
thrust::sequence(H.begin(), H.end()); // set the elements of H to 0, 1, 2, 3, ...
// print D
for (thrust::device_vector <int>::const_iterator it = D.begin(); it != D.end(); ++it)
{
std::cout << "D[" << i << "] = " << *it << "\t";
i++;
}
for (int i = 0; i < H.size(); i++)
std::cout << "H[" << i << "] = " << H[i] << "\t";
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <sys/time.h>
using namespace std;
// ############### COMMON ###############
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
inline double seconds()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
void initialData(double *data, int size)
// Khởi tạo vector ban đầu có kích thước size, kiểu double và giá random trong khoảng [0, 1]
{
srand(0);
for (int i = 0; i < size; i++)
{
data[i] = (double)(rand()) / RAND_MAX;
}
}
double sumGPU(double *data, int size){
double sum = 0;
for(int i = 0; i < size; i++){
sum += data[i];
}
return sum;
}
// ############### Device(CPU) ###############
// Hàm thực hiện reduce trên CPU
double recursiveReduce(double *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
for (int i = 0; i < stride; i++){
data[i] += data[i + stride];
}
return recursiveReduce(data, stride);
}
// ############### Device(GPU) ###############
// Neighbored Pair phân kỳ
__global__ void reduceNeighbored (double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Chuyển từ con trỏ toàn cục sang con trỏ của block này
double *idata = g_idata + blockIdx.x * blockDim.x;
// Kiểm tra nếu vượt qua kích thước mảng
if (idx >= n) return;
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// Đồng bộ hóa trong một threadBlock
__syncthreads();
}
// Ghi kết quả cho block này vào bộ nhớ toàn cục
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Neighbored Pair cài đặt với ít phân kỳ bằng cách thực thi trong một block
__global__ void reduceNeighboredLess (double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Chuyển từ con trỏ toàn cục sang con trỏ của block này
double *idata = g_idata + blockIdx.x * blockDim.x;
// Kiểm tra nếu vượt qua kích thước mảng
if(idx >= n) return;
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
// Chuyển tid sang bộ nhớ của một block (register - thanh ghi)
int index = 2 * stride * tid;
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
// Đồng bộ hóa trong một threadBlock
__syncthreads();
}
// Ghi kết quả cho block này vào bộ nhớ toàn cục
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Interleaved Pair Implementation with less divergence
__global__ void reduceInterleaved (double *g_idata, double *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Chuyển từ con trỏ toàn cục sang con trỏ của block này
double *idata = g_idata + blockIdx.x * blockDim.x;
// Kiểm tra nếu vượt qua kích thước mảng
if(idx >= n) return;
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// Ghi kết quả cho block này vào bộ nhớ toàn cục
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling2 (double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// Chuyển từ con trỏ toàn cục sang con trỏ của block này
double *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2 data blocks
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
// Đồng bộ hóa các group data trong 2 thread kết cận
__syncthreads();
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// Đồng bộ hóa trong một threadBlock
__syncthreads();
}
// Ghi kết quả cho block này vào bộ nhớ toàn cục
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling4(double *g_idata, double *g_odata, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
// Chuyển từ con trỏ toàn cục sang con trỏ của block này
double *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads(); // Đồng bộ hóa các group data trong 4 thread kết cận
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// Đồng bộ hóa trong một threadBlock
__syncthreads();
}
// Ghi kết quả cho block này vào bộ nhớ toàn cục
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling8 (double *g_idata, double *g_odata, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads(); // Đồng bộ hóa các group data trong 8 thread kết cận
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1){
if (tid < stride){
idata[tid] += idata[tid + stride];
}
// Đồng bộ hóa trong một threadBlock
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps8 (double *g_idata, double *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1){
if (tid < stride){
idata[tid] += idata[tid + stride];
}
// Đồng bộ hóa trong một threadBlock
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile double *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceCompleteUnrollWarps8 (double *g_idata, double *g_odata,
unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads(); // Đồng bộ hóa tất cả các thread trong một block
// in-place reduction and complete unroll
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile double *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// Ghi kết quả cho block này vào bộ nhớ toàn cục
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// Chuyển từ con trỏ toàn cục sang con trỏ của block này
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads(); // Đồng bộ tất các thread trong một block
// in-place reduction and complete unroll
if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile double *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// Ghi kết quả cho block này vào bộ nhớ toàn cục
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps (double *g_idata, double *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// Chuyển từ con trỏ toàn cục sang con trỏ của block này
double *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// Thực hiện tính tổng ở bộ nhớ toàn cục
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// Đồng bộ hóa trong một threadBlock
__syncthreads();
}
// unrolling last warp
if (tid < 32)
{
volatile double *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
int main()
{
printf("############ THÔNG TIN GPU ############\n");
// Chọn GPU thực thi câu lệnh
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Device %d: %s \n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// Initialize the vector size
int size = 1 << 24; // 2^24
printf("Array size : %d\n", size);
// Kernels are configured with a 1D grid and 1D blocks
int const BLOCK_SIZE = 512;
dim3 block (BLOCK_SIZE, 1); // Block size is 512 x 1 ~ (x, y)
dim3 grid ((size + block.x - 1) / block.x, 1); // Grid size is ceil(size/block.x)
printf("Launch configuration : <<<Grid (%d, %d), Block (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
// Allocate memory on the host (CPU)
size_t bytes = size * sizeof(double);
double *h_idata = (double *) malloc(bytes); // host input data
double *h_odata = (double *) malloc(grid.x * sizeof(double)); // host output data
double *temp = (double *) malloc(bytes); // scratch buffer used to feed the same input to each implementation
initialData(h_idata, size);
// Copy into temp for the CPU run
memcpy (temp, h_idata, bytes);
// Timing variables
double iStart, iElaps;
double gpu_sum = 0.0; // final sum of the partial results computed on the GPU
size_t gpu_bytes = grid.x * sizeof(double);
// Allocate memory on the device (GPU)
double *d_idata = NULL;
double *d_odata = NULL;
CHECK(cudaMalloc(&d_idata, bytes));
CHECK(cudaMalloc(&d_odata, gpu_bytes));
printf("ID| Time \t\t| Sum result \t\t| <<<GridSize, BlockSize >>> | Kernel\t\t\n");
// ############ 1. CPU #############
iStart = seconds();
double cpu_sum = recursiveReduce (temp, size);
iElaps = seconds() - iStart;
printf("1 | %f sec\t| %f\t|\t\t | recursiveReduce-CPU\n", iElaps, cpu_sum);
// ############ 2. reduceNeighbored ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, gpu_bytes, cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x);
printf("2 | %f sec\t| %f\t|<<<%d, %d>>> | reduceNeighbored\n", iElaps, gpu_sum, grid.x, block.x);
// ############ 3. reduceNeighboredLess ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, gpu_bytes, cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x);
printf("3 | %f sec\t| %f\t|<<<%d, %d>>> | reduceNeighboredLess\n", iElaps, gpu_sum, grid.x, block.x);
// ############ 4. reduceInterleaved ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, gpu_bytes, cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x);
printf("4 | %f sec\t| %f\t|<<<%d, %d>>> | reduceInterleaved\n", iElaps, gpu_sum, grid.x, block.x);
// ############ 5. reduceUnrolling2 ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(double), cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 2);
printf("5 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrolling2\n", iElaps, gpu_sum, grid.x/2, block.x);
// ############ 6. reduceUnrolling4 ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(double), cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 4);
printf("6 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrolling4\n", iElaps, gpu_sum, grid.x/4, block.x);
// ############ 7. reduceUnrolling8 ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("7 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrolling8\n", iElaps, gpu_sum, grid.x/8, block.x);
// ############ 8. reduceUnrollWarps8 ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("8 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrollWarps8\n", iElaps, gpu_sum, grid.x/8, block.x);
// ############ 9. reduceCompleteUnrollWarps8 ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("9 | %f sec\t| %f\t|<<<%d, %d>>> | reduceCompleteUnrollWarsp8\n", iElaps, gpu_sum, grid.x/8, block.x);
// ############ 10. reduceCompleteUnroll ############
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
CHECK(cudaDeviceSynchronize());
iStart = seconds();
switch (BLOCK_SIZE){
case 1024:
reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 512:
reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 256:
reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 128:
reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 64:
reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
}
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), cudaMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("10| %f sec\t| %f\t|<<<%d, %d>>> | reduceCompleteUnroll\n", iElaps, gpu_sum, grid.x/8, block.x);
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
// reset device
CHECK(cudaDeviceReset());
// Print sum result
printf("Sum on CPU : %f\nSum on GPU : %f", cpu_sum, gpu_sum);
return 0;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
using namespace std;
// ############### COMMON ###############
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
inline double seconds()
{
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
void initialData(double *data, int size)
// Initialize a vector of length size with random double values in [0, 1]
{
srand(0);
for (int i = 0; i < size; i++)
{
data[i] = (double)(rand()) / RAND_MAX;
}
}
double sumGPU(double *data, int size){
double sum = 0;
for(int i = 0; i < size; i++){
sum += data[i];
}
return sum;
}
// ############### Device(CPU) ###############
// Reference reduction on the CPU
double recursiveReduce(double *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
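// Fold the upper half onto the lower half; after log2(size) recursive calls
// the total ends up in data[0] (size is assumed to be a power of two).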
for (int i = 0; i < stride; i++){
data[i] += data[i + stride];
}
return recursiveReduce(data, stride);
}
// ############### Device(GPU) ###############
// Neighbored-pair reduction with warp divergence
__global__ void reduceNeighbored (double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Convert the global pointer to this block's local data pointer
double *idata = g_idata + blockIdx.x * blockDim.x;
// Boundary check against the array size
if (idx >= n) return;
// In-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// Synchronize within the thread block
__syncthreads();
}
// Write this block's result to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Neighbored-pair reduction with less divergence, using a compacted thread index within the block
__global__ void reduceNeighboredLess (double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Convert the global pointer to this block's local data pointer
double *idata = g_idata + blockIdx.x * blockDim.x;
// Boundary check against the array size
if(idx >= n) return;
// In-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
// Map tid to a strided array index (kept in a register)
int index = 2 * stride * tid;
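// With this mapping the working threads are the ones with the lowest tids,
// so whole warps drop out together and branch divergence is reduced.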
if (index < blockDim.x)
{
idata[index] += idata[index + stride];
}
// Synchronize within the thread block
__syncthreads();
}
// Write this block's result to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
// Interleaved Pair Implementation with less divergence
__global__ void reduceInterleaved (double *g_idata, double *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Convert the global pointer to this block's local data pointer
double *idata = g_idata + blockIdx.x * blockDim.x;
// Boundary check against the array size
if(idx >= n) return;
// In-place reduction in global memory
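// Interleaved addressing: the stride starts at half the block and shrinks,
// which keeps the active threads contiguous and their loads coalesced.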
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// Write this block's result to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling2 (double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// Convert the global pointer to this block's local data pointer
double *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2 data blocks
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
// Make the folded values visible to all threads before the tree reduction
__syncthreads();
// In-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// Synchronize within the thread block
__syncthreads();
}
// Write this block's result to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling4(double *g_idata, double *g_odata, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
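// One thread block now covers four consecutive data blocks, which is why the
// host launches this kernel with grid.x / 4 blocks.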
// Convert the global pointer to this block's local data pointer
double *idata = g_idata + blockIdx.x * blockDim.x * 4;
// unrolling 4
if (idx + 3 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4;
}
__syncthreads(); // Make the 4-way unrolled adds visible to all threads
// In-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// Synchronize within the thread block
__syncthreads();
}
// Write this block's result to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrolling8 (double *g_idata, double *g_odata, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads(); // Make the 8-way unrolled adds visible to all threads
// In-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1){
if (tid < stride){
idata[tid] += idata[tid + stride];
}
// Synchronize within the thread block
__syncthreads();
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps8 (double *g_idata, double *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// In-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1){
if (tid < stride){
idata[tid] += idata[tid + stride];
}
// Synchronize within the thread block
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile double *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceCompleteUnrollWarps8 (double *g_idata, double *g_odata,
unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads(); // Synchronize all threads in the block
// in-place reduction and complete unroll
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile double *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// Write this block's result to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnroll(double *g_idata, double *g_odata,
unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// Convert the global pointer to this block's local data pointer
double *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
double a1 = g_idata[idx];
double a2 = g_idata[idx + blockDim.x];
double a3 = g_idata[idx + 2 * blockDim.x];
double a4 = g_idata[idx + 3 * blockDim.x];
double b1 = g_idata[idx + 4 * blockDim.x];
double b2 = g_idata[idx + 5 * blockDim.x];
double b3 = g_idata[idx + 6 * blockDim.x];
double b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads(); // Synchronize all threads in the block
// in-place reduction and complete unroll
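// iBlockSize is a compile-time template constant, so the compiler removes the
// untaken branches below entirely (unlike runtime checks on blockDim.x).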
if (iBlockSize >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (iBlockSize >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (iBlockSize >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (iBlockSize >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile double *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// Write this block's result to global memory
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceUnrollWarps (double *g_idata, double *g_odata, unsigned int n)
{
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// Convert the global pointer to this block's local data pointer
double *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
__syncthreads();
// In-place reduction in global memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1)
{
if (tid < stride)
{
idata[tid] += idata[tid + stride];
}
// Synchronize within the thread block
__syncthreads();
}
// unrolling last warp
if (tid < 32)
{
volatile double *vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
int main()
{
printf("############ THÔNG TIN GPU ############\n");
// Chọn GPU thực thi câu lệnh
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Device %d: %s \n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// Initialize the vector size
int size = 1 << 24; // 2^24
printf("Array size : %d\n", size);
// Kernels are configured with a 1D grid and 1D blocks
int const BLOCK_SIZE = 512;
dim3 block (BLOCK_SIZE, 1); // Block size is 512 x 1 ~ (x, y)
dim3 grid ((size + block.x - 1) / block.x, 1); // Grid size is ceil(size/block.x)
printf("Launch configuration : <<<Grid (%d, %d), Block (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
// Allocate memory on the host (CPU)
size_t bytes = size * sizeof(double);
double *h_idata = (double *) malloc(bytes); // host input data
double *h_odata = (double *) malloc(grid.x * sizeof(double)); // host output data
double *temp = (double *) malloc(bytes); // scratch buffer used to feed the same input to each implementation
initialData(h_idata, size);
// Copy into temp for the CPU run
memcpy (temp, h_idata, bytes);
// Timing variables
double iStart, iElaps;
double gpu_sum = 0.0; // final sum of the partial results computed on the GPU
size_t gpu_bytes = grid.x * sizeof(double);
// Allocate memory on the device (GPU)
double *d_idata = NULL;
double *d_odata = NULL;
CHECK(hipMalloc(&d_idata, bytes));
CHECK(hipMalloc(&d_odata, gpu_bytes));
printf("ID| Time \t\t| Sum result \t\t| <<<GridSize, BlockSize >>> | Kernel\t\t\n");
// ############ 1. CPU #############
iStart = seconds();
double cpu_sum = recursiveReduce (temp, size);
iElaps = seconds() - iStart;
printf("1 | %f sec\t| %f\t|\t\t | recursiveReduce-CPU\n", iElaps, cpu_sum);
// ############ 2. reduceNeighbored ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, gpu_bytes, hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x);
printf("2 | %f sec\t| %f\t|<<<%d, %d>>> | reduceNeighbored\n", iElaps, gpu_sum, grid.x, block.x);
// ############ 3. reduceNeighboredLess ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceNeighboredLess<<<grid, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, gpu_bytes, hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x);
printf("3 | %f sec\t| %f\t|<<<%d, %d>>> | reduceNeighboredLess\n", iElaps, gpu_sum, grid.x, block.x);
// ############ 4. reduceInterleaved ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceInterleaved<<<grid, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, gpu_bytes, hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x);
printf("4 | %f sec\t| %f\t|<<<%d, %d>>> | reduceInterleaved\n", iElaps, gpu_sum, grid.x, block.x);
// ############ 5. reduceUnrolling2 ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceUnrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(double), hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 2);
printf("5 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrolling2\n", iElaps, gpu_sum, grid.x/2, block.x);
// ############ 6. reduceUnrolling4 ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceUnrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(double), hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 4);
printf("6 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrolling4\n", iElaps, gpu_sum, grid.x/4, block.x);
// ############ 7. reduceUnrolling8 ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceUnrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("7 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrolling8\n", iElaps, gpu_sum, grid.x/8, block.x);
// ############ 8. reduceUnrollWarps8 ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("8 | %f sec\t| %f\t|<<<%d, %d>>> | reduceUnrollWarps8\n", iElaps, gpu_sum, grid.x/8, block.x);
// ############ 9. reduceCompleteUnrollWarps8 ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
reduceCompleteUnrollWarps8<<<grid.x / 8, block>>>(d_idata, d_odata, size);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("9 | %f sec\t| %f\t|<<<%d, %d>>> | reduceCompleteUnrollWarsp8\n", iElaps, gpu_sum, grid.x/8, block.x);
// ############ 10. reduceCompleteUnroll ############
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
CHECK(hipDeviceSynchronize());
iStart = seconds();
switch (BLOCK_SIZE){
case 1024:
reduceCompleteUnroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 512:
reduceCompleteUnroll<512><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 256:
reduceCompleteUnroll<256><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 128:
reduceCompleteUnroll<128><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
case 64:
reduceCompleteUnroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size);
break;
}
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(double), hipMemcpyDeviceToHost));
gpu_sum = sumGPU(h_odata, grid.x / 8);
printf("10| %f sec\t| %f\t|<<<%d, %d>>> | reduceCompleteUnroll\n", iElaps, gpu_sum, grid.x/8, block.x);
// free host memory
free(h_idata);
free(h_odata);
// free device memory
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// reset device
CHECK(hipDeviceReset());
// Print sum result
printf("Sum on CPU : %f\nSum on GPU : %f", cpu_sum, gpu_sum);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cstdio>
#include <cstdlib>
#include <vector>
__device__ __managed__ int sum;
__global__ void thread(int *a) {
a[threadIdx.x] = 0;
}
__global__ void reduction(int *bucket,int *key) {
int i = threadIdx.x;
atomicAdd(&bucket[key[i]], 1);
}
__global__ void sort(int num,int *key,int sum) {
int thread = threadIdx.x;
key[sum+thread] = num;
}
int main() {
int n = 50;
int range = 5;
int *key;
cudaMallocManaged(&key, n*sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
int *bucket;
cudaMallocManaged(&bucket, range*sizeof(int));
thread<<<1,range>>>(bucket);
reduction<<<1,n>>>(bucket,key);
cudaDeviceSynchronize(); // wait for the histogram in bucket[] to be complete before the host reads it
sum=0;
for(int i=0;i<range;i++){
int threadnum = bucket[i];
sort<<<1,threadnum>>>(i,key,sum);
sum += threadnum;
}
cudaDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
cudaFree(key);
cudaFree(bucket);
}
|
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <vector>
__device__ __managed__ int sum;
__global__ void thread(int *a) {
a[threadIdx.x] = 0;
}
__global__ void reduction(int *bucket,int *key) {
int i = threadIdx.x;
atomicAdd(&bucket[key[i]], 1);
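// One thread per input element: atomicAdd builds the histogram safely even
// though many threads may increment the same bucket at once.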
}
__global__ void sort(int num,int *key,int sum) {
int thread = threadIdx.x;
key[sum+thread] = num;
}
int main() {
int n = 50;
int range = 5;
int *key;
hipMallocManaged(&key, n*sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
int *bucket;
hipMallocManaged(&bucket, range*sizeof(int));
thread<<<1,range>>>(bucket);
reduction<<<1,n>>>(bucket,key);
hipDeviceSynchronize(); // wait for the histogram in bucket[] to be complete before the host reads it
sum=0;
for(int i=0;i<range;i++){
int threadnum = bucket[i];
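// sum is a running prefix sum of the bucket counts: it is the offset in key[]
// where the threadnum copies of value i are written by the sort kernel.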
sort<<<1,threadnum>>>(i,key,sum);
sum += threadnum;
}
hipDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
hipFree(key);
hipFree(bucket);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<bits/stdc++.h>
#define N 16
#define BLOCK_DIM 16
using namespace std;
__global__ void multiply(float A[], float B[], float C[])
{
__shared__ float sub_A[BLOCK_DIM][BLOCK_DIM], sub_B[BLOCK_DIM][BLOCK_DIM];
int global_x = threadIdx.x + blockIdx.x * blockDim.x, global_y = threadIdx.y + blockIdx.y * blockDim.y, global_ID = global_y * N + global_x;
C[global_ID] = 0;
for(int i = 0; i < N / BLOCK_DIM; i++)
{
sub_A[threadIdx.y][threadIdx.x] = A[global_y * N + global_x + BLOCK_DIM * i];
sub_B[threadIdx.y][threadIdx.x] = B[(global_y + BLOCK_DIM * i) * N + global_x];
__syncthreads();
for(int j = 0; j < BLOCK_DIM; j++)
C[global_ID] += sub_A[threadIdx.y][j] * sub_B[j][threadIdx.x];
__syncthreads();
}
}
void init_matrix(float mat[])
{
for(int i = 0; i < N; i++)
for(int j = 0; j < N; j++)
mat[i * N + j] = 1;
}
void print_matrix(float mat[])
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
cout << mat[i * N + j] << " ";
cout << endl;
}
cout << endl;
}
int main()
{
float *A = new float[N * N], *B = new float[N * N], *C = new float[N * N], *cuda_A, *cuda_B, * cuda_C;
init_matrix(A);
cout << "A : " << endl;
print_matrix(A);
init_matrix(B);
cout << "B : " << endl;
print_matrix(B);
cudaMalloc(&cuda_A, sizeof(float) * N * N);
cudaMalloc(&cuda_B, sizeof(float) * N * N);
cudaMalloc(&cuda_C, sizeof(float) * N * N);
cudaMemcpy(cuda_A, A, sizeof(float) * N * N, cudaMemcpyHostToDevice);
cudaMemcpy(cuda_B, B, sizeof(float) * N * N, cudaMemcpyHostToDevice);
dim3 grid_dim(N / BLOCK_DIM, N / BLOCK_DIM), block_dim(BLOCK_DIM, BLOCK_DIM);
multiply<<<grid_dim, block_dim>>>(cuda_A, cuda_B, cuda_C);
cudaMemcpy(C, cuda_C, sizeof(float) * N * N, cudaMemcpyDeviceToHost);
cout << "C : " << endl;
print_matrix(C);
return 0;
}
|
#include <hip/hip_runtime.h>
#include<bits/stdc++.h>
#define N 16
#define BLOCK_DIM 16
using namespace std;
__global__ void multiply(float A[], float B[], float C[])
{
__shared__ float sub_A[BLOCK_DIM][BLOCK_DIM], sub_B[BLOCK_DIM][BLOCK_DIM];
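// Tiled multiplication: each iteration stages one BLOCK_DIM x BLOCK_DIM tile of
// A and of B in shared memory, so every element is read from global memory once
// per tile instead of once per multiply-add.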
int global_x = threadIdx.x + blockIdx.x * blockDim.x, global_y = threadIdx.y + blockIdx.y * blockDim.y, global_ID = global_y * N + global_x;
C[global_ID] = 0;
for(int i = 0; i < N / BLOCK_DIM; i++)
{
sub_A[threadIdx.y][threadIdx.x] = A[global_y * N + global_x + BLOCK_DIM * i];
sub_B[threadIdx.y][threadIdx.x] = B[(global_y + BLOCK_DIM * i) * N + global_x];
__syncthreads();
for(int j = 0; j < BLOCK_DIM; j++)
C[global_ID] += sub_A[threadIdx.y][j] * sub_B[j][threadIdx.x];
__syncthreads();
}
}
void init_matrix(float mat[])
{
for(int i = 0; i < N; i++)
for(int j = 0; j < N; j++)
mat[i * N + j] = 1;
}
void print_matrix(float mat[])
{
for(int i = 0; i < N; i++)
{
for(int j = 0; j < N; j++)
cout << mat[i * N + j] << " ";
cout << endl;
}
cout << endl;
}
int main()
{
float *A = new float[N * N], *B = new float[N * N], *C = new float[N * N], *cuda_A, *cuda_B, * cuda_C;
init_matrix(A);
cout << "A : " << endl;
print_matrix(A);
init_matrix(B);
cout << "B : " << endl;
print_matrix(B);
hipMalloc(&cuda_A, sizeof(float) * N * N);
hipMalloc(&cuda_B, sizeof(float) * N * N);
hipMalloc(&cuda_C, sizeof(float) * N * N);
hipMemcpy(cuda_A, A, sizeof(float) * N * N, hipMemcpyHostToDevice);
hipMemcpy(cuda_B, B, sizeof(float) * N * N, hipMemcpyHostToDevice);
dim3 grid_dim(N / BLOCK_DIM, N / BLOCK_DIM), block_dim(BLOCK_DIM, BLOCK_DIM);
multiply<<<grid_dim, block_dim>>>(cuda_A, cuda_B, cuda_C);
hipMemcpy(C, cuda_C, sizeof(float) * N * N, hipMemcpyDeviceToHost);
cout << "C : " << endl;
print_matrix(C);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
__global__ void add(double *a,double *b,double *c,int n)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
{
c[id] = a[id] + b[id];
}
}
int main()
{
int n=8;
double *h_a,*h_b,*h_c,*d_a,*d_b,*d_c;
size_t bytes = n*sizeof(double);
h_a=(double*)malloc(bytes);
h_b=(double*)malloc(bytes);
h_c=(double*)malloc(bytes);
cudaMalloc(&d_a,bytes);
cudaMalloc(&d_b,bytes);
cudaMalloc(&d_c,bytes);
int i;
for(i=0;i<n;i++)
{
h_a[i]= random() %n;
h_b[i]= random() %n;
}
printf("\n\nVector A =>");
for(i=0;i<n;i++)
{
printf("%lf ",h_a[i]);
}
printf("\n\nVector B =>");
for(i=0;i<n;i++)
{
printf("%lf ",h_b[i]);
}
cudaMemcpy(d_a,h_a,bytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,bytes,cudaMemcpyHostToDevice);
int blockSize=2;
int gridSize=(int)ceil((float)n/blockSize);
add<<<gridSize,blockSize>>>(d_a,d_b,d_c,n);
cudaMemcpy(h_c,d_c,bytes,cudaMemcpyDeviceToHost);
printf("\n\nVector BC=>");
for(i=0;i<n;i++)
{
printf("%lf ",h_c[i]);
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
__global__ void add(double *a,double *b,double *c,int n)
{
int id=blockIdx.x*blockDim.x+threadIdx.x;
if(id<n)
{
c[id] = a[id] + b[id];
}
}
int main()
{
int n=8;
double *h_a,*h_b,*h_c,*d_a,*d_b,*d_c;
size_t bytes = n*sizeof(double);
h_a=(double*)malloc(bytes);
h_b=(double*)malloc(bytes);
h_c=(double*)malloc(bytes);
hipMalloc(&d_a,bytes);
hipMalloc(&d_b,bytes);
hipMalloc(&d_c,bytes);
int i;
for(i=0;i<n;i++)
{
h_a[i]= random() %n;
h_b[i]= random() %n;
}
printf("\n\nVector A =>");
for(i=0;i<n;i++)
{
printf("%lf ",h_a[i]);
}
printf("\n\nVector B =>");
for(i=0;i<n;i++)
{
printf("%lf ",h_b[i]);
}
hipMemcpy(d_a,h_a,bytes,hipMemcpyHostToDevice);
hipMemcpy(d_b,h_b,bytes,hipMemcpyHostToDevice);
int blockSize=2;
int gridSize=(int)ceil((float)n/blockSize);
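// Round the grid size up so every element gets a thread; the bounds check in
// the kernel discards the extra threads of the last block.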
add<<<gridSize,blockSize>>>(d_a,d_b,d_c,n);
hipMemcpy(h_c,d_c,bytes,hipMemcpyDeviceToHost);
printf("\n\nVector BC=>");
for(i=0;i<n;i++)
{
printf("%lf ",h_c[i]);
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
//Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdlib.h>
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
////////////////////////OPERATIONS//////////////////////////////////////////////
//ADD=1
__global__ void add(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]+array2[i];
}
//SUBTRACT=2
__global__ void subtract(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]-array2[i];
}
//MULTIPLY=3
__global__ void multiply(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]*array2[i];
}
//MOD=4
__global__ void mod(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]%array2[i];
}
//////////////////////////GPU FUNCTION//////////////////////////////////
void main_sub(int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation)
{
/* Declare statically four arrays of ARRAY_SIZE each */
int array1[ARRAY_SIZE];
int array2[ARRAY_SIZE];
int array3[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
array1[i] = i;
array2[i] = (rand()%4) + 1; // keep values nonzero so the mod kernel never divides by zero
//Check that array1 and array 2 inputs are correct
//printf("ARRAY1 at %i\nARRAY2 at %i\n\n", array1[i], array2[i]);
}
/* Declare pointers for GPU based params */
int *gpu_block1;
int *gpu_block2;
int *gpu_block3;
cudaMalloc((void **)&gpu_block1, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block2, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void **)&gpu_block3, ARRAY_SIZE_IN_BYTES);
cudaMemcpy( gpu_block1, array1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
cudaMemcpy( gpu_block2, array2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
cudaMemcpy( gpu_block3, array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice );
/* Execute our kernel */
switch(whichOperation) {
//ADD
case 1 :
printf("///////////////////////OUTPUT ADD///////////////\n");
add<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//SUBTRACT
case 2 :
printf("///////////////////////OUTPUT SUBTRACT///////////////\n");
subtract<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//MULTIPLY
case 3 :
printf("///////////////////////OUTPUT MULTIPLY///////////////\n");
multiply<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//MOD
case 4 :
printf("///////////////////////OUTPUT MOD///////////////\n");
mod<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
}
/* Free the arrays on the GPU as now we're done with them */
cudaMemcpy( array1, gpu_block1, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
cudaMemcpy( array2, gpu_block2, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
cudaMemcpy( array3, gpu_block3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost );
cudaFree(gpu_block1);
cudaFree(gpu_block2);
cudaFree(gpu_block3);
/* Iterate through the arrays and print */
for(int i = 0; i < ARRAY_SIZE; i+=4)
{
printf("Index %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\n", i, array3[i], i+1, array3[i+1],i+2, array3[i+2], i+3, array3[i+3]);
}
}
//////////////////////////MAIN///////////////////////////////////
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = (1 << 20);
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
main_sub(totalThreads,blockSize,numBlocks, 1);
main_sub(totalThreads,blockSize,numBlocks, 2);
main_sub(totalThreads,blockSize,numBlocks, 3);
main_sub(totalThreads,blockSize,numBlocks, 4);
}
|
//Based on the work of Andrew Krepps
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
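// Note: ARRAY_SIZE expands to N, the first parameter of main_sub, so the arrays
// declared there are sized by the thread count passed in at run time.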
////////////////////////OPERATIONS//////////////////////////////////////////////
//ADD=1
__global__ void add(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]+array2[i];
}
//SUBTRACT=2
__global__ void subtract(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]-array2[i];
}
//MULTIPLY=3
__global__ void multiply(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]*array2[i];
}
//MOD=4
__global__ void mod(int * array1,int * array2,int * array3)
{
const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
array3[i]=array1[i]%array2[i];
}
//////////////////////////GPU FUNCTION//////////////////////////////////
void main_sub(int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation)
{
/* Declare statically four arrays of ARRAY_SIZE each */
int array1[ARRAY_SIZE];
int array2[ARRAY_SIZE];
int array3[ARRAY_SIZE];
for(int i = 0; i < ARRAY_SIZE; i++)
{
array1[i] = i;
array2[i] = (rand()%4) + 1; // keep values nonzero so the mod kernel never divides by zero
//Check that array1 and array 2 inputs are correct
//printf("ARRAY1 at %i\nARRAY2 at %i\n\n", array1[i], array2[i]);
}
/* Declare pointers for GPU based params */
int *gpu_block1;
int *gpu_block2;
int *gpu_block3;
hipMalloc((void **)&gpu_block1, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_block2, ARRAY_SIZE_IN_BYTES);
hipMalloc((void **)&gpu_block3, ARRAY_SIZE_IN_BYTES);
hipMemcpy( gpu_block1, array1, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );
hipMemcpy( gpu_block2, array2, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );
hipMemcpy( gpu_block3, array3, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice );
/* Execute our kernel */
switch(whichOperation) {
//ADD
case 1 :
printf("///////////////////////OUTPUT ADD///////////////\n");
add<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//SUBTRACT
case 2 :
printf("///////////////////////OUTPUT SUBTRACT///////////////\n");
subtract<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//MULTIPLY
case 3 :
printf("///////////////////////OUTPUT MULTIPLY///////////////\n");
multiply<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
//MOD
case 4 :
printf("///////////////////////OUTPUT MOD///////////////\n");
mod<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_block1,gpu_block2,gpu_block3);
break;
}
/* Free the arrays on the GPU as now we're done with them */
hipMemcpy( array1, gpu_block1, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
hipMemcpy( array2, gpu_block2, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
hipMemcpy( array3, gpu_block3, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost );
hipFree(gpu_block1);
hipFree(gpu_block2);
hipFree(gpu_block3);
/* Iterate through the arrays and print */
for(int i = 0; i < ARRAY_SIZE; i+=4)
{
printf("Index %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\t\tIndex %i:\t %i\n", i, array3[i], i+1, array3[i+1],i+2, array3[i+2], i+3, array3[i+3]);
}
}
//////////////////////////MAIN///////////////////////////////////
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = (1 << 20);
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
main_sub(totalThreads,blockSize,numBlocks, 1);
main_sub(totalThreads,blockSize,numBlocks, 2);
main_sub(totalThreads,blockSize,numBlocks, 3);
main_sub(totalThreads,blockSize,numBlocks, 4);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#include<sys/time.h>
// Execution time with an array-of-structures (AoS) layout is higher than with a
// structure-of-arrays (SoA) layout, because each load also brings in fields that
// are not needed yet.
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
const int numThreads = 1000000;
int numThreadsPerBlock = 16;
// Array of structure a structure occupies 72 B
typedef struct {
double3 pos;
double3 vel;
double3 force;
}atom;
//////////////////////////////////////////////////////
// KERNEL
//////////////////////////////////////////////////////
__global__ void updateAtomKernel(const atom *d_in,atom *d_out ,const int N){
//int t_idx = threadIdx.x; // thread index
int idx = threadIdx.x + blockIdx.x*blockDim.x;
//Coalesced memory access.
d_out[idx].pos.x = 2*d_in[idx].pos.x ;
d_out[idx].pos.y = 2*d_in[idx].pos.y ;
d_out[idx].pos.z = 2*d_in[idx].pos.z ;
d_out[idx].vel.x = 2*d_in[idx].vel.x;
d_out[idx].vel.y = 2*d_in[idx].vel.y;
d_out[idx].vel.z = 2*d_in[idx].vel.z;
d_out[idx].force.x = 2*d_in[idx].force.x;
d_out[idx].force.y = 2*d_in[idx].force.y;
d_out[idx].force.z = 2*d_in[idx].force.z;
}
////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////
int main(int argc,char **argv){
if(argc >= 2) numThreadsPerBlock = atoi(argv[1]);
// assign host memory variable and size
atom *h_aos; // number of threads
int sizeA = numThreads*sizeof(atom); // size of host memory
timeval t;
double time;
// assign device memory address
atom *d_a;
atom *d_b;
// assign number of blocks and num of threads
//int numThreadsPerBlock = ThreadsPerBlock;
int numBlocks = numThreads/numThreadsPerBlock;
// allocate space to host memory and device
int memSize = sizeA;
h_aos = (atom*)malloc(sizeA);
cudaMalloc((void **)&d_a,memSize);
cudaMalloc((void **)&d_b,memSize);
//initialize host data
for(int i=0;i<numThreads;i++){
h_aos[i].pos.x = 1;
h_aos[i].pos.y = 1;
h_aos[i].pos.z = 1;
h_aos[i].vel.x = 1;
h_aos[i].vel.y = 1;
h_aos[i].vel.z = 1;
h_aos[i].force.x = 1;
h_aos[i].force.y = 1;
h_aos[i].force.z = 1;
}
//copy host to device all the memory
cudaMemcpy(d_a,h_aos,memSize,cudaMemcpyHostToDevice);
gettimeofday(&t,NULL);
time = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
//launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
updateAtomKernel<<<dimGrid,dimBlock>>>(d_a,d_b,numThreads);
//let the threads complete
cudaThreadSynchronize();
// check if any error
checkCUDAError("Invocation kernel");
gettimeofday(&t,NULL);
time = t.tv_sec*1000.0 + (t.tv_usec/1000.0) - time;
// device to host copy
cudaMemcpy( h_aos, d_b, memSize, cudaMemcpyDeviceToHost );
// To validate result. must be all = 2
/*
for(int i=0;i<numThreads;i++){
printf("new pos x: %f \n",h_aos[i].pos.x);
printf("new pos y: %f \n",h_aos[i].pos.y);
printf("new pos x: %f \n",h_aos[i].pos.z);
printf("new vel y: %f \n",h_aos[i].vel.x);
printf("new vel x: %f \n",h_aos[i].vel.y);
printf("new vel y: %f \n",h_aos[i].vel.z);
printf("new force x: %f \n",h_aos[i].force.x);
printf("new force y: %f \n",h_aos[i].force.y);
printf("new force z: %f \n",h_aos[i].force.z);
}*/
printf("Time taken in ThreadsPerBlock: %d is %f msec\n", numThreadsPerBlock,time);
// Free some memory
cudaFree(d_a);
cudaFree(d_b);
free(h_aos);
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<sys/time.h>
// Execution time with an array-of-structures (AoS) layout is higher than with a
// structure-of-arrays (SoA) layout, because each load also brings in fields that
// are not needed yet.
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);
const int numThreads = 1000000;
int numThreadsPerBlock = 16;
// Array of structure a structure occupies 72 B
typedef struct {
double3 pos;
double3 vel;
double3 force;
}atom;
//////////////////////////////////////////////////////
// KERNEL
//////////////////////////////////////////////////////
__global__ void updateAtomKernel(const atom *d_in,atom *d_out ,const int N){
//int t_idx = threadIdx.x; // thread index
int idx = threadIdx.x + blockIdx.x*blockDim.x;
//Coalesced memory access.
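// Note that with an array of structures, consecutive threads read addresses
// sizeof(atom) (72 bytes) apart, so coalescing is only partial compared to a
// structure-of-arrays layout where each field is contiguous across threads.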
d_out[idx].pos.x = 2*d_in[idx].pos.x ;
d_out[idx].pos.y = 2*d_in[idx].pos.y ;
d_out[idx].pos.z = 2*d_in[idx].pos.z ;
d_out[idx].vel.x = 2*d_in[idx].vel.x;
d_out[idx].vel.y = 2*d_in[idx].vel.y;
d_out[idx].vel.z = 2*d_in[idx].vel.z;
d_out[idx].force.x = 2*d_in[idx].force.x;
d_out[idx].force.y = 2*d_in[idx].force.y;
d_out[idx].force.z = 2*d_in[idx].force.z;
}
////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////
int main(int argc,char **argv){
if(argc >= 2) numThreadsPerBlock = atoi(argv[1]);
// assign host memory variable and size
atom *h_aos; // number of threads
int sizeA = numThreads*sizeof(atom); // size of host memory
timeval t;
double time;
// assign device memory address
atom *d_a;
atom *d_b;
// assign number of blocks and num of threads
//int numThreadsPerBlock = ThreadsPerBlock;
int numBlocks = numThreads/numThreadsPerBlock;
// allocate space to host memory and device
int memSize = sizeA;
h_aos = (atom*)malloc(sizeA);
hipMalloc((void **)&d_a,memSize);
hipMalloc((void **)&d_b,memSize);
//initialize host data
for(int i=0;i<numThreads;i++){
h_aos[i].pos.x = 1;
h_aos[i].pos.y = 1;
h_aos[i].pos.z = 1;
h_aos[i].vel.x = 1;
h_aos[i].vel.y = 1;
h_aos[i].vel.z = 1;
h_aos[i].force.x = 1;
h_aos[i].force.y = 1;
h_aos[i].force.z = 1;
}
//copy host to device all the memory
hipMemcpy(d_a,h_aos,memSize,hipMemcpyHostToDevice);
gettimeofday(&t,NULL);
time = t.tv_sec*1000.0 + (t.tv_usec/1000.0);
//launch kernel
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
updateAtomKernel<<<dimGrid,dimBlock>>>(d_a,d_b,numThreads);
//let the threads complete
hipDeviceSynchronize();
// check if any error
checkCUDAError("Invocation kernel");
gettimeofday(&t,NULL);
time = t.tv_sec*1000.0 + (t.tv_usec/1000.0) - time;
// device to host copy
hipMemcpy( h_aos, d_b, memSize, hipMemcpyDeviceToHost );
// To validate result. must be all = 2
/*
for(int i=0;i<numThreads;i++){
printf("new pos x: %f \n",h_aos[i].pos.x);
printf("new pos y: %f \n",h_aos[i].pos.y);
printf("new pos x: %f \n",h_aos[i].pos.z);
printf("new vel y: %f \n",h_aos[i].vel.x);
printf("new vel x: %f \n",h_aos[i].vel.y);
printf("new vel y: %f \n",h_aos[i].vel.z);
printf("new force x: %f \n",h_aos[i].force.x);
printf("new force y: %f \n",h_aos[i].force.y);
printf("new force z: %f \n",h_aos[i].force.z);
}*/
printf("Time taken in ThreadsPerBlock: %d is %f msec\n", numThreadsPerBlock,time);
// Free some memory
hipFree(d_a);
hipFree(d_b);
free(h_aos);
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
//Function to check for errors
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
//Number of vertices
#define vertices 5000
//Number of edges per vertex
#define Edge_per_node 4999
//Used to define weights on each edge.
#define Maximum_weight 5
//Value for infinity
#define infinity 10000000
//Kernel call to initialize all node weights to infinity except for the source node. We mark the source node as settled after this point
__global__ void Initializing(int *node_weight_array, int *mask_array, int Source) // CUDA kernel
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(id==Source)
{
node_weight_array[id]=0;
mask_array[id]=1;
}
else
{
node_weight_array[id]=infinity;
mask_array[id]=0;
}
}
}
//Kernel call: each settled node relaxes the edge to its next unsettled neighbour and updates the running minimum tentative distance.
__global__ void Minimum(int *mask_array,int *vertex_array,int *vertex_array_copy, int *node_weight_array, int *edge_array, int *edge_weight_array, int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
//Iterative variables
int i,n,t;
if(id<vertices)
{
if(mask_array[id]==1)
{
t=vertex_array_copy[id];
for(i=t*Edge_per_node;i<t*Edge_per_node+Edge_per_node;i++)
{
n=edge_array[i];
if(mask_array[n]!=1)
{
atomicMin(&node_weight_array[n],node_weight_array[id]+edge_weight_array[i]);
atomicMin(&min[0],node_weight_array[n]);
vertex_array_copy[id]=n;
break;
}
}
}
}
}
//Kernel call to mark all the settled nodes
__global__ void Relax(int *mask_array,int *node_weight_array,int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(mask_array[id]!=1 && node_weight_array[id]==min[0])
{
mask_array[id]=1;
}
}
}
int main( int argc, char* argv[] )
{
//Size of the Vertex array
size_t vertex_array_size = vertices*sizeof(int);
//Size of the edge array and edge_weight array
size_t edge_array_size = vertices*Edge_per_node*sizeof(int);
//Initializing the vertex array
int *vertex_array = (int*)malloc(vertex_array_size);
//Initializing a working copy of the vertex array used by the kernels
int *vertex_array_copy = (int*)malloc(vertex_array_size);
//Initializing a copy of the vertex array used for shuffling the edges
int *vertex_copy = (int*)malloc(vertex_array_size);
//Initializing the edge array
int *edge_array=(int*)malloc(edge_array_size);
//Initializing edge_weight_array which stores the weights of each edge
int *edge_weight_array = (int*)malloc(edge_array_size);
//Initializing Node weight array which stores the value for the current weight to reach the node
int *node_weight_array = (int*)malloc(vertex_array_size);
//Array to mark if a node is settled or not
int *mask_array = (int*)malloc(vertex_array_size);
//Iterative operator
int i,j,k;
printf("Populating Vertex Array....\n");
//Setting node number in vertex_array
for(i=0;i<vertices;i++)
{
vertex_array[i]=i;
}
//Setting the seed of the RNG to system clock
srand(time(NULL));
//temp variable
int temp;
//Adding random edges to each node while avoiding self edge
memcpy(vertex_copy,vertex_array,vertex_array_size);
memcpy(vertex_array_copy,vertex_array,vertex_array_size);
printf("Populating Edge Array....\n");
//We give each node random edges and store them in the increasing order of weights in the edge array.
for(i=0;i<vertices;i++)
{
//Function to jumble the nodes in the vertex array and assign them to each node
for(j=vertices-1;j>0;j--)
{
k=rand()%(j+1);
temp = vertex_copy[j];
vertex_copy[j]=vertex_copy[k];
vertex_copy[k]=temp;
}
for(j=0;j<Edge_per_node;j++)
{
if(vertex_copy[j]==i)
{
j=j+1;
edge_array[i*Edge_per_node+(j-1)]= vertex_copy[j];
}
else
{
edge_array[i*Edge_per_node+j]= vertex_copy[j];
}
}
}
/*
//Can be uncommented to see the edges of each node
printf("=== Initial edges===\n");
for(i=0;i<vertices*Edge_per_node;i++)
{
printf("E[%d]= %d\n",i,edge_array[i]);
}
*/
printf("Adding Weights to each edge...\n");
//Adding weights to the edge_weight array
for(i=0;i<vertices;i++)
{
int a = rand()%Maximum_weight+1;
int b = rand()%Maximum_weight+1;
for(j=0;j<Edge_per_node;j++)
{
edge_weight_array[i*Edge_per_node+j]=a+j*b;
}
}
/*
//Can be uncommented to see the edge weight of each edge
printf("=== Initial edge weight weight===\n");
for(i=0;i<vertices*Edge_per_node;i++)
{
printf("W[%d]= %d\n",i,edge_weight_array[i]);
}
*/
//Initializing gpu variables
int *gpu_vertex_array;
int *gpu_vertex_array_copy;
int *gpu_edge_array;
int *gpu_edge_weight_array;
int *gpu_node_weight_array;
int *gpu_mask_array;
//Allocating memory to the gpu variables
checkCuda(cudaMalloc((void**)&gpu_vertex_array,vertex_array_size));
checkCuda(cudaMalloc((void**)&gpu_vertex_array_copy,vertex_array_size));
checkCuda(cudaMalloc((void**)&gpu_node_weight_array,vertex_array_size));
checkCuda(cudaMalloc((void**)&gpu_mask_array,vertex_array_size));
checkCuda(cudaMalloc((void**)&gpu_edge_array,edge_array_size));
checkCuda(cudaMalloc((void**)&gpu_edge_weight_array,edge_array_size));
//Copying memory from Host to Device
checkCuda(cudaMemcpy(gpu_vertex_array,vertex_array,vertex_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_vertex_array_copy,vertex_array_copy,vertex_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_node_weight_array,node_weight_array,vertex_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_mask_array,mask_array,vertex_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_edge_array,edge_array,edge_array_size,cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(gpu_edge_weight_array,edge_weight_array,edge_array_size,cudaMemcpyHostToDevice));
//Setting the Block and Grid Size
int blockSize, gridSize;
blockSize=1024;
gridSize = (int)ceil((float)vertices/blockSize); // Number of thread blocks in grid
printf("Beginning Optimized Djikstra Algorithm\n");
//Starting timer
float start_time;
TIMER_CREATE(start_time);
TIMER_START(start_time);
//Kernel call to initialize all the node weights. We provide the source node 0
Initializing<<<gridSize, blockSize>>>(gpu_node_weight_array,gpu_mask_array, 0);
cudaError_t err = cudaGetLastError();
/*
if (err != cudaSuccess) checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
{
printf("Error: %s\n", cudaGetErrorString(err));
}
*/
/*
//Can be uncommented to see the initial weights of each node
checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
printf("=== Initial node weight===\n");
for(i=0;i<vertices;i++)
{
printf("NW[%d]= %d\n ",i,node_weight_array[i]);
}
*/
//Variable min stores the minimum tentative node weight among the relaxed (unsettled) nodes; nodes at this weight are settled in the next Relax pass
int *min=(int*)malloc(2*sizeof(int));
min[0]=0;
min[1]=0;
//GPU variable to store min value
int *gpu_min;
checkCuda(cudaMalloc((void**)&gpu_min,2*sizeof(int)));
//Begin the relax calls of the algorithm
while(min[0]<infinity)
{
min[0] = infinity;
checkCuda(cudaMemcpy(gpu_min,min,sizeof(int),cudaMemcpyHostToDevice));
Minimum<<<gridSize, blockSize>>>(gpu_mask_array,gpu_vertex_array,gpu_vertex_array_copy,gpu_node_weight_array,gpu_edge_array,gpu_edge_weight_array,gpu_min);
/*
if (err != cudaSuccess) checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
{
printf("Error: %s\n", cudaGetErrorString(err));
}
*/
Relax<<<gridSize, blockSize>>>(gpu_mask_array,gpu_node_weight_array,gpu_min);
/*
if (err != cudaSuccess) checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
{
printf("Error: %s\n", cudaGetErrorString(err));
}
*/
/*
//Can be uncommented to see the node weights and Dijkstra's Algorithm being performed step by step
checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
for(i=0;i<vertices;i++)
{
printf("NW[%d]= %d\n ",i,node_weight_array[i]);
}
*/
checkCuda(cudaMemcpy(min,gpu_min,2*sizeof(int),cudaMemcpyDeviceToHost));
}
//End timer
TIMER_END(start_time);
printf("Kernel Execution Time: %f ms\n",start_time);
//Copying the final node weights from the Device to Host
checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
/*
//Can be uncommented to see the final shortest distance of all nodes from the Source Node
printf("=== Final node weight===\n");
for(i=0;i<vertices;i++)
{
printf("NW[%d]= %d\n ",i,node_weight_array[i]);
}
*/
cudaFree(gpu_vertex_array);
cudaFree(gpu_node_weight_array);
cudaFree(gpu_edge_array);
cudaFree(gpu_edge_weight_array);
cudaFree(gpu_mask_array);
free(vertex_array);
free(node_weight_array);
free(edge_array);
free(edge_weight_array);
free(mask_array);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start); \
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
//Function to check for errors
inline hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
exit(-1);
}
#endif
return result;
}
//Number of vertices
#define vertices 5000
//Number of edges per vertex
#define Edge_per_node 4999
//Used to define weights on each edge.
#define Maximum_weight 5
//Value for infinity
#define infinity 10000000
//Kernel call to inititialize all node weights to infinity except for the source node. We mark the source node as settled after this point
__global__ void Initializing(int *node_weight_array, int *mask_array, int Source) // CUDA kernel
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(id==Source)
{
node_weight_array[id]=0;
mask_array[id]=1;
}
else
{
node_weight_array[id]=infinity;
mask_array[id]=0;
}
}
}
//Kernel Call to choose a a node which is relaxed and settled and to relax the outgoing edges of each settled node.
__global__ void Minimum(int *mask_array,int *vertex_array,int *vertex_array_copy, int *node_weight_array, int *edge_array, int *edge_weight_array, int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
//Iterative variables
int i,n,t;
if(id<vertices)
{
if(mask_array[id]==1)
{
t=vertex_array_copy[id];
for(i=t*Edge_per_node;i<t*Edge_per_node+Edge_per_node;i++)
{
n=edge_array[i];
if(mask_array[n]!=1)
{
atomicMin(&node_weight_array[n],node_weight_array[id]+edge_weight_array[i]);
atomicMin(&min[0],node_weight_array[n]);
vertex_array_copy[id]=n;
break;
}
}
}
}
}
//Kernel call to mark all the settled nodes
__global__ void Relax(int *mask_array,int *node_weight_array,int *min)
{
int id = blockIdx.x*blockDim.x+threadIdx.x; // Get global thread ID
if(id<vertices)
{
if(mask_array[id]!=1 && node_weight_array[id]==min[0])
{
mask_array[id]=1;
}
}
}
int main( int argc, char* argv[] )
{
//Size of the Vertex array
size_t vertex_array_size = vertices*sizeof(int);
//Size of the edge array and edge_weight array
size_t edge_array_size = vertices*Edge_per_node*sizeof(int);
//Intializing the vertex array
int *vertex_array = (int*)malloc(vertex_array_size);
//Intializing the vertex array
int *vertex_array_copy = (int*)malloc(vertex_array_size);
//Initializing a copy of the vertex array
int *vertex_copy = (int*)malloc(vertex_array_size);
//Intializing the edge array
int *edge_array=(int*)malloc(edge_array_size);
//Initializing edge_weight_array which stores the weights of each edge
int *edge_weight_array = (int*)malloc(edge_array_size);
//Initializing Node weight array which stores the value for the current weight to reach the node
int *node_weight_array = (int*)malloc(vertex_array_size);
//Array to mark if a node is settled or not
int *mask_array = (int*)malloc(vertex_array_size);
//Iterative operator
int i,j,k;
printf("Populating Vertex Array....\n");
//Setting node number in vertex_array
for(i=0;i<vertices;i++)
{
vertex_array[i]=i;
}
//Setting the seed of the RNG to system clock
srand(time(NULL));
//temp variable
int temp;
//Adding random edges to each node while avoiding self edge
memcpy(vertex_copy,vertex_array,vertex_array_size);
memcpy(vertex_array_copy,vertex_array,vertex_array_size);
printf("Populating Edge Array....\n");
//We give each node random edges and store them in the increasing order of weights in the edge array.
for(i=0;i<vertices;i++)
{
//Function to jumble the nodes in the vertex array and assign them to each node
for(j=vertices-1;j>0;j--)
{
k=rand()%(j+1);
temp = vertex_copy[j];
vertex_copy[j]=vertex_copy[k];
vertex_copy[k]=temp;
}
for(j=0;j<Edge_per_node;j++)
{
if(vertex_copy[j]==i)
{
j=j+1;
edge_array[i*Edge_per_node+(j-1)]= vertex_copy[j];
}
else
{
edge_array[i*Edge_per_node+j]= vertex_copy[j];
}
}
}
/*
//Can be uncommented to see the edges of each node
printf("=== Initial edges===\n");
for(i=0;i<vertices*Edge_per_node;i++)
{
printf("E[%d]= %d\n",i,edge_array[i]);
}
*/
printf("Adding Weights to each edge...\n");
//Adding weights to the edge_weight array
for(i=0;i<vertices;i++)
{
int a = rand()%Maximum_weight+1;
int b = rand()%Maximum_weight+1;
for(j=0;j<Edge_per_node;j++)
{
edge_weight_array[i*Edge_per_node+j]=a+j*b;
}
}
/*
//Can be uncommented to see the edge weight of each edge
printf("=== Initial edge weight weight===\n");
for(i=0;i<vertices*Edge_per_node;i++)
{
printf("W[%d]= %d\n",i,edge_weight_array[i]);
}
*/
//Initializing gpu variables
int *gpu_vertex_array;
int *gpu_vertex_array_copy;
int *gpu_edge_array;
int *gpu_edge_weight_array;
int *gpu_node_weight_array;
int *gpu_mask_array;
//Allocating memory to the gpu variables
checkCuda(hipMalloc((void**)&gpu_vertex_array,vertex_array_size));
checkCuda(hipMalloc((void**)&gpu_vertex_array_copy,vertex_array_size));
checkCuda(hipMalloc((void**)&gpu_node_weight_array,vertex_array_size));
checkCuda(hipMalloc((void**)&gpu_mask_array,vertex_array_size));
checkCuda(hipMalloc((void**)&gpu_edge_array,edge_array_size));
checkCuda(hipMalloc((void**)&gpu_edge_weight_array,edge_array_size));
//Copying memory from Host to Device
checkCuda(hipMemcpy(gpu_vertex_array,vertex_array,vertex_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_vertex_array_copy,vertex_array_copy,vertex_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_node_weight_array,node_weight_array,vertex_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_mask_array,mask_array,vertex_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_edge_array,edge_array,edge_array_size,hipMemcpyHostToDevice));
checkCuda(hipMemcpy(gpu_edge_weight_array,edge_weight_array,edge_array_size,hipMemcpyHostToDevice));
//Setting the Block and Grid Size
int blockSize, gridSize;
blockSize=1024;
gridSize = (int)ceil((float)vertices/blockSize); // Number of thread blocks in grid
printf("Beginning Optimized Djikstra Algorithm\n");
//Starting timer
float start_time;
TIMER_CREATE(start_time);
TIMER_START(start_time);
//Kernel call to initialize all the node weights. We provide the source node 0
Initializing<<<gridSize, blockSize>>>(gpu_node_weight_array,gpu_mask_array, 0);
hipError_t err = hipGetLastError();
/*
if (err != cudaSuccess) checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
{
printf("Error: %s\n", cudaGetErrorString(err));
}
*/
/*
//Can be uncommented to see the initial weights of each node
checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
printf("=== Initial node weight===\n");
for(i=0;i<vertices;i++)
{
printf("NW[%d]= %d\n ",i,node_weight_array[i]);
}
*/
//Variable min used to store the minimum node weight of the relaxed nodes and use this node to relax all of its edges
int *min=(int*)malloc(2*sizeof(int));
min[0]=0;
min[1]=0;
//GPU variable to store min value
int *gpu_min;
checkCuda(hipMalloc((void**)&gpu_min,2*sizeof(int)));
//Begin the relax calls of the algorithm
while(min[0]<infinity)
{
min[0] = infinity;
checkCuda(hipMemcpy(gpu_min,min,sizeof(int),hipMemcpyHostToDevice));
Minimum<<<gridSize, blockSize>>>(gpu_mask_array,gpu_vertex_array,gpu_vertex_array_copy,gpu_node_weight_array,gpu_edge_array,gpu_edge_weight_array,gpu_min);
/*
if (err != cudaSuccess) checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
{
printf("Error: %s\n", cudaGetErrorString(err));
}
*/
Relax<<<gridSize, blockSize>>>(gpu_mask_array,gpu_node_weight_array,gpu_min);
/*
if (err != cudaSuccess) checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
{
printf("Error: %s\n", cudaGetErrorString(err));
}
*/
/*
//Can be uncommented to see the node weight and Dijkstra's Algorithm being performed step by step
checkCuda(cudaMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,cudaMemcpyDeviceToHost));
for(i=0;i<vertices;i++)
{
printf("NW[%d]= %d\n ",i,node_weight_array[i]);
}
*/
checkCuda(hipMemcpy(min,gpu_min,2*sizeof(int),hipMemcpyDeviceToHost));
}
//End timer
TIMER_END(start_time);
printf("Kernel Execution Time: %f ms\n",start_time);
//Copying the final node weights from the Device to Host
checkCuda(hipMemcpy(node_weight_array,gpu_node_weight_array,vertex_array_size,hipMemcpyDeviceToHost));
/*
//Can be uncommented to see the final shortest distance of all nodes from the Source Node
printf("=== Final node weight===\n");
for(i=0;i<vertices;i++)
{
printf("NW[%d]= %d\n ",i,node_weight_array[i]);
}
*/
hipFree(gpu_vertex_array);
hipFree(gpu_node_weight_array);
hipFree(gpu_edge_array);
hipFree(gpu_edge_weight_array);
hipFree(gpu_mask_array);
free(vertex_array);
free(node_weight_array);
free(edge_array);
free(edge_weight_array);
free(mask_array);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
__global__ void patchmatch2_conv_kernel( float *A, float *B, float *AP, float *BP, float *conv, int *prev_corrAB_upsampled, int patch, int s_rad, int c, int h, int w )
{
int h1 = h, h2 = h, w1 = w, w2 = w;
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size1 = h * w, size2 = h * w;
int s_size = 2 * s_rad + 1;
int s_n = s_size * s_size;
if (_id < size1 * s_n) {
conv[_id] = -1;
int id1 = _id / s_n, s_idx = _id % s_n;
int y1 = id1 / w1, x1 = id1 % w1;
int dy2 = s_idx / s_size - s_rad, dx2 = s_idx % s_size - s_rad;
int x2 = prev_corrAB_upsampled[2 * id1 + 0];
int y2 = prev_corrAB_upsampled[2 * id1 + 1];
int new_y2 = y2 + dy2;
int new_x2 = x2 + dx2;
if (!(new_x2 >= 0 && new_x2 < w2 && new_y2 >= 0 && new_y2 < h2)) {
return ;
}
// Improve by local searching
int kernel_radius = (patch - 1) / 2;
float conv_result = 0;
int cnt = 0;
for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
int xx1 = x1 + dx, yy1 = y1 + dy;
int xx2 = new_x2 + dx, yy2 = new_y2 + dy;
if (0 <= xx1 && xx1 < w1 && 0 <= yy1 && yy1 < h1 &&
0 <= xx2 && xx2 < w2 && 0 <= yy2 && yy2 < h2)
{
int _id1 = yy1 * w1 + xx1, _id2 = yy2 * w2 + xx2;
for (int dc = 0; dc < c; dc++) {
float term1 = A[dc * size1 + _id1];
float term2 = B[dc * size2 + _id2];
conv_result += term1 * term2;
term1 = AP[dc * size1 + _id1];
term2 = BP[dc * size2 + _id2];
conv_result += term1 * term2;
}
cnt++;
}
}
}
conv[_id] = conv_result / cnt;
}
return ;
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
__global__ void patchmatch2_conv_kernel( float *A, float *B, float *AP, float *BP, float *conv, int *prev_corrAB_upsampled, int patch, int s_rad, int c, int h, int w )
{
int h1 = h, h2 = h, w1 = w, w2 = w;
int _id = blockIdx.x * blockDim.x + threadIdx.x;
int size1 = h * w, size2 = h * w;
int s_size = 2 * s_rad + 1;
int s_n = s_size * s_size;
if (_id < size1 * s_n) {
conv[_id] = -1;
int id1 = _id / s_n, s_idx = _id % s_n;
int y1 = id1 / w1, x1 = id1 % w1;
int dy2 = s_idx / s_size - s_rad, dx2 = s_idx % s_size - s_rad;
int x2 = prev_corrAB_upsampled[2 * id1 + 0];
int y2 = prev_corrAB_upsampled[2 * id1 + 1];
int new_y2 = y2 + dy2;
int new_x2 = x2 + dx2;
if (!(new_x2 >= 0 && new_x2 < w2 && new_y2 >= 0 && new_y2 < h2)) {
return ;
}
// Improve by local searching
int kernel_radius = (patch - 1) / 2;
float conv_result = 0;
int cnt = 0;
for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
int xx1 = x1 + dx, yy1 = y1 + dy;
int xx2 = new_x2 + dx, yy2 = new_y2 + dy;
if (0 <= xx1 && xx1 < w1 && 0 <= yy1 && yy1 < h1 &&
0 <= xx2 && xx2 < w2 && 0 <= yy2 && yy2 < h2)
{
int _id1 = yy1 * w1 + xx1, _id2 = yy2 * w2 + xx2;
for (int dc = 0; dc < c; dc++) {
float term1 = A[dc * size1 + _id1];
float term2 = B[dc * size2 + _id2];
conv_result += term1 * term2;
term1 = AP[dc * size1 + _id1];
term2 = BP[dc * size2 + _id2];
conv_result += term1 * term2;
}
cnt++;
}
}
}
conv[_id] = conv_result / cnt;
}
return ;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
// by lectures and "CUDA by Example" book
#define ind(k, i, j, rows, cols) (k * (rows * cols) + i * cols + j)
// device code: matrices sum calculation
__global__ void sum_matrices_kernel(int* mat_stack, int* mat, int rows, int cols, int num) {
printf("blockId, threadId, dims: [%d, %d], [%d, %d], [%d, %d]\n",
blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, rows, cols);
// row and col that correspond to current thread
// process all elements that correspond to current thread
for (int i = blockIdx.y * blockDim.y + threadIdx.y;
i < rows; i += blockDim.y * gridDim.y)
for (int j = blockIdx.x * blockDim.x + threadIdx.x;
j < cols; j += blockDim.x * gridDim.x) {
printf("blockId, threadId, pos: [%d, %d], [%d, %d], [%d, %d]\n",
blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, i, j);
int mat_ind = ind(0, i, j, rows, cols);
mat[mat_ind] = 0;
// iterating over all elements on (i, j) position
for (int k = 0; k < num; k++) {
int stack_ind = ind(k, i, j, rows, cols);
mat[mat_ind] += mat_stack[stack_ind];
}
}
}
int* cuda_copy_tens(int* host_tensor, int rows, int cols, int num) {
int* dev_tensor;
// size of memory to allocate on device for tensor
long mem_size = rows * cols * num * sizeof(int);
// device memory allocation
cudaMalloc((void**) &dev_tensor, mem_size);
// copying data from host to device
cudaMemcpy(dev_tensor, host_tensor, mem_size, cudaMemcpyHostToDevice);
// returning pointer
return dev_tensor;
}
// host code: preparation
void sum_matrices_gpu(int* host_mat_stack, int* host_m, int rows, int cols, int num) {
// Step 1: moving data on device
int* dev_mat_stack = cuda_copy_tens(host_mat_stack, rows, cols, num);
int* dev_m = cuda_copy_tens(host_m, rows, cols, 1);
// Step 2
// grid (of blocks) dimensions
dim3 grid_dim(3, 2, 1);
// block (of threads) dimensions
dim3 block_dim(2, 2, 1);
// running kernel summation code
sum_matrices_kernel<<<grid_dim, block_dim>>>(dev_mat_stack, dev_m, rows, cols, num);
// Step 3
// copying result from device to host matrix
cudaMemcpy(host_m, dev_m, rows * cols * sizeof(int), cudaMemcpyDeviceToHost);
// freeing device memory
cudaFree(dev_mat_stack);
cudaFree(dev_m);
}
void sum_matrices_cpu(int* mat_stack, int* mat, int rows, int cols, int num) {
for (int i = 0; i < rows; i++)
for (int j = 0; j < cols; j++) {
int mat_ind = ind(0, i, j, rows, cols);
mat[mat_ind] = 0;
for (int k = 0; k < num; k++) {
int stack_ind = ind(k, i, j, rows, cols);
mat[mat_ind] += mat_stack[stack_ind];
}
}
}
// initialize matrix stack
int* init_mat_stack(int* mat_stack, int rows, int cols, int num) {
for (int k = 0; k < num; k++)
for (int i = 0; i < rows; i++)
for (int j = 0; j < cols; j++) {
int index = ind(k, i, j, rows, cols);
int rel_index = ind(0, i, j, rows, cols);
mat_stack[index] = (k + 1) * (rel_index + 1);
}
return mat_stack;
}
// print matrix
void print_mat(const char* header, int* mat, int rows, int cols) {
printf("%s (%d, %d):\n", header, rows, cols);
for (int i = 0; i < rows; i++)
for (int j = 0; j < cols; j++) {
int index = ind(0, i, j, rows, cols);
printf("\t%d ", mat[index]);
if (j == cols - 1)
printf("\n");
}
}
void print_mat_stack(int* mat_stack, int rows, int cols, int num) {
printf("Matrix stack (%d, %d) x %d:\n", rows, cols, num);
for (int k = 0; k < num; k++) {
char *header = (char*) malloc(256 * sizeof(char));
sprintf(header, "Matrix #%d", k + 1);
int* matrix_offset = mat_stack + k * (rows * cols) * sizeof(char);
print_mat(header, matrix_offset, rows, cols);
}
}
int main() {
// matrix params
int rows = 6;
int cols = 8;
int num = 3;
// first matrix
int* host_mat_stack = (int*) malloc(rows * cols * num * sizeof(int));
init_mat_stack(host_mat_stack, rows, cols, num);
print_mat_stack(host_mat_stack, rows, cols, num);
// result matrix
int* host_m = (int*) malloc(rows * cols * sizeof(int));
print_mat("Result matrix", host_m, rows, cols);
// summation on device
sum_matrices_gpu(host_mat_stack, host_m, rows, cols, num);
// showing result
print_mat("Result matrix", host_m, rows, cols);
// summation on host
sum_matrices_cpu(host_mat_stack, host_m, rows, cols, num);
// showing result
print_mat("Result matrix", host_m, rows, cols);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
// by lectures and "CUDA by Example" book
#define ind(k, i, j, rows, cols) (k * (rows * cols) + i * cols + j)
// device code: matrices sum calculation
__global__ void sum_matrices_kernel(int* mat_stack, int* mat, int rows, int cols, int num) {
printf("blockId, threadId, dims: [%d, %d], [%d, %d], [%d, %d]\n",
blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, rows, cols);
// row and col that correspond to current thread
// process all elements that correspond to current thread
for (int i = blockIdx.y * blockDim.y + threadIdx.y;
i < rows; i += blockDim.y * gridDim.y)
for (int j = blockIdx.x * blockDim.x + threadIdx.x;
j < cols; j += blockDim.x * gridDim.x) {
printf("blockId, threadId, pos: [%d, %d], [%d, %d], [%d, %d]\n",
blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, i, j);
int mat_ind = ind(0, i, j, rows, cols);
mat[mat_ind] = 0;
// iterating over all elements on (i, j) position
for (int k = 0; k < num; k++) {
int stack_ind = ind(k, i, j, rows, cols);
mat[mat_ind] += mat_stack[stack_ind];
}
}
}
int* cuda_copy_tens(int* host_tensor, int rows, int cols, int num) {
int* dev_tensor;
// size of memory to allocate on device for tensor
long mem_size = rows * cols * num * sizeof(int);
// device memory allocation
hipMalloc((void**) &dev_tensor, mem_size);
// copying data from host to device
hipMemcpy(dev_tensor, host_tensor, mem_size, hipMemcpyHostToDevice);
// returning pointer
return dev_tensor;
}
// host code: preparation
void sum_matrices_gpu(int* host_mat_stack, int* host_m, int rows, int cols, int num) {
// Step 1: moving data on device
int* dev_mat_stack = cuda_copy_tens(host_mat_stack, rows, cols, num);
int* dev_m = cuda_copy_tens(host_m, rows, cols, 1);
// Step 2
// grid (of blocks) dimensions
dim3 grid_dim(3, 2, 1);
// block (of threads) dimensions
dim3 block_dim(2, 2, 1);
// running kernel summation code
sum_matrices_kernel<<<grid_dim, block_dim>>>(dev_mat_stack, dev_m, rows, cols, num);
// Step 3
// copying result from device to host matrix
hipMemcpy(host_m, dev_m, rows * cols * sizeof(int), hipMemcpyDeviceToHost);
// freeing device memory
hipFree(dev_mat_stack);
hipFree(dev_m);
}
void sum_matrices_cpu(int* mat_stack, int* mat, int rows, int cols, int num) {
for (int i = 0; i < rows; i++)
for (int j = 0; j < cols; j++) {
int mat_ind = ind(0, i, j, rows, cols);
mat[mat_ind] = 0;
for (int k = 0; k < num; k++) {
int stack_ind = ind(k, i, j, rows, cols);
mat[mat_ind] += mat_stack[stack_ind];
}
}
}
// initialize matrix stack
int* init_mat_stack(int* mat_stack, int rows, int cols, int num) {
for (int k = 0; k < num; k++)
for (int i = 0; i < rows; i++)
for (int j = 0; j < cols; j++) {
int index = ind(k, i, j, rows, cols);
int rel_index = ind(0, i, j, rows, cols);
mat_stack[index] = (k + 1) * (rel_index + 1);
}
return mat_stack;
}
// print matrix
void print_mat(const char* header, int* mat, int rows, int cols) {
printf("%s (%d, %d):\n", header, rows, cols);
for (int i = 0; i < rows; i++)
for (int j = 0; j < cols; j++) {
int index = ind(0, i, j, rows, cols);
printf("\t%d ", mat[index]);
if (j == cols - 1)
printf("\n");
}
}
void print_mat_stack(int* mat_stack, int rows, int cols, int num) {
printf("Matrix stack (%d, %d) x %d:\n", rows, cols, num);
for (int k = 0; k < num; k++) {
char *header = (char*) malloc(256 * sizeof(char));
sprintf(header, "Matrix #%d", k + 1);
int* matrix_offset = mat_stack + k * (rows * cols) * sizeof(char);
print_mat(header, matrix_offset, rows, cols);
}
}
int main() {
// matrix params
int rows = 6;
int cols = 8;
int num = 3;
// first matrix
int* host_mat_stack = (int*) malloc(rows * cols * num * sizeof(int));
init_mat_stack(host_mat_stack, rows, cols, num);
print_mat_stack(host_mat_stack, rows, cols, num);
// result matrix
int* host_m = (int*) malloc(rows * cols * sizeof(int));
print_mat("Result matrix", host_m, rows, cols);
// summation on device
sum_matrices_gpu(host_mat_stack, host_m, rows, cols, num);
// showing result
print_mat("Result matrix", host_m, rows, cols);
// summation on host
sum_matrices_cpu(host_mat_stack, host_m, rows, cols, num);
// showing result
print_mat("Result matrix", host_m, rows, cols);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include<stdio.h>
__global__ void convolution_kernel(const uint8_t *d_source, uint8_t *d_target,
const int width, const int height,
const float *d_stancil,
const int st_width,
const int st_height)
{
//boundaries checking for width
if ((blockDim.x * blockIdx.x)+ threadIdx.x > (width-1))
{
return;
}
//
if ((blockDim.y * blockIdx.y)+ threadIdx.y > (height-1))
{
return;
}
int x,y,localX,localY,final_idx,pixX,pixY, st_idx;
//lets compute the coordinate of the pixel we are computing
pixX = (blockIdx.x*blockDim.x) + threadIdx.x;
pixY = (blockIdx.y*blockDim.y) + threadIdx.y;
int idx = ((pixY *width) + (pixX)) *3;
//computing the center of the filter
int center_x = (int)(st_width/2.0);
int center_y = (int)(st_height/2.0);
//allocating/initializing color variables
float colorR = 0,colorG = 0,colorB = 0;
//looping the height of the filter
for (y=0; y<st_height; ++y)
{
localY = y - center_y;
//looping the width of the filter
for (x=0;x<st_width; ++x)
{
//lets compute where in the filter we are, computing local
//coordinate from the center
localX = x - center_x;
//boundary check
if (( (localX + pixX) >= 0 && ((localX+pixX) < width)) &&
(localY+pixY >= 0 && ((localY+pixY) < height)))
{
//compute the final pixel to sample taking into account
//the offset of the filter
final_idx = idx + ((localX*3) + (localY*width*3));
//compute the filter index buffer
st_idx = x+ (y*st_width);
colorR += float(d_source[final_idx])*d_stancil[st_idx];
colorG += float(d_source[final_idx+1])*d_stancil[st_idx];
colorB += float(d_source[final_idx+2])*d_stancil[st_idx];
}//end of stencil boundary checking
}//end of looping filter width
}//end of looping filter height
//setting the color to final buffer
d_target[idx] = (uint8_t)min(255.0f,max(0.0f,colorR));
d_target[idx+1] = (uint8_t)min(255.0f,max(0.0f,colorG));
d_target[idx+2] = (uint8_t)min(255.0f,max(0.0f,colorB));
}
void run_convolution_kernel( uint8_t *d_source, uint8_t *d_target,
const size_t width, const size_t height,
const float *d_stancil,
const size_t st_width,
const size_t st_height)
{
const int grainSize=16;
int width_blocks,width_height;
//computing the block size
width_blocks = ((width%grainSize) != 0)?(width/grainSize) +1: (width/grainSize);
width_height = ((height%grainSize) != 0)?(height/grainSize) +1: (height/grainSize);
//setting up the block and grid sizes
const dim3 blockSize( grainSize, grainSize , 1);
const dim3 gridSize( width_blocks, width_height, 1);
//calling the actual kernel
convolution_kernel<<<gridSize, blockSize>>>(d_source,
d_target,
width,
height,
d_stancil,
st_width,
st_height);
//synchronizing device
cudaDeviceSynchronize();
//checking for error
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
}
|
#include "hip/hip_runtime.h"
#include <stdint.h>
#include<stdio.h>
__global__ void convolution_kernel(const uint8_t *d_source, uint8_t *d_target,
const int width, const int height,
const float *d_stancil,
const int st_width,
const int st_height)
{
//boundaries checking for width
if ((blockDim.x * blockIdx.x)+ threadIdx.x > (width-1))
{
return;
}
//
if ((blockDim.y * blockIdx.y)+ threadIdx.y > (height-1))
{
return;
}
int x,y,localX,localY,final_idx,pixX,pixY, st_idx;
//lets compute the coordinate of the pixel we are computing
pixX = (blockIdx.x*blockDim.x) + threadIdx.x;
pixY = (blockIdx.y*blockDim.y) + threadIdx.y;
int idx = ((pixY *width) + (pixX)) *3;
//computing the center of the filter
int center_x = (int)(st_width/2.0);
int center_y = (int)(st_height/2.0);
//allocating/initializing color variables
float colorR = 0,colorG = 0,colorB = 0;
//looping the height of the filter
for (y=0; y<st_height; ++y)
{
localY = y - center_y;
//looping the width of the filter
for (x=0;x<st_width; ++x)
{
//lets compute where in the filter we are, computing local
//coordinate from the center
localX = x - center_x;
//boundary check
if (( (localX + pixX) >= 0 && ((localX+pixX) < width)) &&
(localY+pixY >= 0 && ((localY+pixY) < height)))
{
//compute the final pixel to sample taking into account
//the offset of the filter
final_idx = idx + ((localX*3) + (localY*width*3));
//compute the filter index buffer
st_idx = x+ (y*st_width);
colorR += float(d_source[final_idx])*d_stancil[st_idx];
colorG += float(d_source[final_idx+1])*d_stancil[st_idx];
colorB += float(d_source[final_idx+2])*d_stancil[st_idx];
}//end of stencil boundary checking
}//end of looping filter width
}//end of looping filter height
//setting the color to final buffer
d_target[idx] = (uint8_t)min(255.0f,max(0.0f,colorR));
d_target[idx+1] = (uint8_t)min(255.0f,max(0.0f,colorG));
d_target[idx+2] = (uint8_t)min(255.0f,max(0.0f,colorB));
}
void run_convolution_kernel( uint8_t *d_source, uint8_t *d_target,
const size_t width, const size_t height,
const float *d_stancil,
const size_t st_width,
const size_t st_height)
{
const int grainSize=16;
int width_blocks,width_height;
//computing the block size
width_blocks = ((width%grainSize) != 0)?(width/grainSize) +1: (width/grainSize);
width_height = ((height%grainSize) != 0)?(height/grainSize) +1: (height/grainSize);
//setting up the block and grid sizes
const dim3 blockSize( grainSize, grainSize , 1);
const dim3 gridSize( width_blocks, width_height, 1);
//calling the actual kernel
convolution_kernel<<<gridSize, blockSize>>>(d_source,
d_target,
width,
height,
d_stancil,
st_width,
st_height);
//synchronizing device
hipDeviceSynchronize();
//checking for error
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
*
* -- LAPACK auxiliary routine (version 3.2) --
* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
* November 2006
*
* .. Scalar Arguments ..
INTEGER INCX, K1, K2, LDA, N
* ..
* .. Array Arguments ..
INTEGER IPIV( * )
DOUBLE PRECISION A( LDA, * )
* ..
*
* Purpose
* =======
*
* DLASWP performs a series of row interchanges on the matrix A.
* One row interchange is initiated for each of rows K1 through K2 of A.
*
* Arguments
* =========
*
* N (input) INTEGER
* The number of columns of the matrix A.
*
* A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
* On entry, the matrix of column dimension N to which the row
* interchanges will be applied.
* On exit, the permuted matrix.
*
* LDA (input) INTEGER
* The leading dimension of the array A.
* K1 (input) INTEGER
* The first element of IPIV for which a row interchange will
* be done.
*
* K2 (input) INTEGER
* The last element of IPIV for which a row interchange will
* be done.
*
* IPIV (input) INTEGER array, dimension (K2*abs(INCX))
* The vector of pivot indices. Only the elements in positions
* K1 through K2 of IPIV are accessed.
* IPIV(K) = L implies rows K and L are to be interchanged.
*
* INCX (input) INTEGER
* The increment between successive values of IPIV. If IPIV
* is negative, the pivots are applied in reverse order.
*
*/
#define DLASWP_BLOCK_SIZE 256
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
// based on swap_rows - it just folds in the loop from the cpu,
// so there is no going in and out of the gpu
__global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
for (; k1 < k2 ; ++k1)
{
int src_row = k1;
int dst_row = ipiv[k1];
for (int col_id = threadIdx.x ; col_id < n ; col_id += blockDim.x)
{
double A_tmp = A[col_id*lda + src_row];
A[col_id*lda + src_row] = A[col_id*lda + dst_row];
A[col_id*lda + dst_row] = A_tmp;
cg::sync(cta);
}
// TODO: we have very poor coalescing here. Can't we do better? Launch one warp of threads per column and
// ask those threads to reorder the column, for example.
}
}
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is being provided
* under the terms and conditions of a Source Code License Agreement.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
*
* -- LAPACK auxiliary routine (version 3.2) --
* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd..
* November 2006
*
* .. Scalar Arguments ..
INTEGER INCX, K1, K2, LDA, N
* ..
* .. Array Arguments ..
INTEGER IPIV( * )
DOUBLE PRECISION A( LDA, * )
* ..
*
* Purpose
* =======
*
* DLASWP performs a series of row interchanges on the matrix A.
* One row interchange is initiated for each of rows K1 through K2 of A.
*
* Arguments
* =========
*
* N (input) INTEGER
* The number of columns of the matrix A.
*
* A (input/output) DOUBLE PRECISION array, dimension (LDA,N)
* On entry, the matrix of column dimension N to which the row
* interchanges will be applied.
* On exit, the permuted matrix.
*
* LDA (input) INTEGER
* The leading dimension of the array A.
* K1 (input) INTEGER
* The first element of IPIV for which a row interchange will
* be done.
*
* K2 (input) INTEGER
* The last element of IPIV for which a row interchange will
* be done.
*
* IPIV (input) INTEGER array, dimension (K2*abs(INCX))
* The vector of pivot indices. Only the elements in positions
* K1 through K2 of IPIV are accessed.
* IPIV(K) = L implies rows K and L are to be interchanged.
*
* INCX (input) INTEGER
* The increment between successive values of IPIV. If IPIV
* is negative, the pivots are applied in reverse order.
*
*/
#define DLASWP_BLOCK_SIZE 256
#include <hip/hip_runtime.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
// based on swap_rows - it just folds in the loop from the cpu,
// so there is no going in and out of the gpu
__global__ void dlaswp(int n, double *A, int lda, int *ipiv, int k1, int k2)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
for (; k1 < k2 ; ++k1)
{
int src_row = k1;
int dst_row = ipiv[k1];
for (int col_id = threadIdx.x ; col_id < n ; col_id += blockDim.x)
{
double A_tmp = A[col_id*lda + src_row];
A[col_id*lda + src_row] = A[col_id*lda + dst_row];
A[col_id*lda + dst_row] = A_tmp;
cg::sync(cta);
}
// TODO: we have very poor coalescing here. Can't we do better? Launch one warp of threads per column and
// ask those threads to reorder the column, for example.
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#include<cuda.h>
__global__ void add(int *a,int *b,int *c)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<5)
c[id] = a[id] + b[id];
}
int main()
{
const int arraySize = 5;
float avg,sd;
int d[arraySize];
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
int *d_a,*d_b,*d_c,*d_d,i;
int size=sizeof(int)*arraySize;
cudaMalloc((void **)&d_a,size);
cudaMalloc((void **)&d_b,size);
cudaMalloc((void **)&d_c,size);
cudaMalloc((void **)&d_d,size);
int blocksize=1024;
int threadsize=(int) ceil ((float)arraySize/blocksize);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
add<<<blocksize,threadsize>>>(d_a,d_b,d_c);
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
for(i=0;i<arraySize;i++)
{
printf("%d\t",c[i]);
}
int sum=0;
for(i=0;i<arraySize;i++)
{
sum=sum+c[i];
}
printf("Sum=%d\n",sum);
avg=sum/arraySize;
printf("Avg=%f\n",avg);
for(int i=0;i<arraySize;i++)
{
d[i]=c[i]-avg;
d[i]=d[i]*d[i];
sum=sum+d[i];
}
sum=sum/arraySize;
sd=sqrt(sum);
printf("sd=%f\n",sd);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_d);
return 0;
}
/*OUTPUT
cpllab00@cpllab:~$ nvcc stdev.cu
cpllab00@cpllab:~$ ./a.out
11 22 33 44 55 Sum=165
Avg=33.000000
sd=16.583124
*/
|
#include<stdio.h>
#include<hip/hip_runtime.h>
__global__ void add(int *a,int *b,int *c)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<5)
c[id] = a[id] + b[id];
}
int main()
{
const int arraySize = 5;
float avg,sd;
int d[arraySize];
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
int *d_a,*d_b,*d_c,*d_d,i;
int size=sizeof(int)*arraySize;
hipMalloc((void **)&d_a,size);
hipMalloc((void **)&d_b,size);
hipMalloc((void **)&d_c,size);
hipMalloc((void **)&d_d,size);
int blocksize=1024;
int threadsize=(int) ceil ((float)arraySize/blocksize);
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size,hipMemcpyHostToDevice);
add<<<blocksize,threadsize>>>(d_a,d_b,d_c);
hipMemcpy(c,d_c,size,hipMemcpyDeviceToHost);
for(i=0;i<arraySize;i++)
{
printf("%d\t",c[i]);
}
int sum=0;
for(i=0;i<arraySize;i++)
{
sum=sum+c[i];
}
printf("Sum=%d\n",sum);
avg=sum/arraySize;
printf("Avg=%f\n",avg);
for(int i=0;i<arraySize;i++)
{
d[i]=c[i]-avg;
d[i]=d[i]*d[i];
sum=sum+d[i];
}
sum=sum/arraySize;
sd=sqrt(sum);
printf("sd=%f\n",sd);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_d);
return 0;
}
/*OUTPUT
cpllab00@cpllab:~$ nvcc stdev.cu
cpllab00@cpllab:~$ ./a.out
11 22 33 44 55 Sum=165
Avg=33.000000
sd=16.583124
*/
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include<thrust/transform.h>
#include<thrust/functional.h>
#include <iostream>
struct saxpi{
float k1;
float k2;
saxpi(float k1_, float k2_) :k1(k1_), k2(k2_)
{}
__host__ __device__ float operator()(float &x)const{
return x*k1 + k2;
}
};
int main()
{
thrust::host_vector<int> h_vec;
h_vec.push_back(10);
thrust::device_vector<int> d_vec;
d_vec = h_vec;
d_vec.push_back(1000);
thrust::host_vector<int> h_vec1 = d_vec;
std::cout << h_vec1[0] << "," << h_vec1[1] << std::endl;
thrust::host_vector<float> V;
for (int i = 0; i < 20; i++)
{
V.push_back(float(i));
}
saxpi f(2, 5);
thrust::device_vector<float> D = V;
thrust::device_vector<float> vH(V.size());
thrust::transform(D.begin(), D.end(), vH.begin(), f);
for (int i = 0; i < vH.size(); i++)
{
std::cout << vH[i] << std::endl;
}
}
|
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include<thrust/transform.h>
#include<thrust/functional.h>
#include <iostream>
struct saxpi{
float k1;
float k2;
saxpi(float k1_, float k2_) :k1(k1_), k2(k2_)
{}
__host__ __device__ float operator()(float &x)const{
return x*k1 + k2;
}
};
int main()
{
thrust::host_vector<int> h_vec;
h_vec.push_back(10);
thrust::device_vector<int> d_vec;
d_vec = h_vec;
d_vec.push_back(1000);
thrust::host_vector<int> h_vec1 = d_vec;
std::cout << h_vec1[0] << "," << h_vec1[1] << std::endl;
thrust::host_vector<float> V;
for (int i = 0; i < 20; i++)
{
V.push_back(float(i));
}
saxpi f(2, 5);
thrust::device_vector<float> D = V;
thrust::device_vector<float> vH(V.size());
thrust::transform(D.begin(), D.end(), vH.begin(), f);
for (int i = 0; i < vH.size(); i++)
{
std::cout << vH[i] << std::endl;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void computePathStates(int noPaths, int noDims, int nYears, int noControls, int year, float unitCost, float unitRevenue, int* controls, int noFuels, float *fuelCosts, float *uResults, float *uComposition, int noUncertainties, int *fuelIdx, int noCommodities, float* aars, float* totalPops, float* xin, int* currControls) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPaths) {
// 1. Adjusted population for each species
// We only take the highest flow's adjusted population as this is a
// measure of how damaging the road is. If we instead used the aar of
// the random control selected, we would get overlaps in the optimal
// control map.
for (int ii = 0; ii < noDims-1; ii++) {
// xin[idx*noDims + ii] = totalPops[idx*(noDims-1)*(nYears+1) + year*
// (noDims-1) + ii]*aars[idx*(nYears+1)*noControls*(noDims-1)
// + year*noControls*(noDims-1) + ii*noControls + controls[
// idx*nYears + year]];
xin[idx*noDims + ii] = totalPops[idx*(noDims-1)*(nYears+1) + year*
(noDims-1) + ii]*aars[idx*(nYears+1)*noControls*(noDims-1)
+ year*noControls*(noDims-1) + ii*noControls + (noControls
- 1)];
}
// 2. Unit profit
float unitFuel = 0.0;
float orePrice = 0.0;
// Compute the unit fuel cost component
for (int ii = 0; ii < noFuels; ii++) {
unitFuel += fuelCosts[ii]*uResults[idx*(nYears+1)*noUncertainties +
(year)*noUncertainties + fuelIdx[ii]];
}
// Compute the unit revenue from ore
for (int ii = 0; ii < noCommodities; ii++) {
orePrice += uComposition[idx*nYears*noCommodities + (year)*
noCommodities + ii]*uResults[idx*(nYears+1)*noUncertainties +
(year)*noUncertainties + noFuels + ii];
}
xin[idx*noDims + noDims-1] = unitCost + unitFuel - unitRevenue*
orePrice;
currControls[idx] = controls[idx*nYears + year];
// printf("%f %f\n",unitFuel,orePrice);
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void computePathStates(int noPaths, int noDims, int nYears, int noControls, int year, float unitCost, float unitRevenue, int* controls, int noFuels, float *fuelCosts, float *uResults, float *uComposition, int noUncertainties, int *fuelIdx, int noCommodities, float* aars, float* totalPops, float* xin, int* currControls) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPaths) {
// 1. Adjusted population for each species
// We only take the highest flow's adjusted population as this is a
// measure of how damaging the road is. If we instead used the aar of
// the random control selected, we would get overlaps in the optimal
// control map.
for (int ii = 0; ii < noDims-1; ii++) {
// xin[idx*noDims + ii] = totalPops[idx*(noDims-1)*(nYears+1) + year*
// (noDims-1) + ii]*aars[idx*(nYears+1)*noControls*(noDims-1)
// + year*noControls*(noDims-1) + ii*noControls + controls[
// idx*nYears + year]];
xin[idx*noDims + ii] = totalPops[idx*(noDims-1)*(nYears+1) + year*
(noDims-1) + ii]*aars[idx*(nYears+1)*noControls*(noDims-1)
+ year*noControls*(noDims-1) + ii*noControls + (noControls
- 1)];
}
// 2. Unit profit
float unitFuel = 0.0;
float orePrice = 0.0;
// Compute the unit fuel cost component
for (int ii = 0; ii < noFuels; ii++) {
unitFuel += fuelCosts[ii]*uResults[idx*(nYears+1)*noUncertainties +
(year)*noUncertainties + fuelIdx[ii]];
}
// Compute the unit revenue from ore
for (int ii = 0; ii < noCommodities; ii++) {
orePrice += uComposition[idx*nYears*noCommodities + (year)*
noCommodities + ii]*uResults[idx*(nYears+1)*noUncertainties +
(year)*noUncertainties + noFuels + ii];
}
xin[idx*noDims + noDims-1] = unitCost + unitFuel - unitRevenue*
orePrice;
currControls[idx] = controls[idx*nYears + year];
// printf("%f %f\n",unitFuel,orePrice);
}
}
|