Columns: problem (string, 66 to 76k chars) | answer (string, 29 to 75.8k chars)

Convert the following CUDA code to AMD GPU code:
```cuda
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#define N 1000000
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c){
// global thread id = thread id within block (threadIdx.x) + block id * block dimension (blockIdx.x * blockDim.x)
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
d_c[tid] = d_a[tid] + d_b[tid]; // element-wise addition
tid += blockDim.x * gridDim.x; // grid-stride loop: advance by one full grid (blockDim.x * gridDim.x) per iteration
}
}
int main(){
int *h_a, *h_b, *h_c;
int *d_a0, *d_b0, *d_c0;
int *d_a1, *d_b1, *d_c1;
int sizeByte = N*sizeof(int)*2;
//create two streams
cudaStream_t stream0, stream1;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//Use cudaHostAlloc to allocate page-locked (pinned) host memory
cudaHostAlloc(&h_a, sizeByte, cudaHostAllocDefault);
cudaHostAlloc(&h_b, sizeByte, cudaHostAllocDefault);
cudaHostAlloc(&h_c, sizeByte, cudaHostAllocDefault);
//same as single stream, just add stream parameter when launching kernel
for(int i=0;i<N*2;i++){
h_a[i] = i;
h_b[i] = i;
}
cudaMalloc(&d_a0,sizeByte/2);
cudaMalloc(&d_b0,sizeByte/2);
cudaMalloc(&d_c0,sizeByte/2);
cudaMalloc(&d_a1,sizeByte/2);
cudaMalloc(&d_b1,sizeByte/2);
cudaMalloc(&d_c1,sizeByte/2);
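// Each array's first half is processed in stream0 and its second half in stream1,
// so a copy queued in one stream can overlap with the kernel running in the other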
cudaMemcpyAsync(d_a0, h_a, sizeByte/2, cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_a1, h_a + N, sizeByte/2, cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(d_b0, h_b, sizeByte/2, cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_b1, h_b + N, sizeByte/2, cudaMemcpyHostToDevice, stream1);
gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0);
gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1);
cudaMemcpyAsync(h_c, d_c0, sizeByte/2, cudaMemcpyDeviceToHost, stream0);
cudaMemcpyAsync(h_c+N, d_c1, sizeByte/2, cudaMemcpyDeviceToHost, stream1);
// block the host until all work queued in both streams has completed
cudaDeviceSynchronize();
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float time = 0;
cudaEventElapsedTime(&time, start, stop);
printf("Time consumption: %lf\n", time);
cudaEventDestroy(stop);
cudaEventDestroy(start);
int Correct = 1;
int wrongIndex = -1;
printf("Vector addition on GPU \n");
//Printing result on console
for (int i = 0; i < 2*N; i++)
{
if ((h_a[i] + h_b[i] != h_c[i]))
{
Correct = 0;
wrongIndex = i;
break;
}
}
if (Correct == 1)
{
printf("GPU has computed Sum Correctly\n");
}
else
{
printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]);
}
// Free GPU memory for both streams' buffers
cudaFree(d_a0);
cudaFree(d_b0);
cudaFree(d_c0);
cudaFree(d_a1);
cudaFree(d_b1);
cudaFree(d_c1);
// Free the pinned host memory allocated by CUDA
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
return 0;
}
```

Answer:

```cpp
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 1000000
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c){
// global thread id = thread id within block (threadIdx.x) + block id * block dimension (blockIdx.x * blockDim.x)
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < N)
{
d_c[tid] = d_a[tid] + d_b[tid]; // element-wise addition
tid += blockDim.x * gridDim.x; // grid-stride loop: advance by one full grid (blockDim.x * gridDim.x) per iteration
}
}
int main(){
int *h_a, *h_b, *h_c;
int *d_a0, *d_b0, *d_c0;
int *d_a1, *d_b1, *d_c1;
int sizeByte = N*sizeof(int)*2;
//create two streams
hipStream_t stream0, stream1;
hipStreamCreate(&stream0);
hipStreamCreate(&stream1);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
//Use hipHostMalloc (the current HIP API; hipHostAlloc is deprecated) to allocate page-locked (pinned) host memory
hipHostMalloc(&h_a, sizeByte, hipHostMallocDefault);
hipHostMalloc(&h_b, sizeByte, hipHostMallocDefault);
hipHostMalloc(&h_c, sizeByte, hipHostMallocDefault);
//same as single stream, just add stream parameter when launching kernel
for(int i=0;i<N*2;i++){
h_a[i] = i;
h_b[i] = i;
}
hipMalloc(&d_a0,sizeByte/2);
hipMalloc(&d_b0,sizeByte/2);
hipMalloc(&d_c0,sizeByte/2);
hipMalloc(&d_a1,sizeByte/2);
hipMalloc(&d_b1,sizeByte/2);
hipMalloc(&d_c1,sizeByte/2);
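// Each array's first half is processed in stream0 and its second half in stream1,
// so a copy queued in one stream can overlap with the kernel running in the other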
hipMemcpyAsync(d_a0, h_a, sizeByte/2, hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(d_a1, h_a + N, sizeByte/2, hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(d_b0, h_b, sizeByte/2, hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(d_b1, h_b + N, sizeByte/2, hipMemcpyHostToDevice, stream1);
gpuAdd<<<512,512,0,stream0>>>(d_a0,d_b0,d_c0);
gpuAdd<<<512,512,0,stream1>>>(d_a1,d_b1,d_c1);
hipMemcpyAsync(h_c, d_c0, sizeByte/2, hipMemcpyDeviceToHost, stream0);
hipMemcpyAsync(h_c+N, d_c1, sizeByte/2, hipMemcpyDeviceToHost, stream1);
// block the host until all work queued in both streams has completed
hipDeviceSynchronize();
hipStreamSynchronize(stream0);
hipStreamSynchronize(stream1);
hipEventRecord(stop);
hipEventSynchronize(stop);
float time = 0;
hipEventElapsedTime(&time, start, stop);
printf("Time consumption: %lf\n", time);
hipEventDestroy(stop);
hipEventDestroy(start);
int Correct = 1;
int wrongIndex = -1;
printf("Vector addition on GPU \n");
//Printing result on console
for (int i = 0; i < 2*N; i++)
{
if ((h_a[i] + h_b[i] != h_c[i]))
{
Correct = 0;
wrongIndex = i;
break;
}
}
if (Correct == 1)
{
printf("GPU has computed Sum Correctly\n");
}
else
{
printf("There is an Error in GPU Computation, at index %d, CPU(%d)!=GPU(%d)\n", wrongIndex, (h_a[wrongIndex] + h_b[wrongIndex]), h_c[wrongIndex]);
}
// Free GPU memory for both streams' buffers
hipFree(d_a0);
hipFree(d_b0);
hipFree(d_c0);
hipFree(d_a1);
hipFree(d_b1);
hipFree(d_c1);
// Free the pinned host memory allocated by HIP
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
return 0;
}
```

Convert the following CUDA code to AMD GPU code:
```cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <climits>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Struct containing edge outputs computed in parallel
// One produced per thread, aggregated after GPU computation
struct EDGE_OUTPUT {
// 3-node graphlet counts
unsigned long long g31;
unsigned long long g32;
unsigned long long g33;
// 4-node cliques and cycles
unsigned long long g41;
unsigned long long g44;
// Unrestricted counts for 4-node connected graphlets
unsigned long long T_T;
unsigned long long Su_Sv;
unsigned long long T_SuVSv;
unsigned long long S_S;
// Unrestricted counts for 4-node disconnected graphlets
unsigned long long T_I;
unsigned long long SuVSv_I;
unsigned long long I_I;
unsigned long long I_I_1;
};
struct GRAPHLET_COUNTS {
// 3-node graphlet counts
unsigned long long g31;
unsigned long long g32;
unsigned long long g33;
unsigned long long g34;
// 4-node connected graphlet counts
unsigned long long g41;
unsigned long long g42;
unsigned long long g43;
unsigned long long g44;
unsigned long long g45;
unsigned long long g46;
// 4-node disconnected graphlet counts
unsigned long long g47;
unsigned long long g48;
unsigned long long g49;
unsigned long long g410;
unsigned long long g411;
};
// CUDA kernel used to count graphlets of size k=4
// Each thread processes a single edge
__global__
void graphlets(int* V, unsigned long long V_num, int* E, unsigned long long E_num, int* E_u, int* E_v, EDGE_OUTPUT* outputs)
{
// Calculate global thread index in 1D grid of 1D blocks
// Used as the undirected edge number to compute
int edge = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
// Return immediately if thread index is greater than maximum edge index
if (edge >= E_num) return;
// Using thread number, look up directed edge number in array of undirected edges
int ei = E[edge];
// Lookup the endpoints of the current edge
// TODO: Dynamically choose u to be the node with smallest neighborhood
int u = E_u[ei];
int v = E_v[ei];
// Array length is 1 less because v is omitted
int arr_len = V[u + 1] - V[u] - 1;
// Array holding current index of edges in E_v of each of u's neighbors
int* inds = new int[arr_len];
// Array holding maximum index of edges in E_v of each of u's neighbors
int* ends = new int[arr_len];
// Array indicating whether each of u's neighbors neighbors v
bool* neighbors_v = new bool[arr_len];
// To count graphlets, nodes are advanced in ascending order concurrently across u, v,
// and all of u's neighbors. Through this walk, counts can be gathered by checking
// which nodes have identical neighbors after each step
int tri_e = 0, star_u = 0, star_v = 0;
int iu = V[u], iv = V[v], arr_i = 0;
while (iu < V[u + 1] || iv < V[v + 1]) {
int cu = iu < V[u + 1] ? E_v[iu] : INT_MAX;
int cv = iv < V[v + 1] ? E_v[iv] : INT_MAX;
if (cu < cv) {
if (cu != v) {
// A star with u is found when the current node in the walk is a neighbor of u
// but not a neighbor of v
star_u++;
inds[arr_i] = V[cu];
ends[arr_i] = V[cu + 1];
neighbors_v[arr_i] = false;
arr_i++;
}
iu++;
}
else if (cv < cu) {
if (cv != u) {
// A star with v is found when the current node in the walk is a neighbor of v
// but not a neighbor of u
star_v++;
}
iv++;
}
else {
// A triangle is found when the current node in the walk is both a neighbor of
// u and a neighbor of v
tri_e++;
inds[arr_i] = V[cu];
ends[arr_i] = V[cu + 1];
neighbors_v[arr_i] = true;
arr_i++;
iu++;
iv++;
}
}
int cliq_e = 0, cyc_e = 0;
iu = V[u];
iv = V[v];
while (iu < V[u + 1] || iv < V[v + 1]) {
int cu = iu < V[u + 1] ? E_v[iu] : INT_MAX;
int cv = iv < V[v + 1] ? E_v[iv] : INT_MAX;
// Cycles and cliques can only occur when current node is in N(v) \ {u}
if (cv <= cu && cv != u) {
for (int arr_i = 0; arr_i < arr_len; arr_i++) {
// Before checking for cliques or cycles, the edge index is advanced to the current
// location in the walk
while (inds[arr_i] < ends[arr_i] && E_v[inds[arr_i]] < cv) {
inds[arr_i]++;
}
// If u's neighbor neighbors v's neighbor, a clique or cycle may be found
if (inds[arr_i] < ends[arr_i] && E_v[inds[arr_i]] == cv) {
if (cu == cv && neighbors_v[arr_i]) {
// If u's neighbor and v's neighbor form triangles with e, a clique is found
cliq_e++;
}
else if (cu != cv && !neighbors_v[arr_i]) {
// If neither u's neighbor or v's neighbor form triangles with e, a cycle is found
cyc_e++;
}
}
}
}
if (cu <= cv) iu++;
if (cv <= cu) iv++;
}
delete[] inds;
delete[] ends;
delete[] neighbors_v;
// 3-node graphlet and 4-node unrestricted counts calculated as described
// in http://nesreenahmed.com/publications/ahmed-et-al-icdm2015.pdf
outputs[edge].g31 = tri_e;
outputs[edge].g32 = star_u + star_v;
outputs[edge].g33 = V_num - (tri_e + star_u + star_v + 2);
outputs[edge].g41 = cliq_e / 2;
outputs[edge].g44 = cyc_e;
outputs[edge].T_T = (tri_e * (tri_e - 1)) / 2;
outputs[edge].Su_Sv = star_u * star_v;
outputs[edge].T_SuVSv = tri_e * (star_u + star_v);
outputs[edge].S_S = ((star_u * (star_u - 1)) / 2) + ((star_v * (star_v - 1)) / 2);
outputs[edge].T_I = tri_e * outputs[edge].g33;
outputs[edge].SuVSv_I = (star_u + star_v) * outputs[edge].g33;
outputs[edge].I_I = (outputs[edge].g33 * (outputs[edge].g33 - 1)) / 2;
outputs[edge].I_I_1 = E_num - (V[u + 1] - V[u] - 1) - (V[v + 1] - V[v] - 1) - 1;
}
int main(int argc, char *argv[])
{
if (argc != 3) {
std::cout << "usage: " << argv[0] << " <input_file> <block_size>" << std::endl;
return EXIT_FAILURE;
}
// Number of threads in each block
// Should be a multiple of GPU warp size (32 in recent architectures)
int blocksize = atoi(argv[2]);
// Nested vector used to store the input file as an adjacency list
std::vector< std::vector<int> > adj_list;
// Input network file provided by user
// File format assumed to be a tab-separated edge list
std::ifstream infile(argv[1]);
std::string su, sv;
int u, v, edge_count = 0, max = -1;
while (getline(infile, su, '\t') && getline(infile, sv)) {
// Node ids assumed to be 1-indexed and decremented to be 0-indexed
u = std::atoi(su.c_str()) - 1;
v = std::atoi(sv.c_str()) - 1;
// Dynamically add empty vectors as nodes are found in edge list
int new_max = u > v ? u : v;
if (new_max > max) {
for (int i = max + 1; i <= new_max; i++) {
adj_list.push_back(std::vector<int>(0));
}
max = new_max;
}
// Add both directions of edge to adjacency list
adj_list[u].push_back(v);
adj_list[v].push_back(u);
edge_count++;
}
int V_num = adj_list.size();
int E_num = edge_count;
std::cout << "Graph found with " << V_num << " nodes and " << E_num << " edges" << std::endl;
// Value of V[i] is index of E_u and E_v where vertex i's edges begin
// Value of V[i+1] is index of E_u and E_v where vertex i's edges end
std::vector<int> V;
// Value of E[i] is the directed edge index in E_u and E_v associated with
// the undirected edge at index i in E
std::vector<int> E;
// Value of E_u[i] is the source vertex id (as used in V) associated with edge i
std::vector<int> E_u;
// Value of E_v[i] is the destination vertex id of edge i
std::vector<int> E_v;
V.reserve(V_num + 1);
E.reserve(E_num);
E_u.reserve(E_num * 2);
E_v.reserve(E_num * 2);
// Build V, E_u, and E_v from adjacency list representation
int edge_index = 0;
for (int i = 0; i < (int) adj_list.size(); i++) {
V.push_back(edge_index);
for (int j = 0; j < (int) adj_list[i].size(); j++) {
E_u.push_back(i);
E_v.push_back(adj_list[i][j]);
if (i < adj_list[i][j]) {
E.push_back(E_u.size() - 1);
}
}
edge_index += adj_list[i].size();
}
V.push_back(edge_index);
int max_degree = 0;
for (int i = 1; i < (int) V.size(); i++) {
int degree = V[i] - V[i - 1];
if (degree > max_degree) {
max_degree = degree;
}
}
// Create and initialize CUDA thread output structs
std::vector<EDGE_OUTPUT> outputs(E_num);
outputs.resize(E_num);
// Pointers of V, E_u, E_v, and edge outputs in GPU memory
int* V_ptr;
int* E_ptr;
int* E_u_ptr;
int* E_v_ptr;
EDGE_OUTPUT* outputs_ptr;
int V_size = (V_num + 1) * sizeof(int);
int E_size = E_num * sizeof(int);
int outputs_size = E_num * sizeof(EDGE_OUTPUT);
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
// Malloc GPU memory and store location in pointers
gpuErrchk(cudaMalloc((void**)&V_ptr, V_size));
gpuErrchk(cudaMalloc((void**)&E_ptr, E_size));
gpuErrchk(cudaMalloc((void**)&E_u_ptr, E_size * 2));
gpuErrchk(cudaMalloc((void**)&E_v_ptr, E_size * 2));
gpuErrchk(cudaMalloc((void**)&outputs_ptr, outputs_size));
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point malloc_end = std::chrono::steady_clock::now();
// Copy data structures from main memory to allocated GPU memory
gpuErrchk(cudaMemcpy(V_ptr, V.data(), V_size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(E_ptr, E.data(), E_size, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(E_u_ptr, E_u.data(), E_size * 2, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(E_v_ptr, E_v.data(), E_size * 2, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(outputs_ptr, outputs.data(), outputs_size, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point memcpy_input_end = std::chrono::steady_clock::now();
// Calculate number of blocks in CUDA grid based upon number of edges in graph
int gridsize = E_num / blocksize;
int gridlength = 1;
if (E_num % blocksize > 0) {
gridsize++;
if (gridsize > 65535) {
gridsize -= 65535;
gridlength++;
}
}
if (gridlength > 1) {
gridsize = 65535;
}
// Create one-dimensional blocks and grids based upon blocksize and gridsize
// TODO: Increase dimensionality in order to support larger networks
dim3 dimBlock(blocksize, 1);
dim3 dimGrid(gridsize, gridlength);
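// The kernel allocates per-thread scratch arrays with device-side new, which
// draws from the runtime's malloc heap; reserve enough for every thread's worst case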
size_t heap_size = (2 * sizeof(int) + sizeof(bool)) * max_degree * (size_t) E_num;
gpuErrchk(cudaDeviceSetLimit(cudaLimitMallocHeapSize, heap_size));
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point kernel_begin = std::chrono::steady_clock::now();
// Execute CUDA kernel
// TODO: Add timing to kernel execution and count aggregation below
graphlets<<<dimGrid, dimBlock>>>(V_ptr, V_num, E_ptr, E_num, E_u_ptr, E_v_ptr, outputs_ptr);
gpuErrchk(cudaGetLastError());
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point kernel_end = std::chrono::steady_clock::now();
// Copy output data from GPU memory back into main memory
gpuErrchk(cudaMemcpy(outputs.data(), outputs_ptr, outputs_size, cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point memcpy_output_end = std::chrono::steady_clock::now();
// Free memory in GPU
gpuErrchk(cudaFree(V_ptr));
gpuErrchk(cudaFree(E_ptr));
gpuErrchk(cudaFree(E_u_ptr));
gpuErrchk(cudaFree(E_v_ptr));
gpuErrchk(cudaFree(outputs_ptr));
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point free_end = std::chrono::steady_clock::now();
// Compute aggregate outputs based upon individual edge outputs
EDGE_OUTPUT aggregates = { 0 };
for (int i = 0; i < E_num; i++) {
aggregates.g31 += outputs[i].g31;
aggregates.g32 += outputs[i].g32;
aggregates.g33 += outputs[i].g33;
aggregates.g41 += outputs[i].g41;
aggregates.g44 += outputs[i].g44;
aggregates.T_T += outputs[i].T_T;
aggregates.Su_Sv += outputs[i].Su_Sv;
aggregates.T_SuVSv += outputs[i].T_SuVSv;
aggregates.S_S += outputs[i].S_S;
aggregates.T_I += outputs[i].T_I;
aggregates.SuVSv_I += outputs[i].SuVSv_I;
aggregates.I_I += outputs[i].I_I;
aggregates.I_I_1 += outputs[i].I_I_1;
}
// 3-node and 4-node graphlet counts calculated as described
// in http://nesreenahmed.com/publications/ahmed-et-al-icdm2015.pdf
GRAPHLET_COUNTS counts = { 0 };
counts.g31 = aggregates.g31 / 3;
counts.g32 = aggregates.g32 / 2;
counts.g33 = aggregates.g33;
counts.g34 = (((unsigned long long) V_num * (V_num - 1) * (V_num - 2)) / (3 * 2)) - counts.g31 - counts.g32 - counts.g33;
counts.g41 = aggregates.g41 / 6;
counts.g42 = aggregates.T_T - 6 * counts.g41;
counts.g43 = (aggregates.T_SuVSv - 4 * counts.g42) / 2;
counts.g44 = aggregates.g44 / 4;
counts.g45 = (aggregates.S_S - counts.g43) / 3;
counts.g46 = aggregates.Su_Sv - 4 * counts.g44;
counts.g47 = (aggregates.T_I - counts.g43) / 3;
counts.g48 = (aggregates.SuVSv_I - 2 * counts.g46) / 2;
counts.g49 = (aggregates.I_I_1 - (6 * counts.g41) - (4 * counts.g42) - (2 * counts.g43) - (4 * counts.g44) - (2 * counts.g46)) / 2;
counts.g410 = aggregates.I_I - 2 * counts.g49;
counts.g411 = (((unsigned long long) V_num * (V_num - 1) * (V_num - 2) * (V_num - 3)) / (4 * 3 * 2)) - counts.g41 - counts.g42 - counts.g43 - counts.g44 - counts.g45 - counts.g46 - counts.g47 - counts.g48 - counts.g49 - counts.g410;
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
std::cout << std::endl;
std::cout << " Graphlet Counts" << std::endl;
std::cout << "===========================" << std::endl;
std::cout << std::endl;
std::cout << "k=4 Connected Graphlets" << std::endl;
std::cout << "---------------------------" << std::endl;
std::cout << "4-clique (g41) : " << counts.g41 << std::endl;
std::cout << "4-chordalcycle (g42) : " << counts.g42 << std::endl;
std::cout << "4-tailedtriangle (g43) : " << counts.g43 << std::endl;
std::cout << "4-cycle (g44) : " << counts.g44 << std::endl;
std::cout << "3-star (g45) : " << counts.g45 << std::endl;
std::cout << "4-path (g46) : " << counts.g46 << std::endl;
std::cout << std::endl;
std::cout << "k=4 Disconnected Graphlets" << std::endl;
std::cout << "---------------------------" << std::endl;
std::cout << "4-node-1-triangle (g47) : " << counts.g47 << std::endl;
std::cout << "4-node-2-star (g48) : " << counts.g48 << std::endl;
std::cout << "4-node-2-edge (g49) : " << counts.g49 << std::endl;
std::cout << "4-node-1-edge (g410) : " << counts.g410 << std::endl;
std::cout << "4-node-independent (g411) : " << counts.g411 << std::endl;
std::cout << std::endl;
std::cout << "k=3 Graphlets" << std::endl;
std::cout << "---------------------------" << std::endl;
std::cout << "triangle (g31) : " << counts.g31 << std::endl;
std::cout << "2-star (g32) : " << counts.g32 << std::endl;
std::cout << "3-node-1-edge (g33) : " << counts.g33 << std::endl;
std::cout << "3-node-independent (g34) : " << counts.g34 << std::endl;
std::cout << std::endl;
std::cout << " Timing (us)" << std::endl;
std::cout << "====================" << std::endl;
std::cout << "total elapsed : " << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << std::endl;
std::cout << "cudaMalloc : " << std::chrono::duration_cast<std::chrono::microseconds>(malloc_end - begin).count() << std::endl;
std::cout << "cudaMemcpy (input) : " << std::chrono::duration_cast<std::chrono::microseconds>(memcpy_input_end - malloc_end).count() << std::endl;
std::cout << "kernel (graphlets) : " << std::chrono::duration_cast<std::chrono::microseconds>(kernel_end - kernel_begin).count() << std::endl;
std::cout << "cudaMemcpy (output): " << std::chrono::duration_cast<std::chrono::microseconds>(memcpy_output_end - kernel_end).count() << std::endl;
std::cout << "cudaFree : " << std::chrono::duration_cast<std::chrono::microseconds>(free_end - memcpy_output_end).count() << std::endl;
std::cout << "aggregate : " << std::chrono::duration_cast<std::chrono::microseconds>(end - memcpy_output_end).count() << std::endl;
return EXIT_SUCCESS;
}
```

Answer:

```cpp
#include "hip/hip_runtime.h"
#include <climits>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Struct containing edge outputs computed in parallel
// One produced per thread, aggregated after GPU computation
struct EDGE_OUTPUT {
// 3-node graphlet counts
unsigned long long g31;
unsigned long long g32;
unsigned long long g33;
// 4-node cliques and cycles
unsigned long long g41;
unsigned long long g44;
// Unrestricted counts for 4-node connected graphlets
unsigned long long T_T;
unsigned long long Su_Sv;
unsigned long long T_SuVSv;
unsigned long long S_S;
// Unrestricted counts for 4-node disconnected graphlets
unsigned long long T_I;
unsigned long long SuVSv_I;
unsigned long long I_I;
unsigned long long I_I_1;
};
struct GRAPHLET_COUNTS {
// 3-node graphlet counts
unsigned long long g31;
unsigned long long g32;
unsigned long long g33;
unsigned long long g34;
// 4-node connected graphlet counts
unsigned long long g41;
unsigned long long g42;
unsigned long long g43;
unsigned long long g44;
unsigned long long g45;
unsigned long long g46;
// 4-node disconnected graphlet counts
unsigned long long g47;
unsigned long long g48;
unsigned long long g49;
unsigned long long g410;
unsigned long long g411;
};
// CUDA kernel used to count graphlets of size k=4
// Each thread processes a single edge
__global__
void graphlets(int* V, unsigned long long V_num, int* E, unsigned long long E_num, int* E_u, int* E_v, EDGE_OUTPUT* outputs)
{
// Calculate global thread index in 1D grid of 1D blocks
// Used as the undirected edge number to compute
int edge = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
// Return immediately if thread index is greater than maximum edge index
if (edge >= E_num) return;
// Using thread number, look up directed edge number in array of undirected edges
int ei = E[edge];
// Lookup the endpoints of the current edge
// TODO: Dynamically choose u to be the node with smallest neighborhood
int u = E_u[ei];
int v = E_v[ei];
// Array length is 1 less because v is omitted
int arr_len = V[u + 1] - V[u] - 1;
// Array holding current index of edges in E_v of each of u's neighbors
int* inds = new int[arr_len];
// Array holding maximum index of edges in E_v of each of u's neighbors
int* ends = new int[arr_len];
// Array indicating whether each of u's neighbors neighbors v
bool* neighbors_v = new bool[arr_len];
// To count graphlets, nodes are advanced in ascending order concurrently across u, v,
// and all of u's neighbors. Through this walk, counts can be gathered by checking
// which nodes have identical neighbors after each step
int tri_e = 0, star_u = 0, star_v = 0;
int iu = V[u], iv = V[v], arr_i = 0;
while (iu < V[u + 1] || iv < V[v + 1]) {
int cu = iu < V[u + 1] ? E_v[iu] : INT_MAX;
int cv = iv < V[v + 1] ? E_v[iv] : INT_MAX;
if (cu < cv) {
if (cu != v) {
// A star with u is found when the current node in the walk is a neighbor of u
// but not a neighbor of v
star_u++;
inds[arr_i] = V[cu];
ends[arr_i] = V[cu + 1];
neighbors_v[arr_i] = false;
arr_i++;
}
iu++;
}
else if (cv < cu) {
if (cv != u) {
// A star with v is found when the current node in the walk is a neighbor of v
// but not a neighbor of u
star_v++;
}
iv++;
}
else {
// A triangle is found when the current node in the walk is both a neighbor of
// u and a neighbor of v
tri_e++;
inds[arr_i] = V[cu];
ends[arr_i] = V[cu + 1];
neighbors_v[arr_i] = true;
arr_i++;
iu++;
iv++;
}
}
int cliq_e = 0, cyc_e = 0;
iu = V[u];
iv = V[v];
while (iu < V[u + 1] || iv < V[v + 1]) {
int cu = iu < V[u + 1] ? E_v[iu] : INT_MAX;
int cv = iv < V[v + 1] ? E_v[iv] : INT_MAX;
// Cycles and cliques can only occur when current node is in N(v) \ {u}
if (cv <= cu && cv != u) {
for (int arr_i = 0; arr_i < arr_len; arr_i++) {
// Before checking for cliques or cycles, the edge index is advanced to the current
// location in the walk
while (inds[arr_i] < ends[arr_i] && E_v[inds[arr_i]] < cv) {
inds[arr_i]++;
}
// If u's neighbor neighbors v's neighbor, a clique or cycle may be found
if (inds[arr_i] < ends[arr_i] && E_v[inds[arr_i]] == cv) {
if (cu == cv && neighbors_v[arr_i]) {
// If u's neighbor and v's neighbor form triangles with e, a clique is found
cliq_e++;
}
else if (cu != cv && !neighbors_v[arr_i]) {
// If neither u's neighbor or v's neighbor form triangles with e, a cycle is found
cyc_e++;
}
}
}
}
if (cu <= cv) iu++;
if (cv <= cu) iv++;
}
delete[] inds;
delete[] ends;
delete[] neighbors_v;
// 3-node graphlet and 4-node unrestricted counts calculated as described
// in http://nesreenahmed.com/publications/ahmed-et-al-icdm2015.pdf
outputs[edge].g31 = tri_e;
outputs[edge].g32 = star_u + star_v;
outputs[edge].g33 = V_num - (tri_e + star_u + star_v + 2);
outputs[edge].g41 = cliq_e / 2;
outputs[edge].g44 = cyc_e;
outputs[edge].T_T = (tri_e * (tri_e - 1)) / 2;
outputs[edge].Su_Sv = star_u * star_v;
outputs[edge].T_SuVSv = tri_e * (star_u + star_v);
outputs[edge].S_S = ((star_u * (star_u - 1)) / 2) + ((star_v * (star_v - 1)) / 2);
outputs[edge].T_I = tri_e * outputs[edge].g33;
outputs[edge].SuVSv_I = (star_u + star_v) * outputs[edge].g33;
outputs[edge].I_I = (outputs[edge].g33 * (outputs[edge].g33 - 1)) / 2;
outputs[edge].I_I_1 = E_num - (V[u + 1] - V[u] - 1) - (V[v + 1] - V[v] - 1) - 1;
}
int main(int argc, char *argv[])
{
if (argc != 3) {
std::cout << "usage: " << argv[0] << " <input_file> <block_size>" << std::endl;
return EXIT_FAILURE;
}
// Number of threads in each block
// Should be a multiple of GPU warp size (32 in recent architectures)
int blocksize = atoi(argv[2]);
// Nested vector used to store the input file as an adjacency list
std::vector< std::vector<int> > adj_list;
// Input network file provided by user
// File format assumed to be a tab-separated edge list
std::ifstream infile(argv[1]);
std::string su, sv;
int u, v, edge_count = 0, max = -1;
while (getline(infile, su, '\t') && getline(infile, sv)) {
// Node ids assumed to be 1-indexed and decremented to be 0-indexed
u = std::atoi(su.c_str()) - 1;
v = std::atoi(sv.c_str()) - 1;
// Dynamically add empty vectors as nodes are found in edge list
int new_max = u > v ? u : v;
if (new_max > max) {
for (int i = max + 1; i <= new_max; i++) {
adj_list.push_back(std::vector<int>(0));
}
max = new_max;
}
// Add both directions of edge to adjacency list
adj_list[u].push_back(v);
adj_list[v].push_back(u);
edge_count++;
}
int V_num = adj_list.size();
int E_num = edge_count;
std::cout << "Graph found with " << V_num << " nodes and " << E_num << " edges" << std::endl;
// Value of V[i] is index of E_u and E_v where vertex i's edges begin
// Value of V[i+1] is index of E_u and E_v where vertex i's edges end
std::vector<int> V;
// Value of E[i] is the directed edge index in E_u and E_v associated with
// the undirected edge at index i in E
std::vector<int> E;
// Value of E_u[i] is the source vertex id (as used in V) associated with edge i
std::vector<int> E_u;
// Value of E_v[i] is the destination vertex id of edge i
std::vector<int> E_v;
V.reserve(V_num + 1);
E.reserve(E_num);
E_u.reserve(E_num * 2);
E_v.reserve(E_num * 2);
// Build V, E_u, and E_v from adjacency list representation
int edge_index = 0;
for (int i = 0; i < (int) adj_list.size(); i++) {
V.push_back(edge_index);
for (int j = 0; j < (int) adj_list[i].size(); j++) {
E_u.push_back(i);
E_v.push_back(adj_list[i][j]);
if (i < adj_list[i][j]) {
E.push_back(E_u.size() - 1);
}
}
edge_index += adj_list[i].size();
}
V.push_back(edge_index);
int max_degree = 0;
for (int i = 1; i < (int) V.size(); i++) {
int degree = V[i] - V[i - 1];
if (degree > max_degree) {
max_degree = degree;
}
}
// Create and initialize CUDA thread output structs
std::vector<EDGE_OUTPUT> outputs(E_num);
outputs.resize(E_num);
// Pointers of V, E_u, E_v, and edge outputs in GPU memory
int* V_ptr;
int* E_ptr;
int* E_u_ptr;
int* E_v_ptr;
EDGE_OUTPUT* outputs_ptr;
int V_size = (V_num + 1) * sizeof(int);
int E_size = E_num * sizeof(int);
int outputs_size = E_num * sizeof(EDGE_OUTPUT);
std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();
// Malloc GPU memory and store location in pointers
gpuErrchk(hipMalloc((void**)&V_ptr, V_size));
gpuErrchk(hipMalloc((void**)&E_ptr, E_size));
gpuErrchk(hipMalloc((void**)&E_u_ptr, E_size * 2));
gpuErrchk(hipMalloc((void**)&E_v_ptr, E_size * 2));
gpuErrchk(hipMalloc((void**)&outputs_ptr, outputs_size));
hipDeviceSynchronize();
std::chrono::steady_clock::time_point malloc_end = std::chrono::steady_clock::now();
// Copy data structures from main memory to allocated GPU memory
gpuErrchk(hipMemcpy(V_ptr, V.data(), V_size, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(E_ptr, E.data(), E_size, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(E_u_ptr, E_u.data(), E_size * 2, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(E_v_ptr, E_v.data(), E_size * 2, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(outputs_ptr, outputs.data(), outputs_size, hipMemcpyHostToDevice));
hipDeviceSynchronize();
std::chrono::steady_clock::time_point memcpy_input_end = std::chrono::steady_clock::now();
// Calculate number of blocks in CUDA grid based upon number of edges in graph
int gridsize = E_num / blocksize;
int gridlength = 1;
if (E_num % blocksize > 0) {
gridsize++;
if (gridsize > 65535) {
gridsize -= 65535;
gridlength++;
}
}
if (gridlength > 1) {
gridsize = 65535;
}
// Create one-dimensional blocks and grids based upon blocksize and gridsize
// TODO: Increase dimensionality in order to support larger networks
dim3 dimBlock(blocksize, 1);
dim3 dimGrid(gridsize, gridlength);
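// The kernel allocates per-thread scratch arrays with device-side new, which
// draws from the runtime's malloc heap; reserve enough for every thread's worst case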
size_t heap_size = (2 * sizeof(int) + sizeof(bool)) * max_degree * (size_t) E_num;
gpuErrchk(hipDeviceSetLimit(hipLimitMallocHeapSize, heap_size));
hipDeviceSynchronize();
std::chrono::steady_clock::time_point kernel_begin = std::chrono::steady_clock::now();
// Execute CUDA kernel
// TODO: Add timing to kernel execution and count aggregation below
graphlets<<<dimGrid, dimBlock>>>(V_ptr, V_num, E_ptr, E_num, E_u_ptr, E_v_ptr, outputs_ptr);
gpuErrchk(hipGetLastError());
hipDeviceSynchronize();
std::chrono::steady_clock::time_point kernel_end = std::chrono::steady_clock::now();
// Copy output data from GPU memory back into main memory
gpuErrchk(hipMemcpy(outputs.data(), outputs_ptr, outputs_size, hipMemcpyDeviceToHost));
hipDeviceSynchronize();
std::chrono::steady_clock::time_point memcpy_output_end = std::chrono::steady_clock::now();
// Free memory in GPU
gpuErrchk(hipFree(V_ptr));
gpuErrchk(hipFree(E_ptr));
gpuErrchk(hipFree(E_u_ptr));
gpuErrchk(hipFree(E_v_ptr));
gpuErrchk(hipFree(outputs_ptr));
hipDeviceSynchronize();
std::chrono::steady_clock::time_point free_end = std::chrono::steady_clock::now();
// Compute aggregate outputs based upon individual edge outputs
EDGE_OUTPUT aggregates = { 0 };
for (int i = 0; i < E_num; i++) {
aggregates.g31 += outputs[i].g31;
aggregates.g32 += outputs[i].g32;
aggregates.g33 += outputs[i].g33;
aggregates.g41 += outputs[i].g41;
aggregates.g44 += outputs[i].g44;
aggregates.T_T += outputs[i].T_T;
aggregates.Su_Sv += outputs[i].Su_Sv;
aggregates.T_SuVSv += outputs[i].T_SuVSv;
aggregates.S_S += outputs[i].S_S;
aggregates.T_I += outputs[i].T_I;
aggregates.SuVSv_I += outputs[i].SuVSv_I;
aggregates.I_I += outputs[i].I_I;
aggregates.I_I_1 += outputs[i].I_I_1;
}
// 3-node and 4-node graphlet counts calculated as described
// in http://nesreenahmed.com/publications/ahmed-et-al-icdm2015.pdf
GRAPHLET_COUNTS counts = { 0 };
counts.g31 = aggregates.g31 / 3;
counts.g32 = aggregates.g32 / 2;
counts.g33 = aggregates.g33;
counts.g34 = (((unsigned long long) V_num * (V_num - 1) * (V_num - 2)) / (3 * 2)) - counts.g31 - counts.g32 - counts.g33;
counts.g41 = aggregates.g41 / 6;
counts.g42 = aggregates.T_T - 6 * counts.g41;
counts.g43 = (aggregates.T_SuVSv - 4 * counts.g42) / 2;
counts.g44 = aggregates.g44 / 4;
counts.g45 = (aggregates.S_S - counts.g43) / 3;
counts.g46 = aggregates.Su_Sv - 4 * counts.g44;
counts.g47 = (aggregates.T_I - counts.g43) / 3;
counts.g48 = (aggregates.SuVSv_I - 2 * counts.g46) / 2;
counts.g49 = (aggregates.I_I_1 - (6 * counts.g41) - (4 * counts.g42) - (2 * counts.g43) - (4 * counts.g44) - (2 * counts.g46)) / 2;
counts.g410 = aggregates.I_I - 2 * counts.g49;
counts.g411 = (((unsigned long long) V_num * (V_num - 1) * (V_num - 2) * (V_num - 3)) / (4 * 3 * 2)) - counts.g41 - counts.g42 - counts.g43 - counts.g44 - counts.g45 - counts.g46 - counts.g47 - counts.g48 - counts.g49 - counts.g410;
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
std::cout << std::endl;
std::cout << " Graphlet Counts" << std::endl;
std::cout << "===========================" << std::endl;
std::cout << std::endl;
std::cout << "k=4 Connected Graphlets" << std::endl;
std::cout << "---------------------------" << std::endl;
std::cout << "4-clique (g41) : " << counts.g41 << std::endl;
std::cout << "4-chordalcycle (g42) : " << counts.g42 << std::endl;
std::cout << "4-tailedtriangle (g43) : " << counts.g43 << std::endl;
std::cout << "4-cycle (g44) : " << counts.g44 << std::endl;
std::cout << "3-star (g45) : " << counts.g45 << std::endl;
std::cout << "4-path (g46) : " << counts.g46 << std::endl;
std::cout << std::endl;
std::cout << "k=4 Disconnected Graphlets" << std::endl;
std::cout << "---------------------------" << std::endl;
std::cout << "4-node-1-triangle (g47) : " << counts.g47 << std::endl;
std::cout << "4-node-2-star (g48) : " << counts.g48 << std::endl;
std::cout << "4-node-2-edge (g49) : " << counts.g49 << std::endl;
std::cout << "4-node-1-edge (g410) : " << counts.g410 << std::endl;
std::cout << "4-node-independent (g411) : " << counts.g411 << std::endl;
std::cout << std::endl;
std::cout << "k=3 Graphlets" << std::endl;
std::cout << "---------------------------" << std::endl;
std::cout << "triangle (g31) : " << counts.g31 << std::endl;
std::cout << "2-star (g32) : " << counts.g32 << std::endl;
std::cout << "3-node-1-edge (g33) : " << counts.g33 << std::endl;
std::cout << "3-node-independent (g34) : " << counts.g34 << std::endl;
std::cout << std::endl;
std::cout << " Timing (us)" << std::endl;
std::cout << "====================" << std::endl;
std::cout << "total elapsed : " << std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count() << std::endl;
std::cout << "hipMalloc : " << std::chrono::duration_cast<std::chrono::microseconds>(malloc_end - begin).count() << std::endl;
std::cout << "hipMemcpy (input) : " << std::chrono::duration_cast<std::chrono::microseconds>(memcpy_input_end - malloc_end).count() << std::endl;
std::cout << "kernel (graphlets) : " << std::chrono::duration_cast<std::chrono::microseconds>(kernel_end - kernel_begin).count() << std::endl;
std::cout << "hipMemcpy (output): " << std::chrono::duration_cast<std::chrono::microseconds>(memcpy_output_end - kernel_end).count() << std::endl;
std::cout << "hipFree : " << std::chrono::duration_cast<std::chrono::microseconds>(free_end - memcpy_output_end).count() << std::endl;
std::cout << "aggregate : " << std::chrono::duration_cast<std::chrono::microseconds>(end - memcpy_output_end).count() << std::endl;
return EXIT_SUCCESS;
}
```

Convert the following CUDA code to AMD GPU code:
```cuda
#include <stdio.h>
__global__ void add(int a, int b, int *c){
*c = a + b;
}
int main(){
int c;
int *dev_c;
cudaMalloc( (void**) &dev_c, sizeof(int) );
add<<<1, 1>>> (2, 7, dev_c);
cudaMemcpy(&c,
dev_c, sizeof(int),
cudaMemcpyDeviceToHost);
printf("2 + 7 = %d \n", c);
cudaFree(dev_c);
}
```

Answer:

```cpp
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void add(int a, int b, int *c){
*c = a + b;
}
int main(){
int c;
int *dev_c;
hipMalloc( (void**) &dev_c, sizeof(int) );
add<<<1, 1>>> (2, 7, dev_c);
hipMemcpy(&c,
dev_c, sizeof(int),
hipMemcpyDeviceToHost);
printf("2 + 7 = %d \n", c);
hipFree(dev_c);
}
```

Convert the following CUDA code to AMD GPU code:
```cuda
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
// __global__ indicates a function or "kernel" that runs on the device and is called from host code
__global__ void hello_kernel(void)
{
// greet from the device : the GPU and its memory
printf("Hello, world from the device!\n");
}
int main(void)
{
// greet from the host : the CPU and its memory
printf("Hello, world from the host!\n");
// triple angle brackets mark a call from host code to device code
// launch a kernel with a single thread to greet from the device
hello_kernel<<<1,1>>>();
// wait for the device to finish so that we see the message
cudaDeviceSynchronize();
// check error
printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
return 0;
}
// nvcc separates source code into host and device components
// nvcc hello.cu -o hello
```

Answer:

```cpp
#include <hip/hip_runtime.h>
#include <stdio.h>
// __global__ indicates a function or "kernel" that runs on the device and is called from host code
__global__ void hello_kernel(void)
{
// greet from the device : the GPU and its memory
printf("Hello, world from the device!\n");
}
int main(void)
{
// greet from the host : the CPU and its memory
printf("Hello, world from the host!\n");
// triple angle brackets mark a call from host code to device code
// launch a kernel with a single thread to greet from the device
hello_kernel<<<1,1>>>();
// wait for the device to finish so that we see the message
hipDeviceSynchronize();
// check error
printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));
return 0;
}
// hipcc separates source code into host and device components
// hipcc hello.cpp -o hello
```

Convert the following CUDA code to AMD GPU code:
```cuda
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 128 // 2^7
#define BLOCKS 1024 // 2^10
#define NUM_VALS THREADS*BLOCKS
#define MAX_VALUE 8196
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
__global__ void count(int *A, int *B, int n) {
int b_id = blockIdx.x,
b_num = gridDim.x,
b_size,
b_offset,
t_id = threadIdx.x,
t_num = blockDim.x,
t_size,
t_offset,
offset;
// initialize a shared memory array to store the count for each block.
__shared__ int count[MAX_VALUE];
// set initial values to zero. Each thread zeroes its own portion.
t_size = (t_num > MAX_VALUE ? 1 : MAX_VALUE / t_num);
offset = t_id * t_size;
for (int i = offset; i < offset + t_size && i < MAX_VALUE; ++i)
count[i] = 0;
// wait until all threads have completed the initialization process.
__syncthreads();
// accumulate the counts of each value. Each thread counts a certain portion
// of the unsorted array.
b_size = (b_num > n ? 1 : n / b_num);
b_offset = b_id * b_size;
t_size = (t_num > b_size ? 1 : b_size / t_num);
offset = b_offset + t_id * t_size;
for (int i = offset; i < offset + t_size && i < b_offset + b_size && i < n; ++i)
atomicAdd(&count[A[i]], 1);
// wait until all threads have completed the counting phase.
__syncthreads();
// copy the block count into global memory. Each thread copies its portion to
// the global memory.
t_size = (t_num > MAX_VALUE ? 1 : MAX_VALUE / t_num);
t_offset = t_id * t_size;
offset = b_id * MAX_VALUE + t_offset;
if (offset + t_size <= (b_id + 1) * MAX_VALUE)
memcpy(&B[offset], &count[t_offset], sizeof(int) * t_size);
}
__global__ void merge(int *B) {
int b_id = blockIdx.x,
b_num = gridDim.x,
b_size,
b_offset,
t_id = threadIdx.x,
t_num = blockDim.x,
t_size,
offset;
// loop through and merge until all arrays are merged.
for (int i = b_num, j = 2; i != 1; i /= 2, j *= 2) {
// each block will operate on b_size values which equal, the number of
// count arrays * size of count arrays / number of blocks / 2. The final 2
// represents the merge process.
b_size = i * MAX_VALUE / b_num / 2;
b_offset = (b_id / j) * (j * MAX_VALUE) + b_size * (b_id % j);
t_size = (t_num > b_size ? 1 : b_size / t_num);
// calculate the offset that each thread will start at and sum counts.
offset = b_offset + t_id * t_size;
for (int k = offset, l = offset + (MAX_VALUE * (j / 2));
k < offset + t_size && k < b_offset + b_size; ++k, ++l)
B[k] += B[l];
// wait until all arrays are merged at every step.
__syncthreads();
}
}
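// Note: despite its name, bitonic_sort below implements a counting sort:
// count() builds per-block histograms of the input values and merge() sums
// them pairwise into a single global count array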
void bitonic_sort(int *values)
{
int *dev_values, *dev_ans;
size_t size = NUM_VALS * sizeof(int);
size_t size1 = MAX_VALUE * BLOCKS * sizeof(int);
cudaMalloc((void**) &dev_values, size);
cudaMalloc((void**) &dev_ans, size1);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS,1);
dim3 threads(THREADS,1);
count<<<blocks, threads>>>(dev_values, dev_ans, NUM_VALS);
merge<<<blocks, threads>>>(dev_ans);
int ans[NUM_VALS];
cudaMemcpy(ans, dev_ans, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
cudaFree(dev_ans);
// Construct sorted array
for (int i = 0, j = 0; i < MAX_VALUE; ++i) {
for (int k = 0; k < ans[i]; ++k, ++j) {
values[j] = i;
}
}
}
int main(int argc, char const *argv[])
{
clock_t start, stop;
int *values = (int*)malloc(NUM_VALS * sizeof(int));
FILE *f = fopen("reverse_dataset.txt", "r");
for(int i=0;i< NUM_VALS; i++) {
fscanf(f, "%d\n", &values[i]);
}
cudaEvent_t estart, estop;
start = clock();
cudaEventCreate( &estart );
cudaEventRecord( estart, 0 );
bitonic_sort(values);
cudaEventCreate( &estop );
cudaEventRecord( estop, 0 ) ;
cudaEventSynchronize( estop );
stop = clock();
float elapsedTime;
cudaEventElapsedTime( &elapsedTime,
estart, estop ) ;
printf("Elapsed time: %f\n", elapsedTime);
//print_elapsed(start, stop);
return 0;
}
```

Answer:

```cpp
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 128 // 2^7
#define BLOCKS 1024 // 2^10
#define NUM_VALS THREADS*BLOCKS
#define MAX_VALUE 8196
void print_elapsed(clock_t start, clock_t stop)
{
double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
__global__ void count(int *A, int *B, int n) {
int b_id = blockIdx.x,
b_num = gridDim.x,
b_size,
b_offset,
t_id = threadIdx.x,
t_num = blockDim.x,
t_size,
t_offset,
offset;
// initialize a shared memory array to store the count for each block.
__shared__ int count[MAX_VALUE];
// set initial values to zero. Each thread zeroes its own portion.
t_size = (t_num > MAX_VALUE ? 1 : MAX_VALUE / t_num);
offset = t_id * t_size;
for (int i = offset; i < offset + t_size && i < MAX_VALUE; ++i)
count[i] = 0;
// wait until all threads have completed the initialization process.
__syncthreads();
// accumulate the counts of each value. Each thread counts a certain portion
// of the unsorted array.
b_size = (b_num > n ? 1 : n / b_num);
b_offset = b_id * b_size;
t_size = (t_num > b_size ? 1 : b_size / t_num);
offset = b_offset + t_id * t_size;
for (int i = offset; i < offset + t_size && i < b_offset + b_size && i < n; ++i)
atomicAdd(&count[A[i]], 1);
// wait until all threads have completed the counting phase.
__syncthreads();
// copy the block count into global memory. Each thread copies its portion to
// the global memory.
t_size = (t_num > MAX_VALUE ? 1 : MAX_VALUE / t_num);
t_offset = t_id * t_size;
offset = b_id * MAX_VALUE + t_offset;
if (offset + t_size <= (b_id + 1) * MAX_VALUE)
memcpy(&B[offset], &count[t_offset], sizeof(int) * t_size);
}
__global__ void merge(int *B) {
int b_id = blockIdx.x,
b_num = gridDim.x,
b_size,
b_offset,
t_id = threadIdx.x,
t_num = blockDim.x,
t_size,
offset;
// loop through and merge until all arrays are merged.
for (int i = b_num, j = 2; i != 1; i /= 2, j *= 2) {
// each block will operate on b_size values which equal, the number of
// count arrays * size of count arrays / number of blocks / 2. The final 2
// represents the merge process.
b_size = i * MAX_VALUE / b_num / 2;
b_offset = (b_id / j) * (j * MAX_VALUE) + b_size * (b_id % j);
t_size = (t_num > b_size ? 1 : b_size / t_num);
// calculate the offset that each thread will start at and sum counts.
offset = b_offset + t_id * t_size;
for (int k = offset, l = offset + (MAX_VALUE * (j / 2));
k < offset + t_size && k < b_offset + b_size; ++k, ++l)
B[k] += B[l];
// wait until all arrays are merged at every step.
__syncthreads();
}
}
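// Note: despite its name, bitonic_sort below implements a counting sort:
// count() builds per-block histograms of the input values and merge() sums
// them pairwise into a single global count array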
void bitonic_sort(int *values)
{
int *dev_values, *dev_ans;
size_t size = NUM_VALS * sizeof(int);
size_t size1 = MAX_VALUE * BLOCKS * sizeof(int);
hipMalloc((void**) &dev_values, size);
hipMalloc((void**) &dev_ans, size1);
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
dim3 blocks(BLOCKS,1);
dim3 threads(THREADS,1);
count<<<blocks, threads>>>(dev_values, dev_ans, NUM_VALS);
merge<<<blocks, threads>>>(dev_ans);
int ans[NUM_VALS];
hipMemcpy(ans, dev_ans, size, hipMemcpyDeviceToHost);
hipFree(dev_values);
hipFree(dev_ans);
// Construct sorted array
for (int i = 0, j = 0; i < MAX_VALUE; ++i) {
for (int k = 0; k < ans[i]; ++k, ++j) {
values[j] = i;
}
}
}
int main(int argc, char const *argv[])
{
clock_t start, stop;
int *values = (int*)malloc(NUM_VALS * sizeof(int));
FILE *f = fopen("reverse_dataset.txt", "r");
for(int i=0;i< NUM_VALS; i++) {
fscanf(f, "%d\n", &values[i]);
}
hipEvent_t estart, estop;
start = clock();
hipEventCreate( &estart );
hipEventRecord( estart, 0 );
bitonic_sort(values);
hipEventCreate( &estop );
hipEventRecord( estop, 0 ) ;
hipEventSynchronize( estop );
stop = clock();
float elapsedTime;
hipEventElapsedTime( &elapsedTime,
estart, estop ) ;
printf("Elapsed time: %f\n", elapsedTime);
//print_elapsed(start, stop);
return 0;
}
```

Convert the following CUDA code to AMD GPU code:
```cuda
#include <cmath>
#include <iostream>
using std::endl;
__global__ void sum_kernel(double *A, double *B, double *C, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) { return; }
double a = A[idx];
double b = B[idx];
C[idx] = a + b;
}
int main(int argc, char **argv) {
// Size of vectors
int n = 100000;
// Host vectors
double *h_a, *h_b, *h_c;
// Size, in bytes, of each vector
size_t bytes = n * sizeof(double);
// Allocate memory for each vector on host
h_a = (double *) malloc(bytes);
h_b = (double *) malloc(bytes);
h_c = (double *) malloc(bytes);
int i;
// Initialize vectors on host
for (i = 0; i < n; i++) {
h_a[i] = sin(i) * sin(i);
h_b[i] = cos(i) * cos(i);
}
// Device input vectors
double *d_a, *d_b, *d_c;
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
// Copy host vectors to device
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize = 1024;
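// 1024 threads is the maximum block size on current NVIDIA GPUs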
int gridSize = (n - 1) / blockSize + 1;
// Execute the kernel
sum_kernel<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
double maxError = 0;
for (int i = 0; i < n; ++i) {
double error = std::fabs(h_c[i] - 1.0);
maxError = error > maxError ? error : maxError;
}
std::cout << "Max error = " << maxError << endl;
}
```

Answer:

```cpp
#include <hip/hip_runtime.h>
#include <cmath>
#include <iostream>
using std::endl;
__global__ void sum_kernel(double *A, double *B, double *C, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) { return; }
double a = A[idx];
double b = B[idx];
C[idx] = a + b;
}
int main(int argc, char **argv) {
// Size of vectors
int n = 100000;
// Host vectors
double *h_a, *h_b, *h_c;
// Size, in bytes, of each vector
size_t bytes = n * sizeof(double);
// Allocate memory for each vector on host
h_a = (double *) malloc(bytes);
h_b = (double *) malloc(bytes);
h_c = (double *) malloc(bytes);
int i;
// Initialize vectors on host
for (i = 0; i < n; i++) {
h_a[i] = sin(i) * sin(i);
h_b[i] = cos(i) * cos(i);
}
// Device input vectors
double *d_a, *d_b, *d_c;
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
// Copy host vectors to device
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize = 1024;
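// a block size of 1024 is typically also the maximum supported on AMD GPUs under HIP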
int gridSize = (n - 1) / blockSize + 1;
// Execute the kernel
sum_kernel<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
double maxError = 0;
for (int i = 0; i < n; ++i) {
double error = std::fabs(h_c[i] - 1.0);
maxError = error > maxError ? error : maxError;
}
std::cout << "Max error = " << maxError << endl;
}
```

Convert the following CUDA code to AMD GPU code:
```cuda
/**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
//# define NI 512
//# define NJ 512
//# define NK 512
//# define NL 512
//# define NM 512
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
int NI;
int NJ;
int NK;
int NL;
int NM;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NJ; i++)
{
for (j = 0; j < NM; j++)
{
C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NM; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
__global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for(k=0; k < NK; k++)
{
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NJ) && (j < NL))
{
int k;
for(k=0; k < NM; k++)
{
F[i * NL + j] += C[i * NM + k] * D[k * NL +j];
}
}
}
__global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G, int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for(k=0; k < NJ; k++)
{
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
void mm3Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu, DATA_TYPE* F_gpu,
DATA_TYPE* G_gpu, int NI, int NJ, int NK, int NL, int NM)
{
cudaEvent_t start, end;
float time;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
//for(int i = 0; i < 5; i++){
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu,NI, NJ, NK, NL, NM);
cudaDeviceSynchronize();
mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu,NI, NJ, NK, NL, NM);
cudaDeviceSynchronize();
mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu,NI, NJ, NK, NL, NM);
cudaDeviceSynchronize();
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
fprintf(stdout, "%0.6lf\n", time);
//}
}
int main(int argc, char** argv)
{
if(argc < 2){
printf("please no troll\n");
return 1;
}
NI = atoi(argv[1]);
NJ = atoi(argv[1]);
NK = atoi(argv[1]);
NL = atoi(argv[1]);
NM = atoi(argv[1]);
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* F;
DATA_TYPE* G;
cudaMallocManaged(&A, NI*NK*sizeof(DATA_TYPE));
cudaMallocManaged(&B, NK*NJ*sizeof(DATA_TYPE));
cudaMallocManaged(&C, NJ*NM*sizeof(DATA_TYPE));
cudaMallocManaged(&D, NM*NL*sizeof(DATA_TYPE));
cudaMallocManaged(&E, NI*NJ*sizeof(DATA_TYPE));
cudaMallocManaged(&F, NJ*NL*sizeof(DATA_TYPE));
cudaMallocManaged(&G, NI*NL*sizeof(DATA_TYPE));
init_array(A, B, C, D);
// E, F, and G are accumulated with +=, so zero them first; managed memory is not guaranteed to be zero-initialized
memset(E, 0, NI*NJ*sizeof(DATA_TYPE));
memset(F, 0, NJ*NL*sizeof(DATA_TYPE));
memset(G, 0, NI*NL*sizeof(DATA_TYPE));
mm3Cuda(A, B, C, D, E, F, G, NI, NJ, NK, NL, NM);
cudaFree(A);
cudaFree(B);
cudaFree(C);
cudaFree(D);
cudaFree(E);
cudaFree(F);
cudaFree(G);
return 0;
}
```

Answer:

```cpp
/**
* 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define GPU_DEVICE 0
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
//# define NI 512
//# define NJ 512
//# define NK 512
//# define NL 512
//# define NM 512
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
int NI;
int NJ;
int NK;
int NL;
int NM;
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NK + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NJ; i++)
{
for (j = 0; j < NM; j++)
{
C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NM; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
__global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for(k=0; k < NK; k++)
{
E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F,int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NJ) && (j < NL))
{
int k;
for(k=0; k < NM; k++)
{
F[i * NL + j] += C[i * NM + k] * D[k * NL +j];
}
}
}
__global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G, int NI, int NJ, int NK, int NL, int NM)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for(k=0; k < NJ; k++)
{
G[i * NL + j] += E[i * NJ + k] * F[k * NL + j];
}
}
}
void mm3Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu, DATA_TYPE* F_gpu,
DATA_TYPE* G_gpu, int NI, int NJ, int NK, int NL, int NM)
{
hipEvent_t start, end;
float time;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) )));
dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) )));
//for(int i = 0; i < 5; i++){
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start);
mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu,NI, NJ, NK, NL, NM);
hipDeviceSynchronize();
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
fprintf(stdout, "%0.6lf\n", time);
//}
}
int main(int argc, char** argv)
{
if(argc < 2){
printf("please no troll\n");
return 1;
}
NI = atoi(argv[1]);
NJ = atoi(argv[1]);
NK = atoi(argv[1]);
NL = atoi(argv[1]);
NM = atoi(argv[1]);
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* C;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* F;
DATA_TYPE* G;
hipMallocManaged(&A, NI*NK*sizeof(DATA_TYPE));
hipMallocManaged(&B, NK*NJ*sizeof(DATA_TYPE));
hipMallocManaged(&C, NJ*NM*sizeof(DATA_TYPE));
hipMallocManaged(&D, NM*NL*sizeof(DATA_TYPE));
hipMallocManaged(&E, NI*NJ*sizeof(DATA_TYPE));
hipMallocManaged(&F, NJ*NL*sizeof(DATA_TYPE));
hipMallocManaged(&G, NI*NL*sizeof(DATA_TYPE));
// E, F and G are accumulated into with +=, so clear them before launching.
hipMemset(E, 0, NI*NJ*sizeof(DATA_TYPE));
hipMemset(F, 0, NJ*NL*sizeof(DATA_TYPE));
hipMemset(G, 0, NI*NL*sizeof(DATA_TYPE));
init_array(A, B, C, D);
mm3Cuda(A, B, C, D, E, F, G, NI, NJ, NK, NL, NM);
hipFree(A);
hipFree(B);
hipFree(C);
hipFree(D);
hipFree(E);
hipFree(F);
hipFree(G);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include<iostream>
#define imin(a,b) (a<b?a:b)
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);
__global__
void dot(float* a, float* b, float* c)
{
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
// sync between All the threads in a Block
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
// The barrier must sit outside the divergent branch so that every thread
// in the block reaches it on each reduction step.
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
c[blockIdx.x] = cache[0];
}
}
int main(void)
{
float c;
float* a, * b, * partial_c;
float* dev_a, * dev_b, * dev_partial_c;
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
partial_c = (float*)malloc(sizeof(float) * blocksPerGrid);
cudaMalloc((void**)&dev_a, sizeof(float) * N);
cudaMalloc((void**)&dev_b, sizeof(float) * N);
cudaMalloc((void**)&dev_partial_c, sizeof(float) * blocksPerGrid);
for (int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i * 2;
}
cudaMemcpy(dev_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
dot <<<blocksPerGrid, threadsPerBlock>>> (dev_a, dev_b, dev_partial_c);
cudaMemcpy(partial_c, dev_partial_c, sizeof(float) * blocksPerGrid, cudaMemcpyDeviceToHost);
c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("Does GPU value %.6g = %.6g?\n", c, sum_squares((float)(N - 1)));
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
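// Kept for parity with the CUDA source; newer ROCm releases deprecate
// <hip/device_functions.h>, and <hip/hip_runtime.h> above already provides
// the device intrinsics.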
#include <hip/hip_runtime_api.h>
#include<iostream>
#define imin(a,b) (a<b?a:b)
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);
__global__
void dot(float* a, float* b, float* c)
{
__shared__ float cache[threadsPerBlock];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
// sync between All the threads in a Block
__syncthreads();
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
// The barrier must sit outside the divergent branch so that every thread
// in the block reaches it on each reduction step.
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
c[blockIdx.x] = cache[0];
}
}
int main(void)
{
float c;
float* a, * b, * partial_c;
float* dev_a, * dev_b, * dev_partial_c;
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
partial_c = (float*)malloc(sizeof(float) * blocksPerGrid);
hipMalloc((void**)&dev_a, sizeof(float) * N);
hipMalloc((void**)&dev_b, sizeof(float) * N);
hipMalloc((void**)&dev_partial_c, sizeof(float) * blocksPerGrid);
for (int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i * 2;
}
hipMemcpy(dev_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
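// The triple-chevron launch below is accepted by hipcc; the explicit HIP
// form would be hipLaunchKernelGGL(dot, dim3(blocksPerGrid),
// dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c);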
dot <<<blocksPerGrid, threadsPerBlock>>> (dev_a, dev_b, dev_partial_c);
hipMemcpy(partial_c, dev_partial_c, sizeof(float) * blocksPerGrid, hipMemcpyDeviceToHost);
c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
printf("Does GPU value %.6g = %.6g?\n", c, sum_squares((float)(N - 1)));
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args "-Xclang" "-fcuda-allow-variadic-functions"
/*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// CHECK: #include "hip/hip_runtime.h"
#include "cuda_runtime.h"
#include <algorithm>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <type_traits>
#define EXIT_WAIVED 2
const char* sampleName = "hipSimpleAtomicsTest";
using namespace std;
// Auto-Verification Code
bool testResult = true;
bool computeGoldBitwise(...) {
return true;
}
template<typename T, typename enable_if<is_integral<T>{}>::type* = nullptr>
bool computeGoldBitwise(T* gpuData, int len) {
T val = 0xff;
for (int i = 0; i < len; ++i) {
// 9th element should be 1
val &= (2 * i + 7);
}
if (val != gpuData[8]) {
printf("atomicAnd failed\n");
return false;
}
val = 0;
for (int i = 0; i < len; ++i) {
// 10th element should be 0xff
val |= (1 << i);
}
if (val != gpuData[9]) {
printf("atomicOr failed\n");
return false;
}
val = 0xff;
for (int i = 0; i < len; ++i) {
// 11th element should be 0xff
val ^= i;
}
if (val != gpuData[10]) {
printf("atomicXor failed\n");
return false;
}
return true;
}
template<typename T>
bool computeGold(T* gpuData, int len) {
T val = 0;
for (int i = 0; i < len; ++i) {
val += 10;
}
if (val != gpuData[0]) {
printf("atomicAdd failed\n");
return false;
}
val = 0;
for (int i = 0; i < len; ++i) {
val -= 10;
}
if (val != gpuData[1]) {
printf("atomicSub failed\n");
return false;
}
bool found = false;
for (T i = 0; i < len; ++i) {
// third element should be a member of [0, len)
if (i == gpuData[2]) {
found = true;
break;
}
}
if (!found) {
printf("atomicExch failed\n");
return false;
}
val = -(1 << 8);
for (T i = 0; i < len; ++i) {
// fourth element should be len-1
val = max(val, i);
}
if (val != gpuData[3]) {
printf("atomicMax failed\n");
return false;
}
val = 1 << 8;
for (T i = 0; i < len; ++i) {
val = min(val, i);
}
if (val != gpuData[4]) {
printf("atomicMin failed\n");
return false;
}
int limit = 17;
val = 0;
for (int i = 0; i < len; ++i) {
val = (val >= limit) ? 0 : val + 1;
}
if (val != gpuData[5]) {
printf("atomicInc failed\n");
return false;
}
limit = 137;
val = 0;
for (int i = 0; i < len; ++i) {
val = ((val == 0) || (val > limit)) ? limit : val - 1;
}
if (val != gpuData[6]) {
printf("atomicDec failed\n");
return false;
}
found = false;
for (T i = 0; i < len; ++i) {
// eighth element should be a member of [0, len)
if (i == gpuData[7]) {
found = true;
break;
}
}
if (!found) {
printf("atomicCAS failed\n");
return false;
}
return computeGoldBitwise(gpuData, len);
}
__device__
void testKernelExch(...) {}
template<typename T, typename enable_if<!is_same<T, double>{}>::type* = nullptr>
__device__
void testKernelExch(T* g_odata) {
// access thread id
const T tid = blockDim.x * blockIdx.x + threadIdx.x;
// Atomic exchange
atomicExch(&g_odata[2], tid);
}
__device__
void testKernelSub(...) {}
template<
typename T,
typename enable_if<
is_same<T, int>{} || is_same<T, unsigned int>{}>::type* = nullptr>
__device__
void testKernelSub(T* g_odata) {
// Atomic subtraction (final should be 0)
atomicSub(&g_odata[1], 10);
}
__device__
void testKernelIntegral(...) {}
template<typename T, typename enable_if<is_integral<T>{}>::type* = nullptr>
__device__
void testKernelIntegral(T* g_odata) {
// access thread id
const T tid = blockDim.x * blockIdx.x + threadIdx.x;
// Atomic maximum
atomicMax(&g_odata[3], tid);
// Atomic minimum
atomicMin(&g_odata[4], tid);
// Atomic increment (modulo 17+1)
atomicInc((unsigned int*)&g_odata[5], 17);
// Atomic decrement
atomicDec((unsigned int*)&g_odata[6], 137);
// Atomic compare-and-swap
atomicCAS(&g_odata[7], tid - 1, tid);
// Atomic AND
atomicAnd(&g_odata[8], 2 * tid + 7);
// Atomic OR
atomicOr(&g_odata[9], 1 << tid);
// Atomic XOR
atomicXor(&g_odata[10], tid);
testKernelSub(g_odata);
}
template<typename T>
__global__ void testKernel(T* g_odata) {
// Atomic addition
atomicAdd(&g_odata[0], 10);
testKernelIntegral(g_odata);
testKernelExch(g_odata);
}
template<typename T>
void runTest() {
// CHECK: hipDeviceProp_t deviceProp;
cudaDeviceProp deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// CHECK: hipGetDeviceProperties(&deviceProp, dev);
cudaGetDeviceProperties(&deviceProp, dev);
// Statistics about the GPU device
printf(
"> GPU device has %d Multi-Processors, "
"SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
unsigned int numThreads = 256;
unsigned int numBlocks = 64;
unsigned int numData = 11;
unsigned int memSize = sizeof(T) * numData;
// Allocate mem for the result on host side
T* hOData = (T*)malloc(memSize);
// Initialize the memory
for (unsigned int i = 0; i < numData; i++) hOData[i] = 0;
// To make the AND and XOR tests generate something other than 0...
hOData[8] = hOData[10] = 0xff;
// Allocate device memory for result
T* dOData;
// CHECK: hipMalloc((void**)&dOData, memSize);
cudaMalloc((void**)&dOData, memSize);
// Copy host memory to device to initialize to zero
// CHECK: hipMemcpy(dOData, hOData, memSize, hipMemcpyHostToDevice);
cudaMemcpy(dOData, hOData, memSize, cudaMemcpyHostToDevice);
// Execute the kernel
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(testKernel), dim3(numBlocks), dim3(numThreads), 0, 0, dOData);
testKernel<<<numBlocks, numThreads>>>(dOData);
// Copy result from device to host
// CHECK: hipMemcpy(hOData, dOData, memSize, hipMemcpyDeviceToHost);
cudaMemcpy(hOData, dOData, memSize, cudaMemcpyDeviceToHost);
// Compute reference solution
testResult = computeGold(hOData, numThreads * numBlocks);
// Cleanup memory
free(hOData);
// CHECK: hipFree(dOData);
cudaFree(dOData);
}
int main(int argc, char** argv) {
printf("%s starting...\n", sampleName);
runTest<int>();
runTest<unsigned int>();
runTest<unsigned long long>();
runTest<float>();
runTest<double>();
// CHECK: hipDeviceReset();
cudaDeviceReset();
printf("%s completed, returned %s\n", sampleName, testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args "-Xclang" "-fcuda-allow-variadic-functions"
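// The "// CHECK:" lines below are FileCheck patterns from the hipify test
// harness and are kept verbatim alongside the converted calls.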
/*
Copyright (c) 2015-present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// CHECK: #include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include <algorithm>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <type_traits>
#define EXIT_WAIVED 2
const char* sampleName = "hipSimpleAtomicsTest";
using namespace std;
// Auto-Verification Code
bool testResult = true;
bool computeGoldBitwise(...) {
return true;
}
template<typename T, typename enable_if<is_integral<T>{}>::type* = nullptr>
bool computeGoldBitwise(T* gpuData, int len) {
T val = 0xff;
for (int i = 0; i < len; ++i) {
// 9th element should be 1
val &= (2 * i + 7);
}
if (val != gpuData[8]) {
printf("atomicAnd failed\n");
return false;
}
val = 0;
for (int i = 0; i < len; ++i) {
// 10th element should be 0xff
val |= (1 << i);
}
if (val != gpuData[9]) {
printf("atomicOr failed\n");
return false;
}
val = 0xff;
for (int i = 0; i < len; ++i) {
// 11th element should be 0xff
val ^= i;
}
if (val != gpuData[10]) {
printf("atomicXor failed\n");
return false;
}
return true;
}
template<typename T>
bool computeGold(T* gpuData, int len) {
T val = 0;
for (int i = 0; i < len; ++i) {
val += 10;
}
if (val != gpuData[0]) {
printf("atomicAdd failed\n");
return false;
}
val = 0;
for (int i = 0; i < len; ++i) {
val -= 10;
}
if (val != gpuData[1]) {
printf("atomicSub failed\n");
return false;
}
bool found = false;
for (T i = 0; i < len; ++i) {
// third element should be a member of [0, len)
if (i == gpuData[2]) {
found = true;
break;
}
}
if (!found) {
printf("atomicExch failed\n");
return false;
}
val = -(1 << 8);
for (T i = 0; i < len; ++i) {
// fourth element should be len-1
val = max(val, i);
}
if (val != gpuData[3]) {
printf("atomicMax failed\n");
return false;
}
val = 1 << 8;
for (T i = 0; i < len; ++i) {
val = min(val, i);
}
if (val != gpuData[4]) {
printf("atomicMin failed\n");
return false;
}
int limit = 17;
val = 0;
for (int i = 0; i < len; ++i) {
val = (val >= limit) ? 0 : val + 1;
}
if (val != gpuData[5]) {
printf("atomicInc failed\n");
return false;
}
limit = 137;
val = 0;
for (int i = 0; i < len; ++i) {
val = ((val == 0) || (val > limit)) ? limit : val - 1;
}
if (val != gpuData[6]) {
printf("atomicDec failed\n");
return false;
}
found = false;
for (T i = 0; i < len; ++i) {
// eighth element should be a member of [0, len)
if (i == gpuData[7]) {
found = true;
break;
}
}
if (!found) {
printf("atomicCAS failed\n");
return false;
}
return computeGoldBitwise(gpuData, len);
}
__device__
void testKernelExch(...) {}
template<typename T, typename enable_if<!is_same<T, double>{}>::type* = nullptr>
__device__
void testKernelExch(T* g_odata) {
// access thread id
const T tid = blockDim.x * blockIdx.x + threadIdx.x;
// Atomic exchange
atomicExch(&g_odata[2], tid);
}
__device__
void testKernelSub(...) {}
template<
typename T,
typename enable_if<
is_same<T, int>{} || is_same<T, unsigned int>{}>::type* = nullptr>
__device__
void testKernelSub(T* g_odata) {
// Atomic subtraction (final should be 0)
atomicSub(&g_odata[1], 10);
}
__device__
void testKernelIntegral(...) {}
template<typename T, typename enable_if<is_integral<T>{}>::type* = nullptr>
__device__
void testKernelIntegral(T* g_odata) {
// access thread id
const T tid = blockDim.x * blockIdx.x + threadIdx.x;
// Atomic maximum
atomicMax(&g_odata[3], tid);
// Atomic minimum
atomicMin(&g_odata[4], tid);
// Atomic increment (modulo 17+1)
atomicInc((unsigned int*)&g_odata[5], 17);
// Atomic decrement
atomicDec((unsigned int*)&g_odata[6], 137);
// Atomic compare-and-swap
atomicCAS(&g_odata[7], tid - 1, tid);
// Atomic AND
atomicAnd(&g_odata[8], 2 * tid + 7);
// Atomic OR
atomicOr(&g_odata[9], 1 << tid);
// Atomic XOR
atomicXor(&g_odata[10], tid);
testKernelSub(g_odata);
}
template<typename T>
__global__ void testKernel(T* g_odata) {
// Atomic addition
atomicAdd(&g_odata[0], 10);
testKernelIntegral(g_odata);
testKernelExch(g_odata);
}
template<typename T>
void runTest() {
// CHECK: hipDeviceProp_t deviceProp;
hipDeviceProp_t deviceProp;
deviceProp.major = 0;
deviceProp.minor = 0;
int dev = 0;
// CHECK: hipGetDeviceProperties(&deviceProp, dev);
hipGetDeviceProperties(&deviceProp, dev);
// Statistics about the GPU device
printf(
"> GPU device has %d Multi-Processors, "
"SM %d.%d compute capabilities\n\n",
deviceProp.multiProcessorCount, deviceProp.major, deviceProp.minor);
unsigned int numThreads = 256;
unsigned int numBlocks = 64;
unsigned int numData = 11;
unsigned int memSize = sizeof(T) * numData;
// Allocate mem for the result on host side
T* hOData = (T*)malloc(memSize);
// Initialize the memory
for (unsigned int i = 0; i < numData; i++) hOData[i] = 0;
// To make the AND and XOR tests generate something other than 0...
hOData[8] = hOData[10] = 0xff;
// Allocate device memory for result
T* dOData;
// CHECK: hipMalloc((void**)&dOData, memSize);
hipMalloc((void**)&dOData, memSize);
// Copy host memory to device to initialize to zero
// CHECK: hipMemcpy(dOData, hOData, memSize, hipMemcpyHostToDevice);
hipMemcpy(dOData, hOData, memSize, hipMemcpyHostToDevice);
// Execute the kernel
// CHECK: hipLaunchKernelGGL(HIP_KERNEL_NAME(testKernel), dim3(numBlocks), dim3(numThreads), 0, 0, dOData);
testKernel<<<numBlocks, numThreads>>>(dOData);
// Copy result from device to host
// CHECK: hipMemcpy(hOData, dOData, memSize, hipMemcpyDeviceToHost);
hipMemcpy(hOData, dOData, memSize, hipMemcpyDeviceToHost);
// Compute reference solution
testResult = computeGold(hOData, numThreads * numBlocks);
// Cleanup memory
free(hOData);
// CHECK: hipFree(dOData);
hipFree(dOData);
}
int main(int argc, char** argv) {
printf("%s starting...\n", sampleName);
runTest<int>();
runTest<unsigned int>();
runTest<unsigned long long>();
runTest<float>();
runTest<double>();
// CHECK: hipDeviceReset();
hipDeviceReset();
printf("%s completed, returned %s\n", sampleName, testResult ? "OK" : "ERROR!");
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <cuda.h>
__global__ void _slowKernel(char* ptr, int sz) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (; idx < sz; idx += (gridDim.x * blockDim.x)) {
for (int i = 0; i < 100000; ++i) {
ptr[idx] += ptr[(idx + 1007) % sz] + i;
}
}
}
void slowKernel(void* ptr, int kSize, cudaStream_t stream) {
_slowKernel<<<128, 128, 0, stream>>>((char*)ptr, kSize);
}
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <hip/hip_runtime.h>
__global__ void _slowKernel(char* ptr, int sz) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (; idx < sz; idx += (gridDim.x * blockDim.x)) {
for (int i = 0; i < 100000; ++i) {
ptr[idx] += ptr[(idx + 1007) % sz] + i;
}
}
}
void slowKernel(void* ptr, int kSize, hipStream_t stream) {
_slowKernel<<<128, 128, 0, stream>>>((char*)ptr, kSize);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "math.h"
__constant__ int sobelV[] = {1, 0, -1, 2, 0, -2, 1, 0, -1};
__constant__ int sobelH[] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
extern "C"
__global__ void grayEdgeDetection(int * output, int width, int thresh) {
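// NOTE: the kernel reads neighbours from and writes results to the same
// buffer, so the output depends on block scheduling; it also assumes every
// launched thread has a full 3x3 neighbourhood inside the image.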
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int pixelIndex = idx + (idy * width);
int pixels[9];
for(int i = -1; i <= 1; i++) {
for(int j = -1; j <= 1; j++) {
pixels[((i + 1) * 3) + (j + 1)] = output[(idx + j) + ((idy + i) * width)];
}
}
/*
pixels[0] = output[(idx - 1) + ((idy - 1) * width)];
pixels[1] = output[(idx) + ((idy - 1) * width)];
pixels[2] = output[(idx + 1) + ((idy - 1) * width)];
pixels[3] = output[(idx - 1) + (idy * width)];
pixels[4] = output[(idx) + (idy * width)];
pixels[5] = output[(idx + 1) + (idy * width)];
pixels[6] = output[(idx - 1) + ((idy + 1) * width)];
pixels[7] = output[(idx) + ((idy + 1) * width)];
pixels[8] = output[(idx + 1) + ((idy + 1) * width)];
*/
int vertSum = 0;
int horzSum = 0;
for(int i = 0; i < 9; i++) {
int grayVal = pixels[i] & 0xFF;
vertSum = vertSum + (grayVal * sobelV[i]);
horzSum = horzSum + (grayVal * sobelH[i]);
}
int sum = (int) (sqrt((double)((vertSum * vertSum) + (horzSum * horzSum))));
if(sum > 255) sum = 255;
if(sum < thresh) sum = 0;
output[pixelIndex] = sum;
}
|
#include <hip/hip_runtime.h>
#include "math.h"
__constant__ int sobelV[] = {1, 0, -1, 2, 0, -2, 1, 0, -1};
__constant__ int sobelH[] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
extern "C"
__global__ void grayEdgeDetection(int * output, int width, int thresh) {
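// NOTE: the kernel reads neighbours from and writes results to the same
// buffer, so the output depends on block scheduling; it also assumes every
// launched thread has a full 3x3 neighbourhood inside the image.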
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int pixelIndex = idx + (idy * width);
int pixels[9];
for(int i = -1; i <= 1; i++) {
for(int j = -1; j <= 1; j++) {
pixels[((i + 1) * 3) + (j + 1)] = output[(idx + j) + ((idy + i) * width)];
}
}
/*
pixels[0] = output[(idx - 1) + ((idy - 1) * width)];
pixels[1] = output[(idx) + ((idy - 1) * width)];
pixels[2] = output[(idx + 1) + ((idy - 1) * width)];
pixels[3] = output[(idx - 1) + (idy * width)];
pixels[4] = output[(idx) + (idy * width)];
pixels[5] = output[(idx + 1) + (idy * width)];
pixels[6] = output[(idx - 1) + ((idy + 1) * width)];
pixels[7] = output[(idx) + ((idy + 1) * width)];
pixels[8] = output[(idx + 1) + ((idy + 1) * width)];
*/
int vertSum = 0;
int horzSum = 0;
for(int i = 0; i < 9; i++) {
int grayVal = pixels[i] & 0xFF;
vertSum = vertSum + (grayVal * sobelV[i]);
horzSum = horzSum + (grayVal * sobelH[i]);
}
int sum = (int) (sqrt((double)((vertSum * vertSum) + (horzSum * horzSum))));
if(sum > 255) sum = 255;
if(sum < thresh) sum = 0;
output[pixelIndex] = sum;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
float random_number(int min, int max){
/*
* Function: random_number
* -----------------------
* return a random number between min and max as a float
*/
float num = rand() % (max + 1 - min) + min;
return num;
}
unsigned long int getOffset(int * indices, int * dimensions, int num_dimensions){
/*
* Function: getOffset
* -------------------
* compute the offset from the base address of an array for given indices corresponding
* to dimensions.
* indices: pointer to array of indices e.g A[1,2,3]
* dimensions: pointer to array of dimensions e.g dimensionality of A = 2x3x4
* num_dimensions: number of dimensions e.g dimensions(A) = 3
*
* returns:
* -------
* offset: the offset from the base memory address. (NOTE: independent of datatype)
*/
unsigned long int offset = 0;
for(int indexIdx=0; indexIdx<num_dimensions; indexIdx++){
unsigned long int product = indices[indexIdx];
for(int dimIdx=indexIdx+1; dimIdx<num_dimensions; dimIdx ++){
product = product * dimensions[dimIdx];
}
offset = offset + product;
}
return offset;
}
void randFill(float * arr, int * dimensions, int num_dimensions){
/*
* Function: randFill
* ------------------
* recursively fills an n-dimensional array with random numbers between 1,100
*/
// 1. Base condition
if(num_dimensions==1){
for(int i=0; i<dimensions[0]; i++){
*(arr + i) = random_number(1,100);
}
}
// 2. Recursive condition
else {
int first_dim = dimensions[0];
// number of elements in one slice along the first dimension
int stride = 1;
for(int dimIdx=1; dimIdx<num_dimensions; dimIdx++){
stride = stride * dimensions[dimIdx];
}
int * new_dims = dimensions + 1;
for(int dim=0; dim<first_dim; dim++){
randFill( arr + dim*stride , new_dims, num_dimensions-1);
}
}
}
void randFill2d(float * arr, int * dimensions){
/*
* Function: randFill2d
* --------------------
* fills a 2d array with random numbers between 1,100
*/
int HEIGHT = dimensions[0];
int WIDTH = dimensions[1];
for(int row=0; row<HEIGHT; row++){
for(int col=0; col<WIDTH; col++){
int indices[2] = {row, col};
*(arr + getOffset(indices, dimensions, 2)) = random_number(1,100);
}
}
}
void randFill3d(float * arr, int * dimensions){
/*
* Function: randFill3d
* --------------------
* fills a 3d array with random numbers between 1,100
*/
int CHANNELS = dimensions[0];
int HEIGHT = dimensions[1];
int WIDTH = dimensions[2];
for(int chan=0; chan<CHANNELS; chan++){
for(int row=0; row<HEIGHT; row++){
for(int col=0; col<WIDTH; col++){
int indices[3] = {chan, row, col};
*(arr + getOffset(indices, dimensions, 3)) = random_number(1,100);
}
}
}
}
void printArr2d(float * arr, int * dimensions){
int HEIGHT = dimensions[0];
int WIDTH = dimensions[1];
printf("\n2d Array: \n");
for(int r=0; r<HEIGHT; r++){
printf("\n");
for(int c=0; c<WIDTH; c++){
int indices[] = {r,c};
printf(" %.1f ", *(arr + getOffset(indices, dimensions, 2)));
}
}
printf("\n");
}
void printArr3d(float * arr, int * dimensions){
/* To print a 3d array - used multiple times
*/
int CHANNELS = dimensions[0];
int HEIGHT = dimensions[1];
int WIDTH = dimensions[2];
for(int chan=0; chan<CHANNELS; chan++){
printf("\nChannel %d", chan);
for(int row=0; row<HEIGHT; row++){
printf("\n");
for(int col=0; col<WIDTH; col++){
int indices[3] = {chan, row, col};
printf(" %.1f ", *(arr + getOffset(indices, dimensions, 3)));
}
}
printf("\n");
}
}
float * new3dArray(int DEPTH, int HEIGHT, int WIDTH){
float * arr = (float*)malloc(DEPTH*HEIGHT*WIDTH*sizeof(float));
return arr;
}
float * new2dArray(int HEIGHT, int WIDTH){
float * arr = (float*)malloc(HEIGHT*WIDTH*sizeof(float));
return arr;
}
/*
int main()
{
// Test this!
float arr[4][3] = { {1,2,3}, {4,5,6}, {7,8,9}, {10,11,12} }; // 4x3 array
float * arr_ptr = &arr[0][0];
int indices[] = {2,1};
int dimensions[] = {4,3};
int num_dimensions = 2;
unsigned long int offset = getOffset( indices, dimensions, num_dimensions );
printf("value at ( %d , %d ) is %f \n", indices[0], indices[1], arr[indices[0]][indices[1]]);
printf("value at ( %d , %d ) is %f \n", indices[0], indices[1], *(arr_ptr + offset) );
// Test fillArr and printArr
srand(time(0));
const int HEIGHT=2;
const int WIDTH=3;
const int DEPTH=4;
float * multiArr = (float *)malloc(HEIGHT*WIDTH*DEPTH*sizeof(float));
int dimensions2[] = {DEPTH, HEIGHT, WIDTH};
randFill3d(multiArr, dimensions2);
printArr3d(multiArr, dimensions2);
return 0;
}
*/
|
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
float random_number(int min, int max){
/*
* Function: random_number
* -----------------------
* return a random number between min and max as a float
*/
float num = rand() % (max + 1 - min) + min;
return num;
}
unsigned long int getOffset(int * indices, int * dimensions, int num_dimensions){
/*
* Function: getOffset
* -------------------
* compute the offset from the base address of an array for given indices corresponding
* to dimensions.
* indices: pointer to array of indices e.g A[1,2,3]
* dimensions: pointer to array of dimensions e.g dimensionality of A = 2x3x4
* num_dimensions: number of dimensions e.g dimensions(A) = 3
*
* returns:
* -------
* offset: the offset from the base memory address. (NOTE: independent of datatype)
*/
unsigned long int offset = 0;
for(int indexIdx=0; indexIdx<num_dimensions; indexIdx++){
unsigned long int product = indices[indexIdx];
for(int dimIdx=indexIdx+1; dimIdx<num_dimensions; dimIdx ++){
product = product * dimensions[dimIdx];
}
offset = offset + product;
}
return offset;
}
void randFill(float * arr, int * dimensions, int num_dimensions){
/*
* Function: randFill
* ------------------
* recursively fills an n-dimensional array with random numbers between 1,100
*/
// 1. Base condition
if(num_dimensions==1){
for(int i=0; i<dimensions[0]; i++){
*(arr + i) = random_number(1,100);
}
}
// 2. Recursive condition
else {
int first_dim = dimensions[0];
// number of elements in one slice along the first dimension
int stride = 1;
for(int dimIdx=1; dimIdx<num_dimensions; dimIdx++){
stride = stride * dimensions[dimIdx];
}
int * new_dims = dimensions + 1;
for(int dim=0; dim<first_dim; dim++){
randFill( arr + dim*stride , new_dims, num_dimensions-1);
}
}
}
void randFill2d(float * arr, int * dimensions){
/*
* Function: randFill2d
* --------------------
* fills a 2d array with random numbers between 1,100
*/
int HEIGHT = dimensions[0];
int WIDTH = dimensions[1];
for(int row=0; row<HEIGHT; row++){
for(int col=0; col<WIDTH; col++){
int indices[2] = {row, col};
*(arr + getOffset(indices, dimensions, 2)) = random_number(1,100);
}
}
}
void randFill3d(float * arr, int * dimensions){
/*
* Function: randFill3d
* --------------------
* fills a 3d array with random numbers between 1,100
*/
int CHANNELS = dimensions[0];
int HEIGHT = dimensions[1];
int WIDTH = dimensions[2];
for(int chan=0; chan<CHANNELS; chan++){
for(int row=0; row<HEIGHT; row++){
for(int col=0; col<WIDTH; col++){
int indices[3] = {chan, row, col};
*(arr + getOffset(indices, dimensions, 3)) = random_number(1,100);
}
}
}
}
void printArr2d(float * arr, int * dimensions){
int HEIGHT = dimensions[0];
int WIDTH = dimensions[1];
printf("\n2d Array: \n");
for(int r=0; r<HEIGHT; r++){
printf("\n");
for(int c=0; c<WIDTH; c++){
int indices[] = {r,c};
printf(" %.1f ", *(arr + getOffset(indices, dimensions, 2)));
}
}
printf("\n");
}
void printArr3d(float * arr, int * dimensions){
/* To print a 3d array - used multiple times
*/
int CHANNELS = dimensions[0];
int HEIGHT = dimensions[1];
int WIDTH = dimensions[2];
for(int chan=0; chan<CHANNELS; chan++){
printf("\nChannel %d", chan);
for(int row=0; row<HEIGHT; row++){
printf("\n");
for(int col=0; col<WIDTH; col++){
int indices[3] = {chan, row, col};
printf(" %.1f ", *(arr + getOffset(indices, dimensions, 3)));
}
}
printf("\n");
}
}
float * new3dArray(int DEPTH, int HEIGHT, int WIDTH){
float * arr = (float*)malloc(DEPTH*HEIGHT*WIDTH*sizeof(float));
return arr;
}
float * new2dArray(int HEIGHT, int WIDTH){
float * arr = (float*)malloc(HEIGHT*WIDTH*sizeof(float));
return arr;
}
/*
int main()
{
// Test this!
float arr[4][3] = { {1,2,3}, {4,5,6}, {7,8,9}, {10,11,12} }; // 4x3 array
float * arr_ptr = &arr[0][0];
int indices[] = {2,1};
int dimensions[] = {4,3};
int num_dimensions = 2;
unsigned long int offset = getOffset( indices, dimensions, num_dimensions );
printf("value at ( %d , %d ) is %f \n", indices[0], indices[1], arr[indices[0]][indices[1]]);
printf("value at ( %d , %d ) is %f \n", indices[0], indices[1], *(arr_ptr + offset) );
// Test fillArr and printArr
srand(time(0));
const int HEIGHT=2;
const int WIDTH=3;
const int DEPTH=4;
float * multiArr = (float *)malloc(HEIGHT*WIDTH*DEPTH*sizeof(float));
int dimensions2[] = {DEPTH, HEIGHT, WIDTH};
randFill3d(multiArr, dimensions2);
printArr3d(multiArr, dimensions2);
return 0;
}
*/
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
* ising_cuda_v1.cu
*
* Created on: Dec 26, 2019
* Author: Charalampos Eleftheriadis
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 512
#define threadsNum 64
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Kernel Function.
__global__ void spin(int *G, double *w, int *newG, int n) {
// Calculates Atomic Spin index.
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
double weightSum = 0;
// Calculates weight contribution for each neighboring Atomic Spin and sums it.
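// The column offsets rely on (index + k + n) % n == ((index % n) + k + n) % n,
// so the row part of index drops out and only the column wraps around.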
weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n];
weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n];
weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n];
weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n];
weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n];
weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n];
// w[12] is not contributing anything. It's the current Atomic Spin.
weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n];
weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n];
weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n];
weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n];
weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n];
weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n];
//! Can it be done more efficiently?
if (weightSum > 0.0001)
newG[index] = 1;
else if (weightSum < -0.0001)
newG[index] = -1;
else
newG[index] = G[index];
}
// Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one.
__global__ void check(int *G, int *newG, int n, int *same) {
// Calculates Atomic Spin index.
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
if (G[index] != newG[index])
*same = 0;
}
void ising(int *G, double *w, int k, int n) {
// Creates and transfers the Weight Matrix to GPU memory.
double *w_d;
int w_size = 25*sizeof(double);
gpuErrchk( cudaMalloc((void **) &w_d, w_size) );
gpuErrchk( cudaMemcpy(w_d, w, w_size, cudaMemcpyHostToDevice) );
// Creates and transfers the Atomic Spins Matrix to GPU memory.
int *G_d;
int G_size = n*n*sizeof(int);
gpuErrchk( cudaMalloc((void **) &G_d, G_size) );
gpuErrchk( cudaMemcpy(G_d, G, G_size, cudaMemcpyHostToDevice) );
// Creates the new Atomic Spins Matrix to GPU memory.
int *newG_d;
gpuErrchk( cudaMalloc((void **) &newG_d, G_size) );
// Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory.
int same = 1;
int *same_d;
gpuErrchk( cudaMalloc((void **) &same_d, sizeof(int)) );
gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) );
// Creates a temporary variable for Atomic Spins Matrices' pointers swapping.
int *temp_d;
// Checks if function has to be iterated.
for (int i=0; i<k; i++) {
// Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each.
spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Resets the convergence flag before every check; otherwise a single
// difference found in an earlier iteration would keep it 0 forever.
same = 1;
gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) );
check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaMemcpy(&same, same_d, sizeof(int), cudaMemcpyDeviceToHost) );
if (same)
break;
// Atomic Spin Matrices' pointer swap.
temp_d = G_d;
G_d = newG_d;
newG_d = temp_d;
}
// Copies data from GPU to CPU memory.
gpuErrchk( cudaMemcpy(G, G_d, G_size, cudaMemcpyDeviceToHost) );
// Cleanup.
gpuErrchk( cudaFree(w_d) );
gpuErrchk( cudaFree(G_d) );
gpuErrchk( cudaFree(newG_d) );
}
int main() {
// Weight Matrix.
double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004,
0.016, 0.071, 0.117, 0.071, 0.016,
0.026, 0.117, 0.000, 0.117, 0.026,
0.016, 0.071, 0.117, 0.071, 0.016,
0.004, 0.016, 0.026, 0.016, 0.004 };
// Number of dimensions for the square Atomic Spins Matrix.
int n = N;
// Allocates memory for the Atomic Spins Matrix.
int *G = (int *)malloc(n*n * sizeof(int));
// Randomizes seed.
srand(time(NULL));
// Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution.
for (int i=0; i<n*n; i++)
G[i] = ((rand() % 2) * 2) - 1;
/*
// Reads configuration file.
size_t readStatus;
FILE *conf_init = fopen("conf-init.bin","rb");
int initG[n*n];
readStatus = fread(&initG, sizeof(int), n*n, conf_init);
if (readStatus != n*n)
printf("Could not read conf-init.bin file.\n");
fclose(conf_init);
// Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file.
for (int i=0; i<n*n; i++)
G[i] = initG[i];
*/
ising(G, w, 10, n);
/*
// Reads configuration file for state after one iteration.
size_t readStatus1;
FILE *conf_1 = fopen("conf-1.bin","rb");
int G1[n*n];
readStatus1 = fread(&G1, sizeof(int), n*n, conf_1);
if (readStatus1 != n*n)
printf("Could not read conf-1.bin file.\n");
fclose(conf_1);
// Checks for errors.
int errorsNum = 0;
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
if (G[i*n+j] != G1[i*n+j])
errorsNum++;
if (errorsNum == 0)
printf("Correct Results!\n");
else
printf("Wrong Results. Number of errors: %d\n", errorsNum);
// Checks the results.
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++) {
if (G[i*n+j] == G1[i*n+j])
printf("=");
else
printf("!");
}
printf("\n");
}
printf("\n\n");
*/
return 0;
}
|
/*
* ising_cuda_v1.cu
*
* Created on: Dec 26, 2019
* Author: Charalampos Eleftheriadis
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 512
#define threadsNum 64
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Kernel Function.
__global__ void spin(int *G, double *w, int *newG, int n) {
// Calculates Atomic Spin index.
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
double weightSum = 0;
// Calculates weight contribution for each neighboring Atomic Spin and sums it.
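// The column offsets rely on (index + k + n) % n == ((index % n) + k + n) % n,
// so the row part of index drops out and only the column wraps around.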
weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n];
weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n];
weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n];
weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n];
weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n];
weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n];
// w[12] is not contributing anything. It's the current Atomic Spin.
weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n];
weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n];
weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n];
weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n];
weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n];
weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n];
weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n];
weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n];
weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n];
//! Can it be done more efficiently?
if (weightSum > 0.0001)
newG[index] = 1;
else if (weightSum < -0.0001)
newG[index] = -1;
else
newG[index] = G[index];
}
// Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one.
__global__ void check(int *G, int *newG, int n, int *same) {
// Calculates Atomic Spin index.
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
if (G[index] != newG[index])
*same = 0;
}
void ising(int *G, double *w, int k, int n) {
// Creates and transfers the Weight Matrix to GPU memory.
double *w_d;
int w_size = 25*sizeof(double);
gpuErrchk( hipMalloc((void **) &w_d, w_size) );
gpuErrchk( hipMemcpy(w_d, w, w_size, hipMemcpyHostToDevice) );
// Creates and transfers the Atomic Spins Matrix to GPU memory.
int *G_d;
int G_size = n*n*sizeof(int);
gpuErrchk( hipMalloc((void **) &G_d, G_size) );
gpuErrchk( hipMemcpy(G_d, G, G_size, hipMemcpyHostToDevice) );
// Creates the new Atomic Spins Matrix to GPU memory.
int *newG_d;
gpuErrchk( hipMalloc((void **) &newG_d, G_size) );
// Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory.
int same = 1;
int *same_d;
gpuErrchk( hipMalloc((void **) &same_d, sizeof(int)) );
gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) );
// Creates a temporary variable for Atomic Spins Matrices' pointers swapping.
int *temp_d;
// Checks if function has to be iterated.
for (int i=0; i<k; i++) {
// Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each.
spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// Resets the convergence flag before every check; otherwise a single
// difference found in an earlier iteration would keep it 0 forever.
same = 1;
gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) );
check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipMemcpy(&same, same_d, sizeof(int), hipMemcpyDeviceToHost) );
if (same)
break;
// Atomic Spin Matrices' pointer swap.
temp_d = G_d;
G_d = newG_d;
newG_d = temp_d;
}
// Copies data from GPU to CPU memory.
gpuErrchk( hipMemcpy(G, G_d, G_size, hipMemcpyDeviceToHost) );
// Cleanup.
gpuErrchk( hipFree(w_d) );
gpuErrchk( hipFree(G_d) );
gpuErrchk( hipFree(newG_d) );
}
int main() {
// Weight Matrix.
double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004,
0.016, 0.071, 0.117, 0.071, 0.016,
0.026, 0.117, 0.000, 0.117, 0.026,
0.016, 0.071, 0.117, 0.071, 0.016,
0.004, 0.016, 0.026, 0.016, 0.004 };
// Number of dimensions for the square Atomic Spins Matrix.
int n = N;
// Allocates memory for the Atomic Spins Matrix.
int *G = (int *)malloc(n*n * sizeof(int));
// Randomizes seed.
srand(time(NULL));
// Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution.
for (int i=0; i<n*n; i++)
G[i] = ((rand() % 2) * 2) - 1;
/*
// Reads configuration file.
size_t readStatus;
FILE *conf_init = fopen("conf-init.bin","rb");
int initG[n*n];
readStatus = fread(&initG, sizeof(int), n*n, conf_init);
if (readStatus != n*n)
printf("Could not read conf-init.bin file.\n");
fclose(conf_init);
// Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file.
for (int i=0; i<n*n; i++)
G[i] = initG[i];
*/
ising(G, w, 10, n);
/*
// Reads configuration file for state after one iteration.
size_t readStatus1;
FILE *conf_1 = fopen("conf-1.bin","rb");
int G1[n*n];
readStatus1 = fread(&G1, sizeof(int), n*n, conf_1);
if (readStatus1 != n*n)
printf("Could not read conf-1.bin file.\n");
fclose(conf_1);
// Checks for errors.
int errorsNum = 0;
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
if (G[i*n+j] != G1[i*n+j])
errorsNum++;
if (errorsNum == 0)
printf("Correct Results!\n");
else
printf("Wrong Results. Number of errors: %d\n", errorsNum);
// Checks the results.
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++) {
if (G[i*n+j] == G1[i*n+j])
printf("=");
else
printf("!");
}
printf("\n");
}
printf("\n\n");
*/
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__device__ void generate2DGaussian(double * output, double sigma, int sz, bool normalize) {
/*x and y coordinates of thread in kernel. The gaussian filters are
*small enough for the kernel to fit into a single thread block of sz*sz*/
const int colIdx = threadIdx.x;
const int rowIdx = threadIdx.y;
int linearIdx = rowIdx*sz + colIdx;
/*calculate distance from centre of filter*/
int distx = abs(colIdx - sz/2);
int disty = abs(rowIdx - sz/2);
output[linearIdx] = exp(-(pow((double)(distx), 2.0)+pow((double)(disty), 2.0))/(2*(pow(sigma, 2.0))));
if(normalize==true) {
/*wait until all threads have assigned a value to their index in the output array*/
__syncthreads();
int i, j;
double sum=0.0;
for(i=0; i<sz; i++) {
for(j=0; j<sz; j++) {
sum += output[i*sz + j];
}
}
/*Let all threads calculate the sum before changing the value of the output array*/
__syncthreads();
output[linearIdx]/=sum;
}
}
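// NOTE: getGaussian must be launched as a single sz x sz thread block:
// threadIdx is used directly as the filter coordinate and __syncthreads()
// only synchronises within one block.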
__global__ void getGaussian(double * output, double sigma) {
int sz = ceil(sigma*3) * 2 + 1;
int linearIdx = threadIdx.y*sz + threadIdx.x;
if(linearIdx>=sz*sz) return;
generate2DGaussian(output, sigma, sz, false);
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__device__ void generate2DGaussian(double * output, double sigma, int sz, bool normalize) {
/*x and y coordinates of thread in kernel. The gaussian filters are
*small enough for the kernel to fit into a single thread block of sz*sz*/
const int colIdx = threadIdx.x;
const int rowIdx = threadIdx.y;
int linearIdx = rowIdx*sz + colIdx;
/*calculate distance from centre of filter*/
int distx = abs(colIdx - sz/2);
int disty = abs(rowIdx - sz/2);
output[linearIdx] = exp(-(pow((double)(distx), 2.0)+pow((double)(disty), 2.0))/(2*(pow(sigma, 2.0))));
if(normalize==true) {
/*wait until all threads have assigned a value to their index in the output array*/
__syncthreads();
int i, j;
double sum=0.0;
for(i=0; i<sz; i++) {
for(j=0; j<sz; j++) {
sum += output[i*sz + j];
}
}
/*Let all threads calculate the sum before changing the value of the output array*/
__syncthreads();
output[linearIdx]/=sum;
}
}
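// NOTE: getGaussian must be launched as a single sz x sz thread block:
// threadIdx is used directly as the filter coordinate and __syncthreads()
// only synchronises within one block.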
__global__ void getGaussian(double * output, double sigma) {
int sz = ceil(sigma*3) * 2 + 1;
int linearIdx = threadIdx.y*sz + threadIdx.x;
if(linearIdx>=sz*sz) return;
generate2DGaussian(output, sigma, sz, false);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include <chrono>
using namespace std;
constexpr int M = 1 << 3 ; //M =8
constexpr int N = 1 << 3;
constexpr int K = 1 << 3;
constexpr int THREADS = 1 << 2;
constexpr int M_padded = M + THREADS - M % THREADS; //8+4 - 8%4 = 12
constexpr int N_padded = N + THREADS - N % THREADS;
constexpr int K_padded = K + THREADS - K % THREADS;
constexpr int SHMEM_SIZE = THREADS * THREADS; // shared-memory tile size, in elements, per block
__global__ void matrixMul(const int *a, const int *b, int *c);
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c);
void initMatrix(vector<int>& ,int, int);
void afficheMatrix(vector<int>& m,int line, int colone);
auto get_time() { return chrono::high_resolution_clock::now(); }
int main()
{
size_t bytes_a = M_padded * K_padded * sizeof(int); // MxN = MxK * KxN
size_t bytes_b = K_padded * N_padded * sizeof(int);
size_t bytes_c = M * N * sizeof(int);
// CPU
vector<int> h_a(M_padded * K_padded); //12*12
vector<int> h_b(K_padded * N_padded); // 12*12
vector<int> h_c(M * N); //8*8
initMatrix(h_a,M_padded,K_padded);
initMatrix(h_b,K_padded,N_padded);
// GPU
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, bytes_a);
cudaMalloc(&d_b, bytes_b);
cudaMalloc(&d_c, bytes_c);
// CPU ---> GPU
cudaMemcpy(d_a, h_a.data(), bytes_a, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b.data(), bytes_b, cudaMemcpyHostToDevice);
int BLOCKS_X = N_padded / THREADS; // 12/4 = 3
int BLOCKS_Y = M_padded / THREADS; // 12/4 =3
dim3 threads(THREADS, THREADS); // (4,4)
dim3 blocks(BLOCKS_X, BLOCKS_Y); // (3,3)
auto start = get_time();
matrixMul<<<blocks, threads>>>(d_a, d_b, d_c); //<<< (3,3), (4,4) >>>
cudaMemcpy(h_c.data(), d_c, bytes_c, cudaMemcpyDeviceToHost);
auto finish = get_time();
auto duration =
chrono::duration_cast<chrono::milliseconds>(finish - start);
cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
afficheMatrix(h_c,M,N);
verify_result(h_a, h_b, h_c);
cout << "terminé avec succès"<<endl;
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
__global__ void matrixMul(const int *a, const int *b, int *c)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
int tmp = 0;
for (int i = 0; i < K_padded; i += blockDim.x)
{
s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * K + i + threadIdx.x];
s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * N + threadIdx.y * N + col];
__syncthreads();
for (int j = 0; j < blockDim.x; j++)
tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
__syncthreads();
}
if (row < M && col < N) c[row * N + col] = tmp;
}
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c)
{
for (int row = 0; row < M_padded; row++)
{
if (row >= M) continue;
for (int col = 0; col < N_padded; col++)
{
if (col >= N) continue;
int tmp = 0;
for (int i = 0; i < K_padded; i++)
tmp += a[row * K + i] * b[i * N + col];
assert(tmp == c[row * N + col]);
}
}
}
void initMatrix(vector<int>& m,int line, int colone)
{
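// NOTE: the buffer is filled with a row stride of K rather than colone;
// matrixMul and verify_result read it with the same stride, so the
// verification stays consistent despite the padding.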
for (int i = 0; i <line; i++)
{
for (int j = 0; j < colone; j++)
{
if (i < M && j < K) m[i * K + j] = rand() % 100;
else m[i * K + j] = 0;
cout<<m[i * K + j]<<" ";
}
cout<<endl;
}
cout<<"\n_______________________________________"<<endl;
}
void afficheMatrix(vector<int>& m,int line, int colone)
{
for (int i = 0; i <line; i++)
{
for (int j = 0; j < colone; j++)
{
cout<<m[i * colone + j]<<" ";
}
cout<<endl;
}
cout<<"\n_______________________________________"<<endl;
}
|
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include <chrono>
using namespace std;
constexpr int M = 1 << 3 ; //M =8
constexpr int N = 1 << 3;
constexpr int K = 1 << 3;
constexpr int THREADS = 1 << 2;
constexpr int M_padded = M + THREADS - M % THREADS; //8+4 - 8%4 = 12
constexpr int N_padded = N + THREADS - N % THREADS;
constexpr int K_padded = K + THREADS - K % THREADS;
constexpr int SHMEM_SIZE = THREADS * THREADS; // shared-memory tile size, in elements, per block
__global__ void matrixMul(const int *a, const int *b, int *c);
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c);
void initMatrix(vector<int>& ,int, int);
void afficheMatrix(vector<int>& m,int line, int colone);
auto get_time() { return chrono::high_resolution_clock::now(); }
int main()
{
size_t bytes_a = M_padded * K_padded * sizeof(int); // MxN = MxK * KxN
size_t bytes_b = K_padded * N_padded * sizeof(int);
size_t bytes_c = M * N * sizeof(int);
// CPU
vector<int> h_a(M_padded * K_padded); //12*12
vector<int> h_b(K_padded * N_padded); // 12*12
vector<int> h_c(M * N); //8*8
initMatrix(h_a,M_padded,K_padded);
initMatrix(h_b,K_padded,N_padded);
// GPU
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, bytes_a);
hipMalloc(&d_b, bytes_b);
hipMalloc(&d_c, bytes_c);
// CPU ---> GPU
hipMemcpy(d_a, h_a.data(), bytes_a, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b.data(), bytes_b, hipMemcpyHostToDevice);
int BLOCKS_X = N_padded / THREADS; // 12/4 = 3
int BLOCKS_Y = M_padded / THREADS; // 12/4 =3
dim3 threads(THREADS, THREADS); // (4,4)
dim3 blocks(BLOCKS_X, BLOCKS_Y); // (3,3)
auto start = get_time();
matrixMul<<<blocks, threads>>>(d_a, d_b, d_c); //<<< (3,3), (4,4) >>>
hipMemcpy(h_c.data(), d_c, bytes_c, hipMemcpyDeviceToHost);
auto finish = get_time();
auto duration =
chrono::duration_cast<chrono::milliseconds>(finish - start);
cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
afficheMatrix(h_c,M,N);
verify_result(h_a, h_b, h_c);
cout << "terminé avec succès"<<endl;
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
__global__ void matrixMul(const int *a, const int *b, int *c)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
int tmp = 0;
for (int i = 0; i < K_padded; i += blockDim.x)
{
s_a[threadIdx.y * blockDim.x + threadIdx.x] = a[row * K + i + threadIdx.x];
s_b[threadIdx.y * blockDim.x + threadIdx.x] = b[i * N + threadIdx.y * N + col];
__syncthreads();
for (int j = 0; j < blockDim.x; j++)
tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
__syncthreads();
}
if (row < M && col < N) c[row * N + col] = tmp;
}
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c)
{
for (int row = 0; row < M_padded; row++)
{
if (row >= M) continue;
for (int col = 0; col < N_padded; col++)
{
if (col >= N) continue;
int tmp = 0;
for (int i = 0; i < K_padded; i++)
tmp += a[row * K + i] * b[i * N + col];
assert(tmp == c[row * N + col]);
}
}
}
void initMatrix(vector<int>& m,int line, int colone)
{
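// NOTE: the buffer is filled with a row stride of K rather than colone;
// matrixMul and verify_result read it with the same stride, so the
// verification stays consistent despite the padding.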
for (int i = 0; i <line; i++)
{
for (int j = 0; j < colone; j++)
{
if (i < M && j < K) m[i * K + j] = rand() % 100;
else m[i * K + j] = 0;
cout<<m[i * K + j]<<" ";
}
cout<<endl;
}
cout<<"\n_______________________________________"<<endl;
}
void afficheMatrix(vector<int>& m,int line, int colone)
{
for (int i = 0; i <line; i++)
{
for (int j = 0; j < colone; j++)
{
cout<<m[i * colone + j]<<" ";
}
cout<<endl;
}
cout<<"\n_______________________________________"<<endl;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <cuda_profiler_api.h>
#include <tuple>
#include <iostream>
#include <string.h>
double time_host = 0;
double time_device = 0;
int sample_rounds = 10;
void meanFilter_host(unsigned char* image_matrix,unsigned char* filtered_image_data,int image_width, int image_height, int window_size)
{
int half_window = (window_size-window_size % 2)/2;
for(int i = 0; i < image_height; i += 1){
for(int j = 0; j < image_width; j += 1){
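// NOTE: image_height is used as the row stride here and below, so the
// indexing is only correct for square images (width == height).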
int k = 3*(i*image_height+j);
int top_boundary;
int bottom_boundary;
int left_boundary;
int right_boundary;
if(i-half_window >= 0){
top_boundary = i-half_window;
}else{
top_boundary = 0;
}
if(i+half_window <= image_height-1){
bottom_boundary = i+half_window;
}else{
bottom_boundary = image_height-1;
}
if(j-half_window >= 0){
left_boundary = j-half_window;
}else{
left_boundary = 0;
}
if(j+half_window <= image_width-1){
right_boundary = j+half_window;
}else{
right_boundary = image_width-1;
}
double byte1 = 0;
double byte2 = 0;
double byte3 = 0;
for(int x = top_boundary; x <= bottom_boundary; x++){
for(int y = left_boundary; y <= right_boundary; y++){
int pos = 3*(x*image_height + y);
byte1 += image_matrix[pos];
byte2 += image_matrix[pos+1];
byte3 += image_matrix[pos+2];
}
}
int effective_window_size = (bottom_boundary-top_boundary+1)*(right_boundary-left_boundary+1);
filtered_image_data[k] = byte1/effective_window_size;
filtered_image_data[k+1] = byte2/effective_window_size;
filtered_image_data[k+2] = byte3/effective_window_size;
}
}
}
__global__ void meanFilter_device(unsigned char* image_matrix, unsigned char* filtered_image_data, int image_width, int image_height, int window_size)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int half_window = (window_size-window_size % 2)/2;
if (i < image_height && j < image_width){
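// NOTE: image_height is used as the row stride here and below, so the
// indexing is only correct for square images (width == height).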
int k = 3*(i*image_height+j);
int top_boundary;
int bottom_boundary;
int left_boundary;
int right_boundary;
if(i-half_window >= 0){
top_boundary = i-half_window;
}else{
top_boundary = 0;
}
if(i+half_window <= image_height-1){
bottom_boundary = i+half_window;
}else{
bottom_boundary = image_height-1;
}
if(j-half_window >= 0){
left_boundary = j-half_window;
}else{
left_boundary = 0;
}
if(j+half_window <= image_width-1){
right_boundary = j+half_window;
}else{
right_boundary = image_width-1;
}
double byte1 = 0;
double byte2 = 0;
double byte3 = 0;
for(int x = top_boundary; x <= bottom_boundary; x++){
for(int y = left_boundary; y <= right_boundary; y++){
int pos = 3*(x*image_height + y);
byte1 += image_matrix[pos];
byte2 += image_matrix[pos+1];
byte3 += image_matrix[pos+2];
}
}
int effective_window_size = (bottom_boundary-top_boundary+1)*(right_boundary-left_boundary+1);
filtered_image_data[k] = byte1/effective_window_size;
filtered_image_data[k+1] = byte2/effective_window_size;
filtered_image_data[k+2] = byte3/effective_window_size;
}
}
int main(int argc,char **argv)
{
FILE* f = fopen(argv[1], "rb");
unsigned char info[54];
fread(info, sizeof(unsigned char), 54, f);
int width, height;
memcpy(&width, info + 18, sizeof(int));
memcpy(&height, info + 22, sizeof(int));
int window_size = strtol(argv[2],NULL,10);
printf(" Window size: %d\n",window_size);
printf("Image dimensions: (%d, %d)\n",width,height);
int size = 3 * width * abs(height);
unsigned char* data = new unsigned char[size];
unsigned char* result_image_data_d;
unsigned char* result_image_data_h = new unsigned char[size];
unsigned char* result_image_data_h1 = new unsigned char[size];
unsigned char* image_data_d;
fread(data, sizeof(unsigned char), size, f);
fclose(f);
int block_size = 32;
int grid_size = width/block_size;
dim3 dimBlock(block_size, block_size, 1);
dim3 dimGrid(grid_size, grid_size, 1);
for(int i = 0; i < sample_rounds; i += 1)
{
cudaMalloc((void **)&image_data_d,size*sizeof(unsigned char));
cudaMalloc((void **)&result_image_data_d,size*sizeof(unsigned char));
cudaMemcpy(image_data_d,data,size*sizeof(unsigned char),cudaMemcpyHostToDevice);
clock_t start_d=clock();
meanFilter_device <<< dimGrid, dimBlock >>> (image_data_d, result_image_data_d, width, height, window_size);
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess)
{
fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) );
exit(-1);
}
clock_t end_d = clock();
clock_t start_h = clock();
meanFilter_host(data, result_image_data_h1, width, height, window_size);
clock_t end_h = clock();
cudaMemcpy(result_image_data_h,result_image_data_d,size*sizeof(unsigned char),cudaMemcpyDeviceToHost);
time_host += (double)(end_h-start_h)/CLOCKS_PER_SEC;
time_device += (double)(end_d-start_d)/CLOCKS_PER_SEC;
cudaFree(image_data_d);
cudaFree(result_image_data_d);
}
printf(" GPU Time: %f\n",(time_device/sample_rounds));
printf(" CPU Time: %f\n",(time_host/sample_rounds));
printf("CPU/GPU time: %f\n",(time_host/time_device));
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <tuple>
#include <iostream>
#include <string.h>
double time_host = 0;
double time_device = 0;
int sample_rounds = 10;
void meanFilter_host(unsigned char* image_matrix,unsigned char* filtered_image_data,int image_width, int image_height, int window_size)
{
int half_window = (window_size-window_size % 2)/2;
for(int i = 0; i < image_height; i += 1){
for(int j = 0; j < image_width; j += 1){
int k = 3*(i*image_height+j);
int top_boundary;
int bottom_boundary;
int left_boundary;
int right_boundary;
if(i-half_window >= 0){
top_boundary = i-half_window;
}else{
top_boundary = 0;
}
if(i+half_window <= image_height-1){
bottom_boundary = i+half_window;
}else{
bottom_boundary = image_height-1;
}
if(j-half_window >= 0){
left_boundary = j-half_window;
}else{
left_boundary = 0;
}
if(j+half_window <= image_width-1){
right_boundary = j+half_window;
}else{
right_boundary = image_width-1;
}
double byte1 = 0;
double byte2 = 0;
double byte3 = 0;
for(int x = top_boundary; x <= bottom_boundary; x++){
for(int y = left_boundary; y <= right_boundary; y++){
int pos = 3*(x*image_height + y);
byte1 += image_matrix[pos];
byte2 += image_matrix[pos+1];
byte3 += image_matrix[pos+2];
}
}
int effective_window_size = (bottom_boundary-top_boundary+1)*(right_boundary-left_boundary+1);
filtered_image_data[k] = byte1/effective_window_size;
filtered_image_data[k+1] = byte2/effective_window_size;
filtered_image_data[k+2] = byte3/effective_window_size;
}
}
}
__global__ void meanFilter_device(unsigned char* image_matrix, unsigned char* filtered_image_data, int image_width, int image_height, int window_size)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int half_window = (window_size-window_size % 2)/2;
if (i < image_height && j < image_width){
int k = 3*(i*image_height+j);
int top_boundary;
int bottom_boundary;
int left_boundary;
int right_boundary;
if(i-half_window >= 0){
top_boundary = i-half_window;
}else{
top_boundary = 0;
}
if(i+half_window <= image_height-1){
bottom_boundary = i+half_window;
}else{
bottom_boundary = image_height-1;
}
if(j-half_window >= 0){
left_boundary = j-half_window;
}else{
left_boundary = 0;
}
if(j+half_window <= image_width-1){
right_boundary = j+half_window;
}else{
right_boundary = image_width-1;
}
double byte1 = 0;
double byte2 = 0;
double byte3 = 0;
for(int x = top_boundary; x <= bottom_boundary; x++){
for(int y = left_boundary; y <= right_boundary; y++){
int pos = 3*(x*image_height + y);
byte1 += image_matrix[pos];
byte2 += image_matrix[pos+1];
byte3 += image_matrix[pos+2];
}
}
int effective_window_size = (bottom_boundary-top_boundary+1)*(right_boundary-left_boundary+1);
filtered_image_data[k] = byte1/effective_window_size;
filtered_image_data[k+1] = byte2/effective_window_size;
filtered_image_data[k+2] = byte3/effective_window_size;
}
}
int main(int argc,char **argv)
{
FILE* f = fopen(argv[1], "rb");
unsigned char info[54];
fread(info, sizeof(unsigned char), 54, f);
int width, height;
memcpy(&width, info + 18, sizeof(int));
memcpy(&height, info + 22, sizeof(int));
int window_size = strtol(argv[2],NULL,10);
printf(" Window size: %d\n",window_size);
printf("Image dimensions: (%d, %d)\n",width,height);
int size = 3 * width * abs(height);
unsigned char* data = new unsigned char[size];
unsigned char* result_image_data_d;
unsigned char* result_image_data_h = new unsigned char[size];
unsigned char* result_image_data_h1 = new unsigned char[size];
unsigned char* image_data_d;
fread(data, sizeof(unsigned char), size, f);
fclose(f);
int block_size = 32;
int grid_size = width/block_size;
dim3 dimBlock(block_size, block_size, 1);
dim3 dimGrid(grid_size, grid_size, 1);
for(int i = 0; i < sample_rounds; i += 1)
{
hipMalloc((void **)&image_data_d,size*sizeof(unsigned char));
hipMalloc((void **)&result_image_data_d,size*sizeof(unsigned char));
hipMemcpy(image_data_d,data,size*sizeof(unsigned char),hipMemcpyHostToDevice);
clock_t start_d=clock();
meanFilter_device <<< dimGrid, dimBlock >>> (image_data_d, result_image_data_d, width, height, window_size);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess)
{
fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) );
exit(-1);
}
clock_t end_d = clock();
clock_t start_h = clock();
meanFilter_host(data, result_image_data_h1, width, height, window_size);
clock_t end_h = clock();
hipMemcpy(result_image_data_h,result_image_data_d,size*sizeof(unsigned char),hipMemcpyDeviceToHost);
time_host += (double)(end_h-start_h)/CLOCKS_PER_SEC;
time_device += (double)(end_d-start_d)/CLOCKS_PER_SEC;
hipFree(image_data_d);
hipFree(result_image_data_d);
}
printf(" GPU Time: %f\n",(time_device/sample_rounds));
printf(" CPU Time: %f\n",(time_host/sample_rounds));
printf("CPU/GPU time: %f\n",(time_host/time_device));
return 0;
}
|
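One mapping worth calling out in this pair: the deprecated cudaThreadSynchronize() becomes hipDeviceSynchronize(), and the launch-then-check pattern otherwise carries over verbatim. A minimal hedged sketch of that pattern in isolation (the helper name and message are illustrative):
#include <hip/hip_runtime.h>
#include <stdio.h>
// Sketch: check for launch errors, then synchronize to surface execution errors.
static void checkAfterLaunch(const char *what) {
    hipError_t err = hipGetLastError();                  // launch-time errors
    if (err == hipSuccess) err = hipDeviceSynchronize(); // execution errors
    if (err != hipSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, hipGetErrorString(err));
        exit(-1);
    }
}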
Convert the following CUDA code to AMD GPU code:
cuda
// pi2.cu
/*
* A simple CUDA-enabled program that approximates \pi using monte-carlo
* sampling. This version generates random numbers on-the-fly within each
* kernel.
*/
#include <iostream>
#include <curand.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
using namespace std;
__global__ void initThreads(float* d_out, curandState_t* states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(idx, idx, 0, &states[idx]);
d_out[idx] = 0.0;
}
__global__ void pi(float* d_out, curandState_t* states, int N_TRIALS) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curandState_t tmp_state = states[idx];
int count = 0;
for(int i=1; i <= N_TRIALS; i++) {
float x = curand_uniform(&tmp_state);
float y = curand_uniform(&tmp_state);
if( x*x + y*y <= 1.0f ) {
count++;
}
}
states[idx] = tmp_state;
d_out[idx] += float(count)/float(N_TRIALS);
}
int main(int argc, char** argv) {
int GRID_SIZE = 256;
int BLOCK_SIZE = 256;
int N_TRIALS = 1000;
int N_RUNS = 10;
char x;
opterr = 0;
while((x = getopt(argc, argv, "g:b:t:r:")) != -1) {
switch(x) {
case 'g': GRID_SIZE = atoi(optarg); break;
case 'b': BLOCK_SIZE = atoi(optarg); break;
case 't': N_TRIALS = atoi(optarg); break;
case 'r': N_RUNS = atoi(optarg); break;
case '?':
if (optopt == 'g' || optopt == 'b' || optopt == 't' || optopt == 'r') {
fprintf (stderr, "Option -%c requires an argument.\n", optopt);
} else {
fprintf (stderr, "Unknown option `-%c'.\n", optopt);
}
abort();
default: abort();
}
}
int N_KERNELS = GRID_SIZE * BLOCK_SIZE;
float* h_pis = (float*) malloc(N_KERNELS*sizeof(float));
float* d_pis;
cudaMalloc(&d_pis, N_KERNELS * sizeof(float));
curandState_t* states;
cudaMalloc(&states, N_KERNELS * sizeof(curandState_t));
time_t start = clock();
initThreads<<<GRID_SIZE, BLOCK_SIZE>>>(d_pis, states);
for(int irun=1; irun <= N_RUNS; irun++) {
pi<<<GRID_SIZE, BLOCK_SIZE>>>(d_pis, states, N_TRIALS);
}
cudaMemcpy(h_pis, d_pis, N_KERNELS*sizeof(float), cudaMemcpyDeviceToHost);
float avg = 0.0;
for(int i=0; i < N_KERNELS; i++) {
avg += h_pis[i] / N_RUNS;
}
avg /= N_KERNELS;
time_t end = clock();
int64_t iters = int64_t(N_KERNELS)*int64_t(N_TRIALS)*int64_t(N_RUNS);
int elapsed = 1000*(end-start)/CLOCKS_PER_SEC;
cout << 4*avg << "\n";
cout << elapsed << "\n";
cout << float(iters)/float(elapsed) << " iters/ms\n";
free(h_pis);
cudaFree(d_pis);
cudaFree(states);
return 0;
}
|
// pi2.cu
/*
* A simple CUDA-enabled program that approximates \pi using monte-carlo
* sampling. This version generates random numbers on-the-fly within each
* kernel.
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
using namespace std;
__global__ void initThreads(float* d_out, hiprandState_t* states) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprand_init(idx, idx, 0, &states[idx]);
d_out[idx] = 0.0;
}
__global__ void pi(float* d_out, hiprandState_t* states, int N_TRIALS) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t tmp_state = states[idx];
int count = 0;
for(int i=1; i <= N_TRIALS; i++) {
float x = hiprand_uniform(&tmp_state);
float y = hiprand_uniform(&tmp_state);
if( x*x + y*y <= 1.0f ) {
count++;
}
}
states[idx] = tmp_state;
d_out[idx] += float(count)/float(N_TRIALS);
}
int main(int argc, char** argv) {
int GRID_SIZE = 256;
int BLOCK_SIZE = 256;
int N_TRIALS = 1000;
int N_RUNS = 10;
char x;
opterr = 0;
while((x = getopt(argc, argv, "g:b:t:r:")) != -1) {
switch(x) {
case 'g': GRID_SIZE = atoi(optarg); break;
case 'b': BLOCK_SIZE = atoi(optarg); break;
case 't': N_TRIALS = atoi(optarg); break;
case 'r': N_RUNS = atoi(optarg); break;
case '?':
if (optopt == 'g' || optopt == 'b' || optopt == 't' || optopt == 'r') {
fprintf (stderr, "Option -%c requires an argument.\n", optopt);
} else {
fprintf (stderr, "Unknown option `-%c'.\n", optopt);
}
abort();
default: abort();
}
}
int N_KERNELS = GRID_SIZE * BLOCK_SIZE;
float* h_pis = (float*) malloc(N_KERNELS*sizeof(float));
float* d_pis;
hipMalloc(&d_pis, N_KERNELS * sizeof(float));
hiprandState_t* states;
hipMalloc(&states, N_KERNELS * sizeof(hiprandState_t));
time_t start = clock();
initThreads<<<GRID_SIZE, BLOCK_SIZE>>>(d_pis, states);
for(int irun=1; irun <= N_RUNS; irun++) {
pi<<<GRID_SIZE, BLOCK_SIZE>>>(d_pis, states, N_TRIALS);
}
hipMemcpy(h_pis, d_pis, N_KERNELS*sizeof(float), hipMemcpyDeviceToHost);
float avg = 0.0;
for(int i=0; i < N_KERNELS; i++) {
avg += h_pis[i] / N_RUNS;
}
avg /= N_KERNELS;
time_t end = clock();
int64_t iters = int64_t(N_KERNELS)*int64_t(N_TRIALS)*int64_t(N_RUNS);
int elapsed = 1000*(end-start)/CLOCKS_PER_SEC;
cout << 4*avg << "\n";
cout << elapsed << "\n";
cout << float(iters)/float(elapsed) << " iters/ms\n";
free(h_pis);
hipFree(d_pis);
hipFree(states);
return 0;
}
|
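The RNG mapping here is one-to-one: curand.h/curand_kernel.h become hiprand/hiprand.h and hiprand/hiprand_kernel.h, and curand_init/curand_uniform become hiprand_init/hiprand_uniform with the same signatures. A hedged minimal sketch of the per-thread setup used by initThreads and pi (the kernel name is illustrative):
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
// Sketch: seed a per-thread generator and draw one uniform in (0, 1].
__global__ void drawOne(float *out, unsigned long long seed) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    hiprandState_t s;
    hiprand_init(seed, idx, 0, &s); // seed, subsequence, offset
    out[idx] = hiprand_uniform(&s);
}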
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <cuda.h>
#define DATA_TYPE float
#define NX 1024*8 // A = NX * NY
#define NY 1024*32 // B = NY * NZ
#define NZ 1024
#define GPU_DEVICE 0
using namespace std;
__global__ void MatMul(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* Out){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE tmp=0;
for(int i=0; i<NY; ++i){
tmp += A[NY*idy+i] * B[i*NZ+idx];
}
Out[NX*idy + idx] = tmp;
}
void init_mat(DATA_TYPE* MAT, int size){
for(int i=0; i<size; ++i){
MAT[i]=1;
}
}
void GPU_argv_init(){
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
cout<<"setting device "<< GPU_DEVICE << "with name" << deviceProp.name <<endl;
cudaSetDevice( GPU_DEVICE);
}
int main(){
int size_A = NX*NY*sizeof(DATA_TYPE);
int size_B = NY*NZ*sizeof(DATA_TYPE);
int size_C = NX*NZ*sizeof(DATA_TYPE);
DATA_TYPE* hA = (DATA_TYPE*)malloc(size_A);
DATA_TYPE* hB = (DATA_TYPE*)malloc(size_B);
DATA_TYPE* hC = (DATA_TYPE*)malloc(size_C); // result of matrix multiplication
init_mat(hA, NX*NY);
init_mat(hB, NY*NZ);
DATA_TYPE* dA;
DATA_TYPE* dB;
DATA_TYPE* dC;
GPU_argv_init();
cudaMalloc(&dA, size_A);
cudaMalloc(&dB, size_B);
cudaMalloc(&dC, size_C);
cudaMemcpy(dA, hA, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size_B, cudaMemcpyHostToDevice);
dim3 block(32,8);
dim3 grid(NY/block.x, 1);
MatMul<<< block, grid >>>(dA,dB,dC);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
cout<<"Error:"<< cudaGetErrorString(err) <<endl;
cudaMemcpy(hC, dC, size_C, cudaMemcpyDeviceToHost);
for(int i=0; i<10; ++i){
cout<<hC[i];
if((i%128) == 127){
cout<<endl;
}
}
cout<<endl;
cudaFree(hA);
cudaFree(hB);
cudaFree(hC);
}
|
#include <iostream>
#include <hip/hip_runtime.h>
#define DATA_TYPE float
#define NX 1024*8 // A = NX * NY
#define NY 1024*32 // B = NY * NZ
#define NZ 1024
#define GPU_DEVICE 0
using namespace std;
__global__ void MatMul(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* Out){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
DATA_TYPE tmp=0;
for(int i=0; i<NY; ++i){
tmp += A[NY*idy+i] * B[i*NZ+idx];
}
Out[NX*idy + idx] = tmp;
}
void init_mat(DATA_TYPE* MAT, int size){
for(int i=0; i<size; ++i){
MAT[i]=1;
}
}
void GPU_argv_init(){
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
cout<<"setting device "<< GPU_DEVICE << "with name" << deviceProp.name <<endl;
hipSetDevice( GPU_DEVICE);
}
int main(){
int size_A = NX*NY*sizeof(DATA_TYPE);
int size_B = NY*NZ*sizeof(DATA_TYPE);
int size_C = NX*NZ*sizeof(DATA_TYPE);
DATA_TYPE* hA = (DATA_TYPE*)malloc(size_A);
DATA_TYPE* hB = (DATA_TYPE*)malloc(size_B);
DATA_TYPE* hC = (DATA_TYPE*)malloc(size_C); // result of matrix multiplication
init_mat(hA, NX*NY);
init_mat(hB, NY*NZ);
DATA_TYPE* dA;
DATA_TYPE* dB;
DATA_TYPE* dC;
GPU_argv_init();
hipMalloc(&dA, size_A);
hipMalloc(&dB, size_B);
hipMalloc(&dC, size_C);
hipMemcpy(dA, hA, size_A, hipMemcpyHostToDevice);
hipMemcpy(dB, hB, size_B, hipMemcpyHostToDevice);
dim3 block(32,8);
dim3 grid(NY/block.x, 1);
MatMul<<< block, grid >>>(dA,dB,dC);
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (err != hipSuccess)
cout<<"Error:"<< hipGetErrorString(err) <<endl;
hipMemcpy(hC, dC, size_C, hipMemcpyDeviceToHost);
for(int i=0; i<10; ++i){
cout<<hC[i];
if((i%128) == 127){
cout<<endl;
}
}
cout<<endl;
hipFree(hA);
hipFree(hB);
hipFree(hC);
}
|
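Two details in this pair deserve a note. First, cudaDeviceProp maps to hipDeviceProp_t (note the added _t) while hipGetDeviceProperties and hipSetDevice keep their shapes. Second, both listings pass the malloc'd host pointers hA/hB/hC to cudaFree/hipFree; hipFree is meant only for hipMalloc'd device memory, so those releases should be free(). A minimal hedged sketch of matched allocate/release pairs:
#include <hip/hip_runtime.h>
#include <cstdlib>
int main() {
    float *h = (float*)malloc(1024 * sizeof(float)); // host: malloc/free
    float *d = nullptr;
    hipMalloc(&d, 1024 * sizeof(float));             // device: hipMalloc/hipFree
    hipFree(d);
    free(h);
    return 0;
}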
Convert the following CUDA code to AMD GPU code:
cuda
//This is a matrix multiplication program in CUDA without any optimizations
//like tiling, using shared memory etc
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
#include<assert.h>
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int width)
{
//2D thread ID
int bx=blockIdx.x;
int by=blockIdx.y;
int tdx=threadIdx.x;
int tdy=threadIdx.y;
int tx=bx*blockDim.x+tdx;
int ty=by*blockDim.y+tdy;
//Pvalue stores the Pd element that is computed by the thread
float Pvalue=0;
for(int k=0;k<width;++k){
float Mdelement=Md[ty*width+k];
float Ndelement=Nd[k*width+tx];
Pvalue += Mdelement*Ndelement;
}
//Write the matrix to device memory each thread writes one element
Pd[ty*width+tx]=Pvalue;
}
int main(int argc, char** argv){
int width;
int BlockDim;
int GridDim;
if (argc == 3){
width=atoi(argv[1]);
BlockDim=atoi(argv[2]);
GridDim=width/BlockDim;
printf("Using matrix dimension %dx%d ,Block Dim %dx%d threads per block, Grid Dim %dx%d blocks per grid\n",width,width,BlockDim,BlockDim,GridDim,GridDim);
}else{
width=512;
BlockDim=16;
GridDim=width/BlockDim;
printf("Using Default Parameters: matrix dimension %dx%d ,Block Dim %dx%d threads per block, Grid Dim %dx%d blocks per grid\n",width,width,BlockDim,BlockDim,GridDim,GridDim);
}
dim3 dimBlock(BlockDim,BlockDim);
dim3 dimGrid(GridDim,GridDim);
cudaError_t error;
cudaDeviceProp deviceProp;
int devID=0;
error=cudaGetDevice(&devID);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error=cudaGetDeviceProperties(&deviceProp,devID);
if (error != cudaSuccess){
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
int size=width*width*sizeof(float);
float* M=(float*)malloc(size);
float* N=(float*)malloc(size);
float* P=(float*)malloc(size);
float* Md,*Nd,*Pd;
if(!(M&&N)){
printf("Malloc failed\n");
exit(-1);
}
// initialization of host data
for (int j = 0; j < width; j++) {
for (int i = 0; i < width; i++) {
M[j*width + i] = (float)(rand()%50);
N[j*width + i] = (float)(rand()%50);
P[j*width + i] = 0;
}
}
error=cudaMalloc((void**)&Md,size);
if(error!=cudaSuccess){
printf("Device memory allocation for M failed \n");
exit(-1);
}
error=cudaMalloc((void**)&Nd,size);
if(error!=cudaSuccess){
printf("Device memory allocation for N failed \n");
exit(-1);
}
error=cudaMalloc((void**)&Pd,size);
if(error!=cudaSuccess){
printf("Device memory allocation for P failed \n");
exit(-1);
}
error=cudaMemcpy(Md,M,size,cudaMemcpyHostToDevice);
if(error!=cudaSuccess){
printf("Device memory copy for M failed \n");
exit(-1);
}
error=cudaMemcpy(Nd,N,size,cudaMemcpyHostToDevice);
if(error!=cudaSuccess){
printf("Device memory copy for N failed \n");
exit(-1);
}
cudaEvent_t start;
error=cudaEventCreate(&start);
if(error!=cudaSuccess){
printf("cuda event start failed \n");
exit(-1);
}
cudaEvent_t stop;
error=cudaEventCreate(&stop);
if(error!=cudaSuccess){
printf("cuda event stop failed \n");
exit(-1);
}
error =cudaEventRecord(start,NULL);
if(error!=cudaSuccess){
printf("cuda event start record failed \n");
exit(-1);
}
MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,width);
// error=cudaDeviceSynchronize();
error =cudaEventRecord(stop,NULL);
if(error!=cudaSuccess){
printf("cuda event stop record failed with error=%s\n",cudaGetErrorString(error));
exit(-1);
}
error = cudaEventSynchronize(stop);
if(error!=cudaSuccess){
printf("cuda event sync failed :%s\n",cudaGetErrorString(error));
exit(-1);
}
float msecTotal=0.0f;
error = cudaEventElapsedTime(&msecTotal,start,stop);
if(error!=cudaSuccess){
printf("cuda elapsed time calculation failed \n");
exit(-1);
}
float msecPerMatrixMul = msecTotal;
double flopsPerMatrixMul = 2*width*width*width;
double gigaFlops=(flopsPerMatrixMul*1.0e-9f)/(msecPerMatrixMul/1000.0f);
printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
width * width);
error=cudaMemcpy(P,Pd,size,cudaMemcpyDeviceToHost);
if(error!=cudaSuccess){
printf("Device memoory copy back for Pd failed \n");
exit(-1);
}
printf("Very slow Host Matrix Mult \n");
float temp;
// initialization of host data
for (int i = 0; i < width; ++i) {
for ( int j = 0; j < width; ++j) {
temp=0;
for(int k=0; k<width; ++k)
temp+=M[i*width+k]*N[k*width+j];
if(temp != P[i*width+j]){
printf("Matrix Mult Screwed Up!! differ in values CPU:%f and GPU:%f \n",temp,P[i*width+j]);
exit(-1);
}
}
}
free(M);
free(N);
free(P);
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
cudaDeviceReset();
return 1;
}
|
//This is a matrix multiplication program in CUDA without any optimizations
//like tiling, using shared memory etc
#include<stdio.h>
#include<stdlib.h>
#include<hip/hip_runtime.h>
#include<assert.h>
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int width)
{
//2D thread ID
int bx=blockIdx.x;
int by=blockIdx.y;
int tdx=threadIdx.x;
int tdy=threadIdx.y;
int tx=bx*blockDim.x+tdx;
int ty=by*blockDim.y+tdy;
//Pvalue stores the Pd element that is computed by the thread
float Pvalue=0;
for(int k=0;k<width;++k){
float Mdelement=Md[ty*width+k];
float Ndelement=Nd[k*width+tx];
Pvalue += Mdelement*Ndelement;
}
//Write the matrix to device memory each thread writes one element
Pd[ty*width+tx]=Pvalue;
}
int main(int argc, char** argv){
int width;
int BlockDim;
int GridDim;
if (argc == 3){
width=atoi(argv[1]);
BlockDim=atoi(argv[2]);
GridDim=width/BlockDim;
printf("Using matrix dimension %dx%d ,Block Dim %dx%d threads per block, Grid Dim %dx%d blocks per grid\n",width,width,BlockDim,BlockDim,GridDim,GridDim);
}else{
width=512;
BlockDim=16;
GridDim=width/BlockDim;
printf("Using Default Parameters: matrix dimension %dx%d ,Block Dim %dx%d threads per block, Grid Dim %dx%d blocks per grid\n",width,width,BlockDim,BlockDim,GridDim,GridDim);
}
dim3 dimBlock(BlockDim,BlockDim);
dim3 dimGrid(GridDim,GridDim);
hipError_t error;
hipDeviceProp_t deviceProp;
int devID=0;
error=hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error=hipGetDeviceProperties(&deviceProp,devID);
if (error != hipSuccess){
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}else{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
int size=width*width*sizeof(float);
float* M=(float*)malloc(size);
float* N=(float*)malloc(size);
float* P=(float*)malloc(size);
float* Md,*Nd,*Pd;
if(!(M&&N)){
printf("Malloc failed\n");
exit(-1);
}
// initialization of host data
for (int j = 0; j < width; j++) {
for (int i = 0; i < width; i++) {
M[j*width + i] = (float)(rand()%50);
N[j*width + i] = (float)(rand()%50);
P[j*width + i] = 0;
}
}
error=hipMalloc((void**)&Md,size);
if(error!=hipSuccess){
printf("Device memory allocation for M failed \n");
exit(-1);
}
error=hipMalloc((void**)&Nd,size);
if(error!=hipSuccess){
printf("Device memory allocation for N failed \n");
exit(-1);
}
error=hipMalloc((void**)&Pd,size);
if(error!=hipSuccess){
printf("Device memory allocation for P failed \n");
exit(-1);
}
error=hipMemcpy(Md,M,size,hipMemcpyHostToDevice);
if(error!=hipSuccess){
printf("Device memory copy for M failed \n");
exit(-1);
}
error=hipMemcpy(Nd,N,size,hipMemcpyHostToDevice);
if(error!=hipSuccess){
printf("Device memory copy for N failed \n");
exit(-1);
}
hipEvent_t start;
error=hipEventCreate(&start);
if(error!=hipSuccess){
printf("cuda event start failed \n");
exit(-1);
}
hipEvent_t stop;
error=hipEventCreate(&stop);
if(error!=hipSuccess){
printf("cuda event stop failed \n");
exit(-1);
}
error =hipEventRecord(start,NULL);
if(error!=hipSuccess){
printf("cuda event start record failed \n");
exit(-1);
}
MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,width);
// error=cudaDeviceSynchronize();
error =hipEventRecord(stop,NULL);
if(error!=hipSuccess){
printf("cuda event stop record failed with error=%s\n",hipGetErrorString(error));
exit(-1);
}
error = hipEventSynchronize(stop);
if(error!=hipSuccess){
printf("cuda event sync failed :%s\n",hipGetErrorString(error));
exit(-1);
}
float msecTotal=0.0f;
error = hipEventElapsedTime(&msecTotal,start,stop);
if(error!=hipSuccess){
printf("cuda elapsed time calculation failed \n");
exit(-1);
}
float msecPerMatrixMul = msecTotal;
double flopsPerMatrixMul = 2*width*width*width;
double gigaFlops=(flopsPerMatrixMul*1.0e-9f)/(msecPerMatrixMul/1000.0f);
printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
width * width);
error=hipMemcpy(P,Pd,size,hipMemcpyDeviceToHost);
if(error!=hipSuccess){
printf("Device memoory copy back for Pd failed \n");
exit(-1);
}
printf("Very slow Host Matrix Mult \n");
float temp;
// initialization of host data
for (int i = 0; i < width; ++i) {
for ( int j = 0; j < width; ++j) {
temp=0;
for(int k=0; k<width; ++k)
temp+=M[i*width+k]*N[k*width+j];
if(temp != P[i*width+j]){
printf("Matrix Mult Screwed Up!! differ in values CPU:%f and GPU:%f \n",temp,P[i*width+j]);
exit(-1);
}
}
}
free(M);
free(N);
free(P);
hipFree(Md);
hipFree(Nd);
hipFree(Pd);
hipDeviceReset();
return 1;
}
|
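The event timing in this listing converts mechanically: every cudaEvent* call has a hipEvent* counterpart with the same signature. A minimal hedged sketch of the pattern on its own (the function name is illustrative):
#include <hip/hip_runtime.h>
// Sketch: measure elapsed GPU time around a stretch of work.
float timeRegionMs() {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, NULL);
    // ... launch kernels here ...
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);   // wait until 'stop' has been reached
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return ms;
}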
Convert the following CUDA code to AMD GPU code:
cuda
#define TILE_DIM 32
template<typename T>
__device__ void vectorDotVector(const T* A, const T* B, T* result, const int length) {
__shared__ T a_tile[TILE_DIM];
__shared__ T b_tile[TILE_DIM];
__shared__ T result_tile[TILE_DIM];
for (int i = 0; i < TILE_DIM; i++) {
result_tile[i] = 0;
}
int tx = threadIdx.x;
for (int t = 0; t < (length - 1) / TILE_DIM + 1; t++) {
int index = t * TILE_DIM + tx;
if (index < length) {
a_tile[tx] = A[index];
b_tile[tx] = B[index];
} else {
a_tile[tx] = 0;
b_tile[tx] = 0;
}
__syncthreads();
result_tile[tx] += a_tile[tx] * b_tile[tx];
__syncthreads();
}
T resultValue = 0;
if (tx == 0) {
for (int i = 0; i < TILE_DIM; i++) {
resultValue += result_tile[i];
}
result[0] = resultValue;
}
}
template<typename T>
__device__ void columnDotRow(const T* vectorA, const T* vectorB, T* resultMatrix,
const int lengthA, const int lengthB) {
__shared__ T vectorA_tile[TILE_DIM];
__shared__ T vectorB_tile[TILE_DIM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
if (ty == 0) {
if (row + tx < lengthA) {
vectorA_tile[tx] = vectorA[row + tx];
}
if (col < lengthB) {
vectorB_tile[tx] = vectorB[col];
}
}
__syncthreads();
if (row < lengthA && col < lengthB) {
resultMatrix[row * lengthB + col] = vectorA_tile[ty] * vectorB_tile[tx];
}
}
|
#include <hip/hip_runtime.h>
#define TILE_DIM 32
template<typename T>
__device__ void vectorDotVector(const T* A, const T* B, T* result, const int length) {
__shared__ T a_tile[TILE_DIM];
__shared__ T b_tile[TILE_DIM];
__shared__ T result_tile[TILE_DIM];
for (int i = 0; i < TILE_DIM; i++) {
result_tile[i] = 0;
}
int tx = threadIdx.x;
for (int t = 0; t < (length - 1) / TILE_DIM + 1; t++) {
int index = t * TILE_DIM + tx;
if (index < length) {
a_tile[tx] = A[index];
b_tile[tx] = B[index];
} else {
a_tile[tx] = 0;
b_tile[tx] = 0;
}
__syncthreads();
result_tile[tx] += a_tile[tx] * b_tile[tx];
__syncthreads();
}
T resultValue = 0;
if (tx == 0) {
for (int i = 0; i < TILE_DIM; i++) {
resultValue += result_tile[i];
}
result[0] = resultValue;
}
}
template<typename T>
__device__ void columnDotRow(const T* vectorA, const T* vectorB, T* resultMatrix,
const int lengthA, const int lengthB) {
__shared__ T vectorA_tile[TILE_DIM];
__shared__ T vectorB_tile[TILE_DIM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
if (ty == 0) {
if (row + tx < lengthA) {
vectorA_tile[tx] = vectorA[row + tx];
}
if (col < lengthB) {
vectorB_tile[tx] = vectorB[col];
}
}
__syncthreads();
if (row < lengthA && col < lengthB) {
resultMatrix[row * lengthB + col] = vectorA_tile[ty] * vectorB_tile[tx];
}
}
|
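Since this file contains only __device__ helpers, the include is the entire conversion: __shared__, __syncthreads(), and device templates compile unchanged under hipcc. Note that TILE_DIM = 32 matches a CUDA warp, while AMD wavefronts are typically 64; the explicit __syncthreads() keeps the reduction correct either way. A hedged sketch of a __global__ wrapper one might add to exercise vectorDotVector (the wrapper is hypothetical, not part of the original file):
// Hypothetical driver: launch as dot<float><<<1, TILE_DIM>>>(A, B, out, n).
template<typename T>
__global__ void dot(const T *A, const T *B, T *out, int length) {
    vectorDotVector<T>(A, B, out, length);
}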
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <cuda.h>
int *a, *b; // host data
int *c, *c2; // results
//Cuda error checking - not mandatory
void cudaCheckError() {
cudaError_t e=cudaGetLastError();
if(e!=cudaSuccess) {
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e));
exit(0);
}
}
//GPU kernel
__global__
void matrixMultiply(int* A,int* B,int* C, int N){
int ai = threadIdx.y * blockDim.x;
int bi = threadIdx.x;
int ci = threadIdx.x + threadIdx.y * blockDim.x ;
int result = 0;
if(ci < N) {
for(int i = 0; i < 4; i++){
result += A[ai] * B[bi];
ai++;
bi += 4;
}
C[ci] = result;
}
}
int main(int argc,char **argv) {
printf("Begin \n");
//Iterations
int n=16; // number of matrix elements, x * y
//Buffer size in bytes
int nBytes = n*sizeof(int);
//memory allocation
a = (int *) malloc(nBytes);
b = (int *) malloc(nBytes);
c = (int *) malloc(nBytes);
c2 = (int *) malloc(nBytes);
// Data filling
for(int i=0;i<n;i++)
a[i]=i,b[i]=i;
printf("Allocating device memory on host..\n");
//GPU memory allocation
int *a_d,*b_d,*c_d;
cudaMalloc((void **) &a_d, n*sizeof(int));
cudaMalloc((void **) &b_d, n*sizeof(int));
cudaMalloc((void **) &c_d, n*sizeof(int));
printf("Copying to device..\n");
cudaMemcpy(a_d, a, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, n*sizeof(int), cudaMemcpyHostToDevice);
//Work definition
dim3 threadsPerBlock(4, 4);
dim3 numBlocks(1, 1);
printf("Doing GPU Vector Multiplication\n");
matrixMultiply<<<numBlocks, threadsPerBlock>>>(a_d, b_d, c_d, n);
cudaCheckError();
//Wait for kernel call to finish
cudaDeviceSynchronize();
//Copying data back to host, this is a blocking call and will not start until all kernels are finished
printf("Copying back to host...\n");
cudaMemcpy(c, c_d, n*sizeof(int), cudaMemcpyDeviceToHost);
int counter = 0;
printf("A\n");
for(int i = 0; i < 4; i++) {
for(int j = 0; j < 4; j++) {
printf("%d ",a[counter]);
counter++;
}
printf("\n");
}
counter = 0;
printf("B\n");
for(int i = 0; i < 4; i++) {
for(int j = 0; j < 4; j++) {
printf("%d ",b[counter]);
counter++;
}
printf("\n");
}
counter = 0;
printf("C\n");
for(int i = 0; i < 4; i++) {
for(int j = 0; j < 4; j++) {
printf("%d ",c[counter]);
counter++;
}
printf("\n");
}
//Free GPU memory
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
return 0;
}
|
#include <stdio.h>
#include <hip/hip_runtime.h>
int *a, *b; // host data
int *c, *c2; // results
//Cuda error checking - not mandatory
void cudaCheckError() {
hipError_t e=hipGetLastError();
if(e!=hipSuccess) {
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e));
exit(0);
}
}
//GPU kernel
__global__
void matrixMultiply(int* A,int* B,int* C, int N){
int ai = threadIdx.y * blockDim.x;
int bi = threadIdx.x;
int ci = threadIdx.x + threadIdx.y * blockDim.x ;
int result = 0;
if(ci < N) {
for(int i = 0; i < 4; i++){
result += A[ai] * B[bi];
ai++;
bi += 4;
}
C[ci] = result;
}
}
int main(int argc,char **argv) {
printf("Begin \n");
//Iterations
int n=16; // number of matrix elements, x * y
//Buffer size in bytes
int nBytes = n*sizeof(int);
//memory allocation
a = (int *) malloc(nBytes);
b = (int *) malloc(nBytes);
c = (int *) malloc(nBytes);
c2 = (int *) malloc(nBytes);
// Data filling
for(int i=0;i<n;i++)
a[i]=i,b[i]=i;
printf("Allocating device memory on host..\n");
//GPU memory allocation
int *a_d,*b_d,*c_d;
hipMalloc((void **) &a_d, n*sizeof(int));
hipMalloc((void **) &b_d, n*sizeof(int));
hipMalloc((void **) &c_d, n*sizeof(int));
printf("Copying to device..\n");
hipMemcpy(a_d, a, n*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(b_d, b, n*sizeof(int), hipMemcpyHostToDevice);
//Work definition
dim3 threadsPerBlock(4, 4);
dim3 numBlocks(1, 1);
printf("Doing GPU Vector Multiplication\n");
matrixMultiply<<<numBlocks, threadsPerBlock>>>(a_d, b_d, c_d, n);
cudaCheckError();
//Wait for kernel call to finish
hipDeviceSynchronize();
//Copying data back to host, this is a blocking call and will not start until all kernels are finished
printf("Copying back to host...\n");
hipMemcpy(c, c_d, n*sizeof(int), hipMemcpyDeviceToHost);
int counter = 0;
printf("A\n");
for(int i = 0; i < 4; i++) {
for(int j = 0; j < 4; j++) {
printf("%d ",a[counter]);
counter++;
}
printf("\n");
}
counter = 0;
printf("B\n");
for(int i = 0; i < 4; i++) {
for(int j = 0; j < 4; j++) {
printf("%d ",b[counter]);
counter++;
}
printf("\n");
}
counter = 0;
printf("C\n");
for(int i = 0; i < 4; i++) {
for(int j = 0; j < 4; j++) {
printf("%d ",c[counter]);
counter++;
}
printf("\n");
}
//Free GPU memory
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
return 0;
}
|
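hipcc accepts the CUDA triple-chevron launch used above as-is; the macro spelling is hipLaunchKernelGGL, which takes the kernel, grid, block, dynamic shared-memory bytes, and stream before the kernel arguments. A hedged equivalent for the launch in main():
// Same launch in HIP macro form (0 bytes of dynamic LDS, default stream):
hipLaunchKernelGGL(matrixMultiply, numBlocks, threadsPerBlock, 0, 0,
                   a_d, b_d, c_d, n);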
Convert the following CUDA code to AMD GPU code:
cuda
#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <vector>
#define THREADS 64
// Error check-----
#define gpuErrchk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line,
bool abort = true) {
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
line);
if (abort)
exit(code);
}
}
// Error check-----
// It is a very good idea to wrap your calls with that function; otherwise
// you will not be notified of errors. Moreover, you may also want to look
// at how to use cuda-memcheck and cuda-gdb for debugging.
__global__ void deviceDFSk5(int *xadj, int *adj, int *nov, int *counter) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int ct[THREADS];
if (id < *nov) {
ct[threadIdx.x] = 0;
for (int i = xadj[id]; i < xadj[id + 1]; i++) {
// adj[i] are the neighbors of id vertex, none can be id by
// definition (no loops of len-1)
if (adj[i] != id) {
for (int j = xadj[adj[i]]; j < xadj[adj[i] + 1]; j++) {
// adj[j] are the neighbors of the second vertex on this path,
// they can't be id.
if (adj[j] != adj[i] && adj[j] != id) {
for (int k = xadj[adj[j]]; k < xadj[adj[j] + 1]; k++) {
// adj[k] are the neighbors of the third vertex,
// they can't be equal to id or the second.
if (adj[k] != adj[j] && adj[k] != adj[i] && adj[k] != id) {
for (int l = xadj[adj[k]]; l < xadj[adj[k] + 1]; l++) {
// adj[l] are the neighbors of the fourth vertex,
// they can't be equal to id, the second, or the third.
if (adj[l] != adj[k] && adj[l] != adj[j] &&
adj[l] != adj[i] && adj[l] != id) {
for (int m = xadj[adj[l]]; m < xadj[adj[l] + 1]; m++) {
// adj[m] are the neighbors of the fifth vertex;
// the 5-cycle closes when one of them equals id.
if (adj[m] == id) {
ct[threadIdx.x]++;
}
}
}
}
}
}
}
}
}
}
counter[id] = ct[threadIdx.x];
}
}
__global__ void deviceDFSk4(int *xadj, int *adj, int *nov, int *counter) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int ct[THREADS];
if (id < *nov) {
ct[threadIdx.x] = 0;
for (int i = xadj[id]; i < xadj[id + 1]; i++) {
// adj[i] are the neighbors of id vertex, none can be id by
// definition (no loops of len-1)
if (adj[i] != id) {
for (int j = xadj[adj[i]]; j < xadj[adj[i] + 1]; j++) {
// adj[j] are the neighbors of the second vertex on this path,
// they can't be id.
if (adj[j] != adj[i] && adj[j] != id) {
for (int k = xadj[adj[j]]; k < xadj[adj[j] + 1]; k++) {
// adj[k] are the neighbors of the third vertex,
// they can't be equal to id or the second.
if (adj[k] != adj[j] && adj[k] != adj[i] && adj[k] != id) {
for (int l = xadj[adj[k]]; l < xadj[adj[k] + 1]; l++) {
// adj[l] are the neighbors of the fourth vertex,
if (adj[l] == id) {
ct[threadIdx.x]++;
}
}
}
}
}
}
}
}
counter[id] = ct[threadIdx.x];
}
}
__global__ void deviceDFSk3(int *xadj, int *adj, int *nov, int *counter) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < *nov) {
for (int i = xadj[id]; i < xadj[id + 1]; i++) {
if (adj[i] > id) {
for (int j = xadj[adj[i]]; j < xadj[adj[i] + 1]; j++) {
if (adj[j] > adj[i] && adj[j] > id) {
for (int k = xadj[adj[j]]; k < xadj[adj[j] + 1]; k++) {
if (adj[k] == id) {
atomicAdd(&counter[id], 2);
atomicAdd(&counter[adj[i]], 2);
atomicAdd(&counter[adj[j]], 2);
}
}
}
}
}
}
}
}
void wrapper(int *xadj, int *adj, int *nov, int nnz, int k) {
cudaSetDevice(0);
int *d_xadj;
int *d_adj;
int *d_nov;
int *d_ct;
int *ct = new int[*nov];
cudaMalloc((void **)&d_xadj, (*nov + 1) * sizeof(int));
cudaMalloc((void **)&d_adj, nnz * sizeof(int));
cudaMalloc((void **)&d_nov, sizeof(int));
cudaMalloc((void **)&d_ct, (*nov) * sizeof(int));
if (k == 3) {
/*not that necessary, to ensure d_ct is set to zero in the least amount of
* lines possible for k=3*/
memset(ct, 0, (*nov) * sizeof(int));
cudaMemcpy(d_ct, ct, (*nov) * sizeof(int), cudaMemcpyHostToDevice);
}
cudaMemcpy(d_xadj, xadj, (*nov + 1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_adj, adj, (nnz) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_nov, nov, sizeof(int), cudaMemcpyHostToDevice);
gpuErrchk(cudaDeviceSynchronize());
#ifdef DEBUG
std::cout << "malloc copy done" << std::endl;
#endif
gpuErrchk(cudaDeviceSynchronize());
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventRecord(start, 0);
if (k == 3)
deviceDFSk3<<<(*nov + THREADS - 1) / THREADS, THREADS>>>(d_xadj, d_adj,
d_nov, d_ct);
if (k == 4)
deviceDFSk4<<<(*nov + THREADS - 1) / THREADS, THREADS>>>(d_xadj, d_adj,
d_nov, d_ct);
if (k == 5)
deviceDFSk5<<<(*nov + THREADS - 1) / THREADS, THREADS>>>(d_xadj, d_adj,
d_nov, d_ct);
gpuErrchk(cudaDeviceSynchronize());
cudaEventCreate(&stop);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaMemcpy(ct, d_ct, (*nov) * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < *nov; i++)
printf("%d %d\n", i, ct[i]);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("GPU took: %f s\n", elapsedTime / 1000);
cudaFree(d_xadj);
cudaFree(d_adj);
cudaFree(d_nov);
cudaFree(d_ct);
}
/*Read the given file and return CSR*/
void *read_edges(char *bin_name, int k) {
std::cout << "fname: " << bin_name << std::endl;
// count the newlines
unsigned int number_of_lines = 0;
FILE *infile = fopen(bin_name, "r");
int ch;
while (EOF != (ch = getc(infile)))
if ('\n' == ch)
++number_of_lines;
++number_of_lines;
#ifdef DEBUG
std::cout << number_of_lines << " lines" << std::endl;
#endif
fclose(infile);
// read the first line, set it to no vertices.
std::ifstream bp(bin_name);
int *no_vertices = new int;
std::string line;
int i, j, max = 0;
for (int iter = 0; iter < number_of_lines; iter++) {
std::getline(bp, line);
std::istringstream myss(line);
if (!(myss >> i >> j)) {
break;
}
if (i > max)
max = i;
if (j > max)
max = j;
}
bp.clear();
bp.seekg(0);
*no_vertices = max + 1;
int no_edges = (number_of_lines)*2; // bidirectional
/*TODO unique and no loop decreases this, we should resize adj
* accordingly. Not the end of the world, we will never reach those
* indices.*/
// if file ended with \n you'd keep it as is.
std::vector<int> *A = new std::vector<int>[*no_vertices];
// std::cout << "allocated A" << std::endl;
for (int iter = 0; iter < number_of_lines; iter++) {
std::getline(bp, line);
std::istringstream myss(line);
if (!(myss >> i >> j)) {
break;
}
#ifdef DEBUG
std::cout << i << " " << j << std::endl;
#endif
if (i != j) {
// ignore diagonal edges
A[i].push_back(j);
A[j].push_back(i);
}
}
for (int i = 0; i < *no_vertices; i++) {
std::sort(A[i].begin(), A[i].end());
// sort then unique.
// you may have 3 1 and 1 3
// if you do not sort, unique doesn't do what I think it would.
// also we prefer them sorted in case the file has 1 2 before 1 0 or
// sth. using default comparison:
std::vector<int>::iterator it;
it = std::unique(A[i].begin(), A[i].end()); // 10 20 30 20 10 ? ? ? ?
// ^
A[i].resize(std::distance(A[i].begin(), it)); // 10 20 30 20 10
}
int sum = 0;
int *xadj = new int[*no_vertices + 1]; // last one marks the end of the adj.
int *adj = new int[no_edges]; // there are m+1 lines (m '\n's), 2m edges.
xadj[0] = 0;
for (int i = 0; i < *no_vertices; i++) {
// adj.add_to_end(A[i])
for (int j = 0; j < A[i].size(); j++) {
adj[sum + j] = A[i][j];
}
sum += A[i].size();
xadj[i + 1] = sum;
}
std::cout << "Done reading." << std::endl;
wrapper(xadj, adj, no_vertices, no_edges, k);
return 0;
}
int main(int argc, char *argv[]) {
/*first arg is filename, second is k*/
// omp_set_num_threads(8);
read_edges(argv[1], atoi(argv[2]));
return 0;
}
|
#include <hip/hip_runtime.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <vector>
#define THREADS 64
// Error check-----
#define gpuErrchk(ans) \
{ gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line,
bool abort = true) {
if (code != hipSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file,
line);
if (abort)
exit(code);
}
}
// Error check-----
// It is a very good idea to wrap your calls with that function; otherwise
// you will not be notified of errors. Moreover, you may also want to look
// at how to use cuda-memcheck and cuda-gdb for debugging.
__global__ void deviceDFSk5(int *xadj, int *adj, int *nov, int *counter) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int ct[THREADS];
if (id < *nov) {
ct[threadIdx.x] = 0;
for (int i = xadj[id]; i < xadj[id + 1]; i++) {
// adj[i] are the neighbors of id vertex, none can be id by
// definition (no loops of len-1)
if (adj[i] != id) {
for (int j = xadj[adj[i]]; j < xadj[adj[i] + 1]; j++) {
// adj[j] are the neighbors of the second vertex on this path,
// they can't be id.
if (adj[j] != adj[i] && adj[j] != id) {
for (int k = xadj[adj[j]]; k < xadj[adj[j] + 1]; k++) {
// adj[k] are the neighbors of the third vertex,
// they can't be equal to id or the second.
if (adj[k] != adj[j] && adj[k] != adj[i] && adj[k] != id) {
for (int l = xadj[adj[k]]; l < xadj[adj[k] + 1]; l++) {
// adj[l] are the neighbors of the fourth vertex,
// they can't be equal to id, the second, or the third.
if (adj[l] != adj[k] && adj[l] != adj[j] &&
adj[l] != adj[i] && adj[l] != id) {
for (int m = xadj[adj[l]]; m < xadj[adj[l] + 1]; m++) {
// adj[m] are the neighbors of the fifth vertex;
// the 5-cycle closes when one of them equals id.
if (adj[m] == id) {
ct[threadIdx.x]++;
}
}
}
}
}
}
}
}
}
}
counter[id] = ct[threadIdx.x];
}
}
__global__ void deviceDFSk4(int *xadj, int *adj, int *nov, int *counter) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int ct[THREADS];
if (id < *nov) {
ct[threadIdx.x] = 0;
for (int i = xadj[id]; i < xadj[id + 1]; i++) {
// adj[i] are the neighbors of id vertex, none can be id by
// definition (no loops of len-1)
if (adj[i] != id) {
for (int j = xadj[adj[i]]; j < xadj[adj[i] + 1]; j++) {
// adj[j] are the neighbors of the second vertex on this path,
// they can't be id.
if (adj[j] != adj[i] && adj[j] != id) {
for (int k = xadj[adj[j]]; k < xadj[adj[j] + 1]; k++) {
// adj[k] are the neighbors of the third vertex,
// they can't be equal to id or the second.
if (adj[k] != adj[j] && adj[k] != adj[i] && adj[k] != id) {
for (int l = xadj[adj[k]]; l < xadj[adj[k] + 1]; l++) {
// adj[l] are the neighbors of the fourth vertex,
if (adj[l] == id) {
ct[threadIdx.x]++;
}
}
}
}
}
}
}
}
counter[id] = ct[threadIdx.x];
}
}
__global__ void deviceDFSk3(int *xadj, int *adj, int *nov, int *counter) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < *nov) {
for (int i = xadj[id]; i < xadj[id + 1]; i++) {
if (adj[i] > id) {
for (int j = xadj[adj[i]]; j < xadj[adj[i] + 1]; j++) {
if (adj[j] > adj[i] && adj[j] > id) {
for (int k = xadj[adj[j]]; k < xadj[adj[j] + 1]; k++) {
if (adj[k] == id) {
atomicAdd(&counter[id], 2);
atomicAdd(&counter[adj[i]], 2);
atomicAdd(&counter[adj[j]], 2);
}
}
}
}
}
}
}
}
void wrapper(int *xadj, int *adj, int *nov, int nnz, int k) {
hipSetDevice(0);
int *d_xadj;
int *d_adj;
int *d_nov;
int *d_ct;
int *ct = new int[*nov];
hipMalloc((void **)&d_xadj, (*nov + 1) * sizeof(int));
hipMalloc((void **)&d_adj, nnz * sizeof(int));
hipMalloc((void **)&d_nov, sizeof(int));
hipMalloc((void **)&d_ct, (*nov) * sizeof(int));
if (k == 3) {
/*not that necessary, to ensure d_ct is set to zero in the least amount of
* lines possible for k=3*/
memset(ct, 0, (*nov) * sizeof(int));
hipMemcpy(d_ct, ct, (*nov) * sizeof(int), hipMemcpyHostToDevice);
}
hipMemcpy(d_xadj, xadj, (*nov + 1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_adj, adj, (nnz) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_nov, nov, sizeof(int), hipMemcpyHostToDevice);
gpuErrchk(hipDeviceSynchronize());
#ifdef DEBUG
std::cout << "malloc copy done" << std::endl;
#endif
gpuErrchk(hipDeviceSynchronize());
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventRecord(start, 0);
if (k == 3)
deviceDFSk3<<<(*nov + THREADS - 1) / THREADS, THREADS>>>(d_xadj, d_adj,
d_nov, d_ct);
if (k == 4)
deviceDFSk4<<<(*nov + THREADS - 1) / THREADS, THREADS>>>(d_xadj, d_adj,
d_nov, d_ct);
if (k == 5)
deviceDFSk5<<<(*nov + THREADS - 1) / THREADS, THREADS>>>(d_xadj, d_adj,
d_nov, d_ct);
gpuErrchk(hipDeviceSynchronize());
hipEventCreate(&stop);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipMemcpy(ct, d_ct, (*nov) * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < *nov; i++)
printf("%d %d\n", i, ct[i]);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
printf("GPU took: %f s\n", elapsedTime / 1000);
hipFree(d_xadj);
hipFree(d_adj);
hipFree(d_nov);
hipFree(d_ct);
}
/*Read the given file and return CSR*/
void *read_edges(char *bin_name, int k) {
std::cout << "fname: " << bin_name << std::endl;
// count the newlines
unsigned int number_of_lines = 0;
FILE *infile = fopen(bin_name, "r");
int ch;
while (EOF != (ch = getc(infile)))
if ('\n' == ch)
++number_of_lines;
++number_of_lines;
#ifdef DEBUG
std::cout << number_of_lines << " lines" << std::endl;
#endif
fclose(infile);
// read the first line, set it to no vertices.
std::ifstream bp(bin_name);
int *no_vertices = new int;
std::string line;
int i, j, max = 0;
for (int iter = 0; iter < number_of_lines; iter++) {
std::getline(bp, line);
std::istringstream myss(line);
if (!(myss >> i >> j)) {
break;
}
if (i > max)
max = i;
if (j > max)
max = j;
}
bp.clear();
bp.seekg(0);
*no_vertices = max + 1;
int no_edges = (number_of_lines)*2; // bidirectional
/*TODO unique and no loop decreases this, we should resize adj
* accordingly. Not the end of the world, we will never reach those
* indices.*/
// if file ended with \n you'd keep it as is.
std::vector<int> *A = new std::vector<int>[*no_vertices];
// std::cout << "allocated A" << std::endl;
for (int iter = 0; iter < number_of_lines; iter++) {
std::getline(bp, line);
std::istringstream myss(line);
if (!(myss >> i >> j)) {
break;
}
#ifdef DEBUG
std::cout << i << " " << j << std::endl;
#endif
if (i != j) {
// ignore diagonal edges
A[i].push_back(j);
A[j].push_back(i);
}
}
for (int i = 0; i < *no_vertices; i++) {
std::sort(A[i].begin(), A[i].end());
// sort then unique.
// you may have 3 1 and 1 3
// if you do not sort, unique doesn't do what I think it would.
// also we prefer them sorted in case the file has 1 2 before 1 0 or
// sth. using default comparison:
std::vector<int>::iterator it;
it = std::unique(A[i].begin(), A[i].end()); // 10 20 30 20 10 ? ? ? ?
// ^
A[i].resize(std::distance(A[i].begin(), it)); // 10 20 30 20 10
}
int sum = 0;
int *xadj = new int[*no_vertices + 1]; // last one marks the end of the adj.
int *adj = new int[no_edges]; // there are m+1 lines (m '\n's), 2m edges.
xadj[0] = 0;
for (int i = 0; i < *no_vertices; i++) {
// adj.add_to_end(A[i])
for (int j = 0; j < A[i].size(); j++) {
adj[sum + j] = A[i][j];
}
sum += A[i].size();
xadj[i + 1] = sum;
}
std::cout << "Done reading." << std::endl;
wrapper(xadj, adj, no_vertices, no_edges, k);
return 0;
}
int main(int argc, char *argv[]) {
/*first arg is filename, second is k*/
// omp_set_num_threads(8);
read_edges(argv[1], atoi(argv[2]));
return 0;
}
|
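atomicAdd on int in global memory carries over unchanged, and the file's own advice about wrapping calls applies equally on the HIP side. A hedged sketch of using its gpuErrchk macro on the allocation and copy in wrapper() (same semantics, just checked):
gpuErrchk(hipMalloc((void **)&d_xadj, (*nov + 1) * sizeof(int)));
gpuErrchk(hipMemcpy(d_xadj, xadj, (*nov + 1) * sizeof(int),
                    hipMemcpyHostToDevice));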
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and each thread accessing its neighbor in parallel
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void simple_addition(int *a, int *b,int *c,int len)
{
int tid=threadIdx.x +blockIdx.x*blockDim.x ;
//while (tid<len)
c[tid]=a[tid]+b[tid];
//printf("I am block: %d with tid: %d Result: %d \n",blockIdx.x,tid,c[tid]);
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and each thread accessing its neighbor in parallel
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
__global__ void simple_addition(int *a, int *b,int *c,int len)
{
int tid=threadIdx.x +blockIdx.x*blockDim.x ;
//while (tid<len)
c[tid]=a[tid]+b[tid];
//printf("I am block: %d with tid: %d Result: %d \n",blockIdx.x,tid,c[tid]);
}
|
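The kernel above only stays in bounds if the launch covers exactly len elements (its guard is commented out). A hedged variant with the usual bounds check, for grids rounded up to a multiple of the block size (the kernel name is illustrative):
#include <hip/hip_runtime.h>
// Sketch: guarded element-wise add; safe when gridDim.x*blockDim.x >= len.
__global__ void guarded_addition(const int *a, const int *b, int *c, int len)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < len)
        c[tid] = a[tid] + b[tid];
}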
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <cmath>
#define N 1024
#define BLOCK_SIZE 16
typedef struct
{
float *elements;
int width;
int height;
} Matrix;
void cpu_gj(Matrix A, float *x, float *b, float eps)
{
float res = 1.0f;
float summ1, summ2;
float *temp;
temp = new float[A.width];
int counter = 0;
while(res > eps)
{
summ2 = 0.0f;
for(int i = 0; i < A.width; i++)
{
summ1 = 0.0f;
for(int k =0; k < A.width; k++)
if(k!=i)
summ1 += A.elements[k + i*A.width]*x[k];
temp[i] = 1/A.elements[i+i*A.width]*(b[i] - summ1);
summ2 += abs(temp[i] - x[i]);
}
for(int i = 0; i < A.width; i++)
x[i] = temp[i];
res = summ2;
counter++;
if(counter==A.width)
break;
}
delete[] temp;
std::cout<<"Steps Taken to Convergence = "<< counter<<std::endl;
}
void load_Matrix(std::string file, Matrix A)
{
std::ifstream f;
f.open(file);
for( int i = 0; i <A.height; i++)
for(int j = 0; j < A.width; j++)
{
f >> A.elements[j + A.width*i];
}
f.close();
}
__global__ void naive_gj(Matrix A, float *x, float *xout, float *b) //Computes one iteration of GJ
{
int gid = threadIdx.x + blockIdx.x*blockDim.x;
float summ1 = 0.0f;
float temp;
for (int k =0; k < A.width; k++)
{
if(k!= gid)
summ1 += A.elements[k + gid*A.width]*x[k];
}
temp = 1.0f/A.elements[gid + gid*A.width]*(b[gid] - summ1);
xout[gid] = temp;
}
__global__ void compute_r(float *xold, const float *xnew) //store abs(diff) in xold
{
int gid = threadIdx.x + blockDim.x*blockIdx.x;
float temp = fabs(xnew[gid] - xold[gid]);
xold[gid] = temp;
}
__global__ void reduce_r(float * d_out, const float *d_in)
{
// sdata is allocated in the kernel call: via dynamic shared memory
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
//load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // always sync before using sdata
//do reduction over shared memory
for(int s = blockDim.x/2; s>0; s >>=1)
{
if(tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); //make sure all additions are finished
}
//only tid 0 writes out result!
if(tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
__global__ void fill(float *xout, float *xin)
{
int gid = threadIdx.x + blockDim.x*blockIdx.x;
xout[gid] = xin[gid];
}
void par_gj(Matrix A, float *x, float *b, float eps)
{
float res = 1.0f;
int counter = 0;
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
float *d_x, *d_b, *d_xnew;
float *dres;
dres = (float*)malloc(sizeof(float));
cudaMalloc((void**)&d_A.elements, A.width*A.height*sizeof(float));
cudaMalloc((void**)&d_x, A.width*sizeof(float));
cudaMalloc((void**)&d_b, A.height*sizeof(float));
cudaMalloc((void**)&d_xnew, A.width*sizeof(float));
cudaMemcpy(d_A.elements,A.elements,A.width*A.height*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, A.width*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, A.height*sizeof(float),cudaMemcpyHostToDevice);
dim3 dimBlock(16);
dim3 dimGrid((A.width+ dimBlock.x - 1)/dimBlock.x);
while(res>eps)
{
//Compute x^{n+1}
naive_gj<<<dimGrid,dimBlock>>>(d_A, d_x, d_xnew, d_b);
cudaDeviceSynchronize();
//Compute vector of residuals
compute_r<<<dimGrid,dimBlock>>>(d_x,d_xnew); //Store r in d_x
cudaDeviceSynchronize();
//Reduce vector of residuals to find norm
reduce_r<<<1,N, N*sizeof(float)>>>(d_x, d_x);
cudaMemcpy(dres, d_x, sizeof(float), cudaMemcpyDeviceToHost);
res = dres[0];
std::cout<<res<<std::endl;
//X = Xnew
fill<<<dimGrid,dimBlock>>>(d_x, d_xnew);
cudaDeviceSynchronize();
counter++;
if(counter==A.width)
break;
}
std::cout<<"Steps Taken to Convergence = "<< counter<<std::endl;
//export X
cudaMemcpy(x, d_x, A.width*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_A.elements);
cudaFree(d_xnew);
cudaFree(d_b);
}
int main()
{
// Matrix stuff!
Matrix A;
A.width = N;
A.height = N;
A.elements = (float*)malloc(N*N*sizeof(float));
load_Matrix("matrix.dat", A);
// Vector stuff!
float *x, *b;
x = (float*)malloc(N*sizeof(float));
b = (float*)malloc(N*sizeof(float));
for(int i =0; i < N; i++)
{
b[i] = 1.0f;
x[i] = 0.0f;
}
// Gauss-Jacobi Parameters
float eps = 1e-7;
// Call the Gauss-Jacobi algorithms
par_gj(A, x, b, eps);
std::cout<<"Soln X = "<<std::endl;
for(int i = 0; i <10; i++)
std::cout<< x[i] <<std::endl; // */
free(x);
free(b);
free(A.elements);
}
|
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <cmath>
#define N 1024
#define BLOCK_SIZE 16
typedef struct
{
float *elements;
int width;
int height;
} Matrix;
void cpu_gj(Matrix A, float *x, float *b, float eps)
{
float res = 1.0f;
float summ1, summ2;
float *temp;
temp = new float[A.width];
int counter = 0;
while(res > eps)
{
summ2 = 0.0f;
for(int i = 0; i < A.width; i++)
{
summ1 = 0.0f;
for(int k =0; k < A.width; k++)
if(k!=i)
summ1 += A.elements[k + i*A.width]*x[k];
temp[i] = 1/A.elements[i+i*A.width]*(b[i] - summ1);
summ2 += abs(temp[i] - x[i]);
}
for(int i = 0; i < A.width; i++)
x[i] = temp[i];
res = summ2;
counter++;
if(counter==A.width)
break;
}
delete[] temp;
std::cout<<"Steps Taken to Convergence = "<< counter<<std::endl;
}
void load_Matrix(std::string file, Matrix A)
{
std::ifstream f;
f.open(file);
for( int i = 0; i <A.height; i++)
for(int j = 0; j < A.width; j++)
{
f >> A.elements[j + A.width*i];
}
f.close();
}
__global__ void naive_gj(Matrix A, float *x, float *xout, float *b) //Computes one iteration of GJ
{
int gid = threadIdx.x + blockIdx.x*blockDim.x;
float summ1 = 0.0f;
float temp;
for (int k =0; k < A.width; k++)
{
if(k!= gid)
summ1 += A.elements[k + gid*A.width]*x[k];
}
temp = 1.0f/A.elements[gid + gid*A.width]*(b[gid] - summ1);
xout[gid] = temp;
}
__global__ void compute_r(float *xold, const float *xnew) //store abs(diff) in xold
{
int gid = threadIdx.x + blockDim.x*blockIdx.x;
float temp = fabs(xnew[gid] - xold[gid]);
xold[gid] = temp;
}
__global__ void reduce_r(float * d_out, const float *d_in)
{
// sdata is allocated in the kernel call: via dynamic shared memory
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x*blockIdx.x;
int tid = threadIdx.x;
//load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // always sync before using sdata
//do reduction over shared memory
for(int s = blockDim.x/2; s>0; s >>=1)
{
if(tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); //make sure all additions are finished
}
//only tid 0 writes out result!
if(tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
__global__ void fill(float *xout, float *xin)
{
int gid = threadIdx.x + blockDim.x*blockIdx.x;
xout[gid] = xin[gid];
}
void par_gj(Matrix A, float *x, float *b, float eps)
{
float res = 1.0f;
int counter = 0;
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
float *d_x, *d_b, *d_xnew;
float *dres;
dres = (float*)malloc(sizeof(float));
hipMalloc((void**)&d_A.elements, A.width*A.height*sizeof(float));
hipMalloc((void**)&d_x, A.width*sizeof(float));
hipMalloc((void**)&d_b, A.height*sizeof(float));
hipMalloc((void**)&d_xnew, A.width*sizeof(float));
hipMemcpy(d_A.elements,A.elements,A.width*A.height*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_x, x, A.width*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_b, b, A.height*sizeof(float),hipMemcpyHostToDevice);
dim3 dimBlock(16);
dim3 dimGrid((A.width+ dimBlock.x - 1)/dimBlock.x);
while(res>eps)
{
//Compute x^{n+1}
naive_gj<<<dimGrid,dimBlock>>>(d_A, d_x, d_xnew, d_b);
hipDeviceSynchronize();
//Compute vector of residuals
compute_r<<<dimGrid,dimBlock>>>(d_x,d_xnew); //Store r in d_x
hipDeviceSynchronize();
//Reduce vector of residuals to find norm
reduce_r<<<1,N, N*sizeof(float)>>>(d_x, d_x);
hipMemcpy(dres, d_x, sizeof(float), hipMemcpyDeviceToHost);
res = dres[0];
std::cout<<res<<std::endl;
//X = Xnew
fill<<<dimGrid,dimBlock>>>(d_x, d_xnew);
hipDeviceSynchronize();
counter++;
if(counter==A.width)
break;
}
std::cout<<"Steps Taken to Convergence = "<< counter<<std::endl;
//export X
hipMemcpy(x, d_x, A.width*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_x);
hipFree(d_A.elements);
hipFree(d_xnew);
hipFree(d_b);
}
int main()
{
// Matrix stuff!
Matrix A;
A.width = N;
A.height = N;
A.elements = (float*)malloc(N*N*sizeof(float));
load_Matrix("matrix.dat", A);
// Vector stuff!
float *x, *b;
x = (float*)malloc(N*sizeof(float));
b = (float*)malloc(N*sizeof(float));
for(int i =0; i < N; i++)
{
b[i] = 1.0f;
x[i] = 0.0f;
}
// Gauss-Jacobi Parameters
float eps = 1e-7;
// Call the Gauss-Jacobi algorithms
par_gj(A, x, b, eps);
std::cout<<"Soln X = "<<std::endl;
for(int i = 0; i <10; i++)
std::cout<< x[i] <<std::endl; // */
free(x);
free(b);
free(A.elements);
}
|
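The Gauss-Jacobi port above calls hipMalloc/hipMemcpy without checking return codes. A minimal error-checking macro is a common addition; this is a sketch of our own (the HIP runtime ships no such macro by default), not part of the original code:

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
// Hypothetical convenience macro: abort on any failed HIP runtime call.
#define HIP_CHECK(call)                                                  \
    do {                                                                 \
        hipError_t err_ = (call);                                        \
        if (err_ != hipSuccess) {                                        \
            std::fprintf(stderr, "HIP error %s at %s:%d\n",              \
                         hipGetErrorString(err_), __FILE__, __LINE__);   \
            std::exit(EXIT_FAILURE);                                     \
        }                                                                \
    } while (0)
// Usage: HIP_CHECK(hipMalloc((void**)&d_x, A.width*sizeof(float)));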
Convert the following CUDA code to AMD GPU code:
cuda
/*
* EyLeftUpdater.cpp
*
* Created on: Feb 1, 2016
* Author: aleksandr
*/
#include "EyLeftUpdater.h"
#include "SmartIndex.h"
/*
* indx must lie in the range [0, sizeY-1]
*/
__device__
void EyLeftUpdater::operator() (const int indx) {
int n = indx;
Ey(0, n) = coeff[0]*(Ey(2, n) + EyLeft(0, 1, n)) +
coeff[1] * (EyLeft(0, 0, n) + EyLeft(2, 0, n) - Ey(1, n) - EyLeft(1, 1, n)) +
coeff[2] * EyLeft(1, 0, n) - EyLeft(2, 1, n);
for (int m = 0; m < 3; m++) {
EyLeft(m, 1, n) = EyLeft(m, 0, n);
EyLeft(m, 0, n) = Ey(m, n);
}
}
|
/*
* EyLeftUpdater.cpp
*
* Created on: Feb 1, 2016
* Author: aleksandr
*/
#include <hip/hip_runtime.h>
#include "EyLeftUpdater.h"
#include "SmartIndex.h"
/*
* indx must lie in the range [0, sizeY-1]
*/
__device__
void EyLeftUpdater::operator() (const int indx) {
int n = indx;
Ey(0, n) = coeff[0]*(Ey(2, n) + EyLeft(0, 1, n)) +
coeff[1] * (EyLeft(0, 0, n) + EyLeft(2, 0, n) - Ey(1, n) - EyLeft(1, 1, n)) +
coeff[2] * EyLeft(1, 0, n) - EyLeft(2, 1, n);
for (int m = 0; m < 3; m++) {
EyLeft(m, 1, n) = EyLeft(m, 0, n);
EyLeft(m, 0, n) = Ey(m, n);
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/* Histogram generation on the GPU.
Host-side code.
Author: Naga Kandasamy
Date modified: 3/11/2017
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <float.h>
#define THREAD_BLOCK_SIZE 256
#define NUM_BLOCKS 60 // Define the size of a tile
#define HISTOGRAM_SIZE 256 // Histogram has 256 bins
// includes, kernels
#include "histogram_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void run_test(int);
void compute_on_device(int *, int *, int, int);
void check_for_error(const char *);
extern "C" void compute_gold(int *, int *, int, int);
void check_histogram(int *, int, int);
int
main( int argc, char** argv)
{
if(argc != 2){
printf("Usage: histogram <num elements> \n");
exit(0);
}
int num_elements = atoi(argv[1]);
run_test(num_elements);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Generate the histogram on the CPU and the GPU and compare results for correctness
////////////////////////////////////////////////////////////////////////////////
void run_test(int num_elements)
{
float diff;
int i;
int *histogram_on_cpu = (int *)malloc(sizeof(int) * HISTOGRAM_SIZE); // Space to store histogram generated by the CPU
int *histogram_on_gpu = (int *)malloc(sizeof(int) * HISTOGRAM_SIZE); // Space to store histogram generated by the GPU
// Allocate memory on the CPU for the input data
int size = sizeof(int) * num_elements;
int *input_data = (int *)malloc(size);
// Randomly generate input data. Initialize the input data to be integer values between 0 and (HISTOGRAM_SIZE - 1)
for(i = 0; i < num_elements; i++)
input_data[i] = floorf((HISTOGRAM_SIZE - 1) * (rand()/(float)RAND_MAX));
printf("Creating histrgram on the CPU.");
// Compute the reference solution on the CPU
struct timeval start, stop;
gettimeofday(&start, NULL);
compute_gold(input_data, histogram_on_cpu, num_elements, HISTOGRAM_SIZE);
gettimeofday(&stop, NULL);
printf("Elapsed time on the CPU = %f \n",stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
check_histogram(histogram_on_cpu, num_elements, HISTOGRAM_SIZE);
// Compute the result vector on the GPU
compute_on_device(input_data, histogram_on_gpu, num_elements, HISTOGRAM_SIZE);
check_histogram(histogram_on_gpu, num_elements, HISTOGRAM_SIZE);
// Compute the differences between the CPU and GPU results
diff = 0.0;
for(i = 0; i < HISTOGRAM_SIZE; i++)
diff = diff + abs(histogram_on_cpu[i] - histogram_on_gpu[i]);
printf("Difference between the CPU and GPU result: %f. \n", diff);
// cleanup memory
free(input_data);
free(histogram_on_cpu);
free(histogram_on_gpu);
return;
}
// Transfer the input data to the GPU, set up grid and thread dimensions, execute kernel function, and copy result back to the CPU
void compute_on_device(int *input_data, int *histogram, int num_elements, int histogram_size)
{
int *input_data_on_device = NULL;
int *histogram_on_device = NULL;
// Allocate space on the GPU for the input data
cudaMalloc((void**)&input_data_on_device, num_elements * sizeof(int));
cudaMemcpy(input_data_on_device, input_data, num_elements * sizeof(int), cudaMemcpyHostToDevice);
// Allocate space on the GPU for the histogram and initialize the contents to zero
cudaMalloc((void**)&histogram_on_device, histogram_size * sizeof(int));
cudaMemset(histogram_on_device, 0, histogram_size * sizeof(int));
// Set up the execution grid on the GPU
dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1);
dim3 grid(NUM_BLOCKS,1);
printf("Generating histogram on the GPU. \n");
struct timeval start, stop;
gettimeofday(&start, NULL);
// histogram_kernel_slow<<<grid, thread_block>>>(input_data_on_device, histogram_on_device, num_elements, histogram_size);
histogram_kernel_fast<<<grid, thread_block>>>(input_data_on_device, histogram_on_device, num_elements, histogram_size);
cudaThreadSynchronize();
gettimeofday(&stop, NULL);
printf("Elapsed time on the GPU = %f \n",stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
check_for_error("KERNEL FAILURE");
// Copy the result back from the GPU and store
cudaMemcpy(histogram, histogram_on_device, histogram_size * sizeof(int), cudaMemcpyDeviceToHost);
// Free memory on the GPU
cudaFree(input_data_on_device);
cudaFree(histogram_on_device);
}
void check_histogram(int *histogram, int num_elements, int histogram_size)
{
int sum = 0;
for(int i = 0; i < histogram_size; i++)
sum += histogram[i];
printf("Number of histogram entries = %d. \n", sum);
if(sum == num_elements)
printf("Histogram generated successfully. \n");
else
printf("Error generating histogram. \n");
printf("\n");
}
void check_for_error(const char *msg)
{
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err){
printf("CUDA ERROR: %s (%s). \n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
/* Histogram generation on the GPU.
Host-side code.
Author: Naga Kandasamy
Date modified: 3/11/2017
*/
// includes, system
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <float.h>
#define THREAD_BLOCK_SIZE 256
#define NUM_BLOCKS 60 // Define the size of a tile
#define HISTOGRAM_SIZE 256 // Histogram has 256 bins
// includes, kernels
#include "histogram_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void run_test(int);
void compute_on_device(int *, int *, int, int);
void check_for_error(const char *);
extern "C" void compute_gold(int *, int *, int, int);
void check_histogram(int *, int, int);
int
main( int argc, char** argv)
{
if(argc != 2){
printf("Usage: histogram <num elements> \n");
exit(0);
}
int num_elements = atoi(argv[1]);
run_test(num_elements);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Generate the histogram on the CPU and the GPU and compare results for correctness
////////////////////////////////////////////////////////////////////////////////
void run_test(int num_elements)
{
float diff;
int i;
int *histogram_on_cpu = (int *)malloc(sizeof(int) * HISTOGRAM_SIZE); // Space to store histogram generated by the CPU
int *histogram_on_gpu = (int *)malloc(sizeof(int) * HISTOGRAM_SIZE); // Space to store histogram generated by the GPU
// Allocate memory on the CPU for the input data
int size = sizeof(int) * num_elements;
int *input_data = (int *)malloc(size);
// Randomly generate input data. Initialize the input data to be integer values between 0 and (HISTOGRAM_SIZE - 1)
for(i = 0; i < num_elements; i++)
input_data[i] = floorf((HISTOGRAM_SIZE - 1) * (rand()/(float)RAND_MAX));
printf("Creating histrgram on the CPU.");
// Compute the reference solution on the CPU
struct timeval start, stop;
gettimeofday(&start, NULL);
compute_gold(input_data, histogram_on_cpu, num_elements, HISTOGRAM_SIZE);
gettimeofday(&stop, NULL);
printf("Elapsed time on the CPU = %f \n",stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
check_histogram(histogram_on_cpu, num_elements, HISTOGRAM_SIZE);
// Compute the result vector on the GPU
compute_on_device(input_data, histogram_on_gpu, num_elements, HISTOGRAM_SIZE);
check_histogram(histogram_on_gpu, num_elements, HISTOGRAM_SIZE);
// Compute the differences between the CPU and GPU results
diff = 0.0;
for(i = 0; i < HISTOGRAM_SIZE; i++)
diff = diff + abs(histogram_on_cpu[i] - histogram_on_gpu[i]);
printf("Difference between the CPU and GPU result: %f. \n", diff);
// cleanup memory
free(input_data);
free(histogram_on_cpu);
free(histogram_on_gpu);
return;
}
// Transfer the input data to the GPU, set up grid and thread dimensions, execute kernel function, and copy result back to the CPU
void compute_on_device(int *input_data, int *histogram, int num_elements, int histogram_size)
{
int *input_data_on_device = NULL;
int *histogram_on_device = NULL;
// Allocate space on the GPU for the input data
hipMalloc((void**)&input_data_on_device, num_elements * sizeof(int));
hipMemcpy(input_data_on_device, input_data, num_elements * sizeof(int), hipMemcpyHostToDevice);
// Allocate space on the GPU for the histogram and initialize the contents to zero
hipMalloc((void**)&histogram_on_device, histogram_size * sizeof(int));
hipMemset(histogram_on_device, 0, histogram_size * sizeof(int));
// Set up the execution grid on the GPU
dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1);
dim3 grid(NUM_BLOCKS,1);
printf("Generating histogram on the GPU. \n");
struct timeval start, stop;
gettimeofday(&start, NULL);
// histogram_kernel_slow<<<grid, thread_block>>>(input_data_on_device, histogram_on_device, num_elements, histogram_size);
histogram_kernel_fast<<<grid, thread_block>>>(input_data_on_device, histogram_on_device, num_elements, histogram_size);
hipDeviceSynchronize();
gettimeofday(&stop, NULL);
printf("Elapsed time on the GPU = %f \n",stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
check_for_error("KERNEL FAILURE");
// Copy the result back from the GPU and store
hipMemcpy(histogram, histogram_on_device, histogram_size * sizeof(int), hipMemcpyDeviceToHost);
// Free memory on the GPU
hipFree(input_data_on_device);
hipFree(histogram_on_device);
}
void check_histogram(int *histogram, int num_elements, int histogram_size)
{
int sum = 0;
for(int i = 0; i < histogram_size; i++)
sum += histogram[i];
printf("Number of histogram entries = %d. \n", sum);
if(sum == num_elements)
printf("Histogram generated successfully. \n");
else
printf("Error generating histogram. \n");
printf("\n");
}
void check_for_error(const char *msg)
{
hipError_t err = hipGetLastError();
if(hipSuccess != err){
printf("CUDA ERROR: %s (%s). \n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
|
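hipcc accepts the <<<grid, block>>> launch syntax unchanged, but the same launch can also be written with HIP's portable launch macro; a sketch for the fast histogram kernel above, with arguments exactly as in the converted code:

hipLaunchKernelGGL(histogram_kernel_fast, grid, thread_block, 0, 0,
                   input_data_on_device, histogram_on_device,
                   num_elements, histogram_size);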
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx == 0)
sizes[idx] = accumulatedSize[0];
else if(idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx == 0)
sizes[idx] = accumulatedSize[0];
else if(idx < size)
{
sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1];
}
}
|
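A host-side launch covering all size elements might look as follows; the block size of 256 and the d_-prefixed buffer names are our assumptions, and the kernel's idx < size guard handles the overrun in the last block:

int threads = 256;
int blocks = (size + threads - 1) / threads;
unaccumulatedPartSizesKernel<<<blocks, threads>>>(size, d_accumulatedSize, d_sizes);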
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <sys/time.h>
__global__
void log1p(double p, double q, double * y)
{
y[0] = q + log1p(exp(p - q));
}
__global__
void log_1p(double p, double q, double * y)
{
y[0] = q + log(1 + exp(p - q));
}
int main(void)
{
double a, b;
double * y;
cudaMallocManaged(&y, sizeof(*y));
y[0] = 100.0;
printf("Case1:\n");
a = -7869.9955677831958382739685475826263427734375;
b = -7869.5160871966154445544816553592681884765625;
log1p<<<1, 1>>>(a, b, y);
printf("log1p CPU: %.60f\n", b + log1p(exp(a - b)));
cudaDeviceSynchronize();
printf("log1p GPU: %.60f\n", y[0]);
log_1p<<<1, 1>>>(a, b, y);
printf("log(1 + x) CPU: %.60f\n", b + log(1 + exp(a - b)));
cudaDeviceSynchronize();
printf("log(1 + x) GPU: %.60f\n", y[0]);
printf("Case2:\n");
a = -39983.496316437478526495397090911865234375;
b = -39983.274149101882358081638813018798828125;
log1p<<<1, 1>>>(a, b, y);
printf("log1p CPU: %.60f\n", b + log1p(exp(a - b)));
cudaDeviceSynchronize();
printf("log1p GPU: %.60f\n", y[0]);
log_1p<<<1, 1>>>(a, b, y);
printf("log(1 + x) CPU: %.60f\n", b + log(1 + exp(a - b)));
cudaDeviceSynchronize();
printf("log(1 + x) GPU: %.60f\n", y[0]);
printf("Case3:\n");
a = -2639.88414462528953663422726094722747802734375;
b = -2633.387596741364177432842552661895751953125;
log1p<<<1, 1>>>(a, b, y);
printf("log1p CPU: %.60f\n", b + log1p(exp(a - b)));
cudaDeviceSynchronize();
printf("log1p GPU: %.60f\n", y[0]);
log_1p<<<1, 1>>>(a, b, y);
printf("log(1 + x) CPU: %.60f\n", b + log(1 + exp(a - b)));
cudaDeviceSynchronize();
printf("log(1 + x) GPU: %.60f\n", y[0]);
cudaFree(y);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
__global__
void log1p(double p, double q, double * y)
{
y[0] = q + log1p(exp(p - q));
}
__global__
void log_1p(double p, double q, double * y)
{
y[0] = q + log(1 + exp(p - q));
}
int main(void)
{
double a, b;
double * y;
hipMallocManaged(&y, sizeof(*y));
y[0] = 100.0;
printf("Case1:\n");
a = -7869.9955677831958382739685475826263427734375;
b = -7869.5160871966154445544816553592681884765625;
log1p<<<1, 1>>>(a, b, y);
printf("log1p CPU: %.60f\n", b + log1p(exp(a - b)));
hipDeviceSynchronize();
printf("log1p GPU: %.60f\n", y[0]);
log_1p<<<1, 1>>>(a, b, y);
printf("log(1 + x) CPU: %.60f\n", b + log(1 + exp(a - b)));
hipDeviceSynchronize();
printf("log(1 + x) GPU: %.60f\n", y[0]);
printf("Case2:\n");
a = -39983.496316437478526495397090911865234375;
b = -39983.274149101882358081638813018798828125;
log1p<<<1, 1>>>(a, b, y);
printf("log1p CPU: %.60f\n", b + log1p(exp(a - b)));
hipDeviceSynchronize();
printf("log1p GPU: %.60f\n", y[0]);
log_1p<<<1, 1>>>(a, b, y);
printf("log(1 + x) CPU: %.60f\n", b + log(1 + exp(a - b)));
hipDeviceSynchronize();
printf("log(1 + x) GPU: %.60f\n", y[0]);
printf("Case3:\n");
a = -2639.88414462528953663422726094722747802734375;
b = -2633.387596741364177432842552661895751953125;
log1p<<<1, 1>>>(a, b, y);
printf("log1p CPU: %.60f\n", b + log1p(exp(a - b)));
hipDeviceSynchronize();
printf("log1p GPU: %.60f\n", y[0]);
log_1p<<<1, 1>>>(a, b, y);
printf("log(1 + x) CPU: %.60f\n", b + log(1 + exp(a - b)));
hipDeviceSynchronize();
printf("log(1 + x) GPU: %.60f\n", y[0]);
hipFree(y);
}
|
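One port-independent caveat: the kernels above reuse the names of the libm functions log1p and log, and the code relies on overload resolution by argument count to keep the two apart. Renaming the kernels avoids the shadowing entirely; a sketch (the _kernel suffix is our choice, not in the source):

__global__
void log1p_kernel(double p, double q, double *y)
{
    y[0] = q + log1p(exp(p - q)); // now unambiguously the math-library log1p
}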
Convert the following CUDA code to AMD GPU code:
cuda
/*
This is the function you need to implement. Quick reference:
- input rows: 0 <= y < ny
- input columns: 0 <= x < nx
- element at row y and column x is stored in data[x + y*nx]
- correlation between row i and row j has to be stored in result[i + j*ny]
- only parts with 0 <= j <= i < ny need to be filled
*/
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
#include <vector>
static inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
}
// params:
// data : transposed padding data
__global__ void correlate_gpu(int ny, int nx, const float*data, float *result, int new_ny){
const int nd=16;// nd: nd==blockDim.x==blockDim.y
// compute nd*nd results each thread.
int step=nd*nd;// each block will compute step*step results.
int ia=threadIdx.x;
int ja=threadIdx.y;
int ic=blockIdx.x;
int jc=blockIdx.y;
// int i=threadIdx.x+blockIdx.x*blockDim.x;
// int j=threadIdx.y+blockIdx.y*blockDim.y;
// if(i>=ny || j>=ny) return;
// if (i>j){
// result[i*ny+j]=0;
// return;
// }
float v[nd][nd];
// double temp=0;
for(int ib=0; ib<nd; ib++){
for(int jb=0; jb<nd; jb++){
v[ib][jb]=0;
}
}
for (int k=0; k<nx; ++k){
float x[nd];
float y[nd];
for(int ib=0; ib<nd; ib++){
int i=ic*step+ib*nd+ia;
x[ib]=data[k*new_ny +i];
}
for(int jb=0; jb<nd; jb++){
int j=jc*step+jb*nd+ja;
y[jb]=data[k*new_ny+j];
}
for(int ib=0; ib<nd; ib++){
for(int jb=0; jb<nd; jb++){
v[ib][jb]+=x[ib]*y[jb];
}
}
}
for(int ib=0; ib<nd; ib++){
for(int jb=0; jb<nd; jb++){
int i=ic*step+ib*nd+ia;
int j=jc*step+jb*nd+ja;
if(i<ny&&j<ny&&i<=j){
result[ny*i+j]=v[ib][jb];
}
}
}
// result[i*ny+j]=temp;
}
__global__ void padding_transpose(int ny, int nx, const float*data, float* result, int new_ny){
//result is padding and transpose data
int ja=threadIdx.x;
int i=blockIdx.y;
for (int jb=0; jb<nx; jb+=blockDim.x){
int j=jb+ja;
if (j>=nx) break;
float v=i<ny?data[i*nx+j]:0; //padding (row stride of the input is nx)
result[new_ny*j+i]=v; //transpose
}
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
static inline int roundup(int a, int b) {
return divup(a, b) * b;
}
void correlate(int ny, int nx, const float *data, float *result) {
// const int nd=16;//compute nd*nd results each thread. could not less than
const int block_size=16; //16*16 threads
const int step=block_size*block_size; // each block will compute step*step results.
int new_ny=roundup(ny,step);
//allocate memory & copy data to GPU
float *dGPU=NULL;
CHECK(cudaMalloc((void**)&dGPU,ny*nx*sizeof(float)));
float *padding=NULL;
CHECK(cudaMalloc((void**)&padding,new_ny*nx*sizeof(float)));
float *rGPU=NULL;
CHECK(cudaMalloc((void**)&rGPU,ny*ny*sizeof(float)));
// float *avg=new float[ny]{0};
// float *normalized=new float[ny*nx]{0};
// float *sqrtSqureSum=new float[ny]{0};
std::vector<float> avg(ny,0);
std::vector<float> normalized(ny*nx,0);
std::vector<float> sqrtSqureSum(ny,0);
std::vector<float> transposed(nx*new_ny,0);
for (int y=0; y<ny; ++y){
double temp=0;
for (int x=0; x<nx; ++x){
temp+=data[y*nx+x];
}
avg[y]=temp/nx;
}
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
normalized[y*nx+x]=data[y*nx+x]-avg[y];
}
}
// delete[] avg;
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
sqrtSqureSum[y]+=pow(normalized[y*nx+x],2);
}
sqrtSqureSum[y]=sqrt(sqrtSqureSum[y]);
}
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
normalized[y*nx+x]/=sqrtSqureSum[y];
}
}
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
std::cout << normalized[y*nx+x] << " ";
}
std::cout<< std::endl ;
}
cuda_memcpy(dGPU,normalized.data(),ny*nx,cudaMemcpyHostToDevice);
// Run kernel to padding and transpose
{
dim3 dimBlock(64,1);
dim3 dimGrid(1,new_ny);
padding_transpose<<<dimGrid,dimBlock>>>(ny,nx,dGPU,padding,new_ny);
CHECK(cudaGetLastError());
}
cuda_memcpy(transposed.data(), padding, new_ny * nx, cudaMemcpyDeviceToHost);
std::cout << new_ny<<std::endl;
std::cout << transposed.size()<<std::endl;
for(int x=0;x<nx;x++){
for(int y=0;y<new_ny;y++){
std::cout<< transposed[x*new_ny+y] << " ";
}
std::cout<< std::endl<< "----"<< std::endl;
}
// for (int x=0;x<nx;++x){
// for (int y=0; y<ny; ++y){
// transposed[x*ny+y]=normalized[y*nx+x];
// }
// }
// Run kernel to calculate cp
{
dim3 dimBlock(block_size,block_size);
dim3 dimGrid(new_ny/step,new_ny/step);
correlate_gpu<<<dimGrid,dimBlock>>>(ny,nx,padding,rGPU,new_ny);
CHECK(cudaGetLastError());
}
cuda_memcpy(result, rGPU, ny * ny, cudaMemcpyDeviceToHost);
// CHECK(cudaMemcpy(result, rGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(cudaFree(dGPU));
CHECK(cudaFree(padding));
CHECK(cudaFree(rGPU));
// delete[] normalized;
}
int main(){
int ny=3;
int nx=2;
// float data[]={+0.81472367, +0.90579194,
// +0.45150527, +0.49610928}; //result: 1 1 0 1
float data[]={
-1.00000000, +1.00000000,
+1.00000000, -1.00000000,
-1.00000000, +1.00000000
};
// float data[]={-1.0, 1.0, 1.0, -1.0, -1.0,1.0, -1.0, -1.0, 1.0, 1.0,
// -1.0, 1.0, 1.0, -1.0, -1.0,1.0, -1.0, -1.0, 1.0, 1.0,};
float result[9];
correlate(ny, nx, data, result);
std::cout<<"result"<<std::endl;
for (int i=0;i<4;i++){
std::cout<<result[i]<<" ";
}
// int nc=20;
// std::vector<std::tuple<int,int,int>> rows((1+nc)*nc/2);
// // #pragma omp parallel for
// for (int ia = 0; ia < nc; ++ia) {
// for (int ja = ia; ja < nc; ++ja) {
// int ija = _pdep_u32(ia, 0x55555555) | _pdep_u32(ja, 0xAAAAAAAA);
// int p= ia*nc-(ia-1)*ia/2;
// std::cout<<p+ja-ia<<std::endl;
// rows[p+(ja-ia)]=std::make_tuple(ija, ia, ja) ;
// }
// }
// std::sort(rows.begin(), rows.end());
// int i=0;
// std::cout<<"result"<<std::endl;
// for(auto row:rows){
// std::cout<< i << ","<<std::get<0>(row) << ":"
// << std::get<1>(row) << ","
// << std::get<2>(row) << std::endl;
// i++;
// }
// double4_t test={0,1,2,3};
// double4_t sw1=swap1(test);
// double4_t sw2=swap2(test);
// for (int i=0;i<4;i++){
// std::cout<<test[i]<<" ";
// }
// std::cout<<"result"<<std::endl;
// for (int i=0;i<4;i++){
// std::cout<<sw1[i]<<" ";
// }
// std::cout<<"result"<<std::endl;
// for (int i=0;i<4;i++){
// std::cout<<sw2[i]<<" ";
// }
return 0;
}
|
/*
This is the function you need to implement. Quick reference:
- input rows: 0 <= y < ny
- input columns: 0 <= x < nx
- element at row y and column x is stored in data[x + y*nx]
- correlation between row i and row j has to be stored in result[i + j*ny]
- only parts with 0 <= j <= i < ny need to be filled
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include <vector>
static inline void check(hipError_t err, const char* context) {
if (err != hipSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< hipGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, hipMemcpyKind direction) {
CHECK(hipMemcpy(target, source, num * sizeof(T), direction));
}
// params:
// data : transposed padding data
__global__ void correlate_gpu(int ny, int nx, const float*data, float *result, int new_ny){
const int nd=16;// nd: nd==blockDim.x==blockDim.y
// compute nd*nd results each thread.
int step=nd*nd;// each block will compute step*step results.
int ia=threadIdx.x;
int ja=threadIdx.y;
int ic=blockIdx.x;
int jc=blockIdx.y;
// int i=threadIdx.x+blockIdx.x*blockDim.x;
// int j=threadIdx.y+blockIdx.y*blockDim.y;
// if(i>=ny || j>=ny) return;
// if (i>j){
// result[i*ny+j]=0;
// return;
// }
float v[nd][nd];
// double temp=0;
for(int ib=0; ib<nd; ib++){
for(int jb=0; jb<nd; jb++){
v[ib][jb]=0;
}
}
for (int k=0; k<nx; ++k){
float x[nd];
float y[nd];
for(int ib=0; ib<nd; ib++){
int i=ic*step+ib*nd+ia;
x[ib]=data[k*new_ny +i];
}
for(int jb=0; jb<nd; jb++){
int j=jc*step+jb*nd+ja;
y[jb]=data[k*new_ny+j];
}
for(int ib=0; ib<nd; ib++){
for(int jb=0; jb<nd; jb++){
v[ib][jb]+=x[ib]*y[jb];
}
}
}
for(int ib=0; ib<nd; ib++){
for(int jb=0; jb<nd; jb++){
int i=ic*step+ib*nd+ia;
int j=jc*step+jb*nd+ja;
if(i<ny&&j<ny&&i<=j){
result[ny*i+j]=v[ib][jb];
}
}
}
// result[i*ny+j]=temp;
}
__global__ void padding_transpose(int ny, int nx, const float*data, float* result, int new_ny){
//result is padding and transpose data
int ja=threadIdx.x;
int i=blockIdx.y;
for (int jb=0; jb<nx; jb+=blockDim.x){
int j=jb+ja;
if (j>=nx) break;
float v=i<ny?data[i*nx+j]:0; //padding (row stride of the input is nx)
result[new_ny*j+i]=v; //transpose
}
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
static inline int roundup(int a, int b) {
return divup(a, b) * b;
}
void correlate(int ny, int nx, const float *data, float *result) {
// const int nd=16;//compute nd*nd results each thread. could not less than
const int block_size=16; //16*16 threads
const int step=block_size*block_size; // each block will compute step*step results.
int new_ny=roundup(ny,step);
//allocate memory & copy data to GPU
float *dGPU=NULL;
CHECK(hipMalloc((void**)&dGPU,ny*nx*sizeof(float)));
float *padding=NULL;
CHECK(hipMalloc((void**)&padding,new_ny*nx*sizeof(float)));
float *rGPU=NULL;
CHECK(hipMalloc((void**)&rGPU,ny*ny*sizeof(float)));
// float *avg=new float[ny]{0};
// float *normalized=new float[ny*nx]{0};
// float *sqrtSqureSum=new float[ny]{0};
std::vector<float> avg(ny,0);
std::vector<float> normalized(ny*nx,0);
std::vector<float> sqrtSqureSum(ny,0);
std::vector<float> transposed(nx*new_ny,0);
for (int y=0; y<ny; ++y){
double temp=0;
for (int x=0; x<nx; ++x){
temp+=data[y*nx+x];
}
avg[y]=temp/nx;
}
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
normalized[y*nx+x]=data[y*nx+x]-avg[y];
}
}
// delete[] avg;
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
sqrtSqureSum[y]+=pow(normalized[y*nx+x],2);
}
sqrtSqureSum[y]=sqrt(sqrtSqureSum[y]);
}
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
normalized[y*nx+x]/=sqrtSqureSum[y];
}
}
for (int y=0; y<ny; ++y){
for (int x=0; x<nx; ++x){
std::cout << normalized[y*nx+x] << " ";
}
std::cout<< std::endl ;
}
cuda_memcpy(dGPU,normalized.data(),ny*nx,hipMemcpyHostToDevice);
// Run kernel to padding and transpose
{
dim3 dimBlock(64,1);
dim3 dimGrid(1,new_ny);
padding_transpose<<<dimGrid,dimBlock>>>(ny,nx,dGPU,padding,new_ny);
CHECK(hipGetLastError());
}
cuda_memcpy(transposed.data(), padding, new_ny * nx, hipMemcpyDeviceToHost);
std::cout << new_ny<<std::endl;
std::cout << transposed.size()<<std::endl;
for(int x=0;x<nx;x++){
for(int y=0;y<new_ny;y++){
std::cout<< transposed[x*new_ny+y] << " ";
}
std::cout<< std::endl<< "----"<< std::endl;
}
// for (int x=0;x<nx;++x){
// for (int y=0; y<ny; ++y){
// transposed[x*ny+y]=normalized[y*nx+x];
// }
// }
// Run kernel to calculate cp
{
dim3 dimBlock(block_size,block_size);
dim3 dimGrid(new_ny/step,new_ny/step);
correlate_gpu<<<dimGrid,dimBlock>>>(ny,nx,padding,rGPU,new_ny);
CHECK(hipGetLastError());
}
cuda_memcpy(result, rGPU, ny * ny, hipMemcpyDeviceToHost);
// CHECK(cudaMemcpy(result, rGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost));
CHECK(hipFree(dGPU));
CHECK(hipFree(padding));
CHECK(hipFree(rGPU));
// delete[] normalized;
}
int main(){
int ny=3;
int nx=2;
// float data[]={+0.81472367, +0.90579194,
// +0.45150527, +0.49610928}; //result: 1 1 0 1
float data[]={
-1.00000000, +1.00000000,
+1.00000000, -1.00000000,
-1.00000000, +1.00000000
};
// float data[]={-1.0, 1.0, 1.0, -1.0, -1.0,1.0, -1.0, -1.0, 1.0, 1.0,
// -1.0, 1.0, 1.0, -1.0, -1.0,1.0, -1.0, -1.0, 1.0, 1.0,};
float result[9];
correlate(ny, nx, data, result);
std::cout<<"result"<<std::endl;
for (int i=0;i<4;i++){
std::cout<<result[i]<<" ";
}
// int nc=20;
// std::vector<std::tuple<int,int,int>> rows((1+nc)*nc/2);
// // #pragma omp parallel for
// for (int ia = 0; ia < nc; ++ia) {
// for (int ja = ia; ja < nc; ++ja) {
// int ija = _pdep_u32(ia, 0x55555555) | _pdep_u32(ja, 0xAAAAAAAA);
// int p= ia*nc-(ia-1)*ia/2;
// std::cout<<p+ja-ia<<std::endl;
// rows[p+(ja-ia)]=std::make_tuple(ija, ia, ja) ;
// }
// }
// std::sort(rows.begin(), rows.end());
// int i=0;
// std::cout<<"result"<<std::endl;
// for(auto row:rows){
// std::cout<< i << ","<<std::get<0>(row) << ":"
// << std::get<1>(row) << ","
// << std::get<2>(row) << std::endl;
// i++;
// }
// double4_t test={0,1,2,3};
// double4_t sw1=swap1(test);
// double4_t sw2=swap2(test);
// for (int i=0;i<4;i++){
// std::cout<<test[i]<<" ";
// }
// std::cout<<"result"<<std::endl;
// for (int i=0;i<4;i++){
// std::cout<<sw1[i]<<" ";
// }
// std::cout<<"result"<<std::endl;
// for (int i=0;i<4;i++){
// std::cout<<sw2[i]<<" ";
// }
return 0;
}
|
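HIP's event API mirrors CUDA's one-for-one, so timing the ported kernels requires no further renaming. A sketch (the event variables are ours) around the correlate_gpu launch above:

hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
correlate_gpu<<<dimGrid, dimBlock>>>(ny, nx, padding, rGPU, new_ny);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float ms = 0.0f;
hipEventElapsedTime(&ms, start, stop); // elapsed time in milliseconds
hipEventDestroy(start);
hipEventDestroy(stop);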
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#include<cuda.h>
# define M 1000
# define N 1000
__global__ void mult( int * a, int * b, int * c)
{
unsigned int i= blockDim.x *blockIdx.x + threadIdx.x;
unsigned int j= blockDim.y *blockIdx.y + threadIdx.y;
int sum=0;
if(i<M && j<N)
{
for(int k=0;k<N;k++)
{
sum+=(a[i*N+k]* b[k*N+j]);
}
c[i*N+j]=sum;
}
else
return ;
}
int check(int *a, int *b, int *c)
{
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
{
int sum=0;
for(int k=0;k<N;k++)
{
sum+=a[i*N+k]*b[k*N+j];
}
if(c[i*N+j]!=sum)
return 0;
}
}
return 1;
}
int main()
{
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
// allocating memory on host
h_a = (int *)malloc(M * N * sizeof(int));
h_b = (int *)malloc(M * N * sizeof(int));
h_c = (int *)malloc(M * N * sizeof(int));
//assigning constant values to the array elements
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
{
h_a[i*M+j]=1;
h_b[i*M+j]=2;
}
}
//allocating memory on the device
cudaMalloc((void **)&d_a, M*N*sizeof(int));
cudaMalloc((void **)&d_b, M*N*sizeof(int));
cudaMalloc((void **)&d_c, M*N*sizeof(int));
//copying elements from host to device
cudaMemcpy(d_a, h_a, M*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, M*N*sizeof(int), cudaMemcpyHostToDevice);
//declaring the number of blocks and number of threads per block
dim3 threads(32,32);
dim3 blocks(M/32+1, N/32+1);
//calling the kernel and computing the product on the device
mult<<< blocks, threads >>>(d_a, d_b, d_c);
//copying the result to host memory
cudaMemcpy(h_c, d_c, M*N*sizeof(int), cudaMemcpyDeviceToHost);
if(check(h_a, h_b, h_c))
printf("Matrix multiplication is correct\n");
else
printf("Matrix multiplication is incorrect\n");
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
|
#include<stdio.h>
#include<hip/hip_runtime.h>
# define M 1000
# define N 1000
__global__ void mult( int * a, int * b, int * c)
{
unsigned int i= blockDim.x *blockIdx.x + threadIdx.x;
unsigned int j= blockDim.y *blockIdx.y + threadIdx.y;
int sum=0;
if(i<M && j<N)
{
for(int k=0;k<N;k++)
{
sum+=(a[i*N+k]* b[k*N+j]);
}
c[i*N+j]=sum;
}
else
return ;
}
int check(int *a, int *b, int *c)
{
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
{
int sum=0;
for(int k=0;k<N;k++)
{
sum+=a[i*N+k]*b[k*N+j];
}
if(c[i*N+j]!=sum)
return 0;
}
}
return 1;
}
int main()
{
int *h_a, *h_b, *h_c;
int *d_a, *d_b, *d_c;
// allocating memory on host
h_a = (int *)malloc(M * N * sizeof(int));
h_b = (int *)malloc(M * N * sizeof(int));
h_c = (int *)malloc(M * N * sizeof(int));
//assigning constant values to the array elements
for(int i=0;i<M;i++)
{
for(int j=0;j<N;j++)
{
h_a[i*M+j]=1;
h_b[i*M+j]=2;
}
}
//allocating memory on the device
hipMalloc((void **)&d_a, M*N*sizeof(int));
hipMalloc((void **)&d_b, M*N*sizeof(int));
hipMalloc((void **)&d_c, M*N*sizeof(int));
//copying elements from host to device
hipMemcpy(d_a, h_a, M*N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, M*N*sizeof(int), hipMemcpyHostToDevice);
//declaring the number of blocks and number of threads per block
dim3 threads(32,32);
dim3 blocks(M/32+1, N/32+1);
//calling the kernel and computing the product on the device
mult<<< blocks, threads >>>(d_a, d_b, d_c);
//copying the result to host memory
hipMemcpy(h_c, d_c, M*N*sizeof(int), hipMemcpyDeviceToHost);
if(check(h_a, h_b, h_c))
printf("Matrix multiplication is correct\n");
else
printf("Matrix multiplication is incorrect\n");
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
|
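The grid above is sized as M/32+1, which launches one surplus block row whenever M is an exact multiple of 32 (harmless here, since the kernel guards i<M and j<N). The usual ceiling-division form avoids the surplus; a sketch with the same names:

dim3 threads(32, 32);
dim3 blocks((M + threads.x - 1) / threads.x, (N + threads.y - 1) / threads.y);
mult<<< blocks, threads >>>(d_a, d_b, d_c);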
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
/*
#define N 512
#define N 2048
#define THREADS_PER_BLOCK 512
*/
const int THREADS_PER_BLOCK = 32;
const int N = 2048;
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[N];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads(); // Avoids a race condition.
if( 0 == threadIdx.x ) {
int sum = 0;
for(int i = 0; i < N; i++ ) {
sum += temp[i]; //slow
}
*c = sum;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
/*
#define N 512
#define N 2048
#define THREADS_PER_BLOCK 512
*/
const int THREADS_PER_BLOCK = 32;
const int N = 2048;
__global__ void dotProd( int *a, int *b, int *c ) {
__shared__ int temp[N];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
__syncthreads(); // Avoids a race condition.
if( 0 == threadIdx.x ) {
int sum = 0;
for(int i = 0; i < N; i++ ) {
sum += temp[i]; //slow
}
*c = sum;
}
}
|
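The //slow comment marks the serial accumulation in thread 0. A shared-memory tree reduction is the standard replacement; a sketch assuming a single block of N threads with N a power of two (the original launch configuration is not shown, so this is an assumption):

__global__ void dotProdTree( int *a, int *b, int *c ) {
    __shared__ int temp[N];
    temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s)
            temp[threadIdx.x] += temp[threadIdx.x + s];
        __syncthreads(); // each level must finish before the next halving
    }
    if (threadIdx.x == 0)
        *c = temp[0];
}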
Convert the following CUDA code to AMD GPU code:
cuda
__global__ void
convolution2D(float *A,float *B,const int numRows,const int numCols)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i<numRows && j<numCols)
{
float pos1=0,pos2=0,pos3=0,pos4=0,pos5=0,pos6=0,pos7=0,pos8=0;
if((i-1)>=0 && (j-1)>=0)pos1 = A[(i-1)*numCols+(j-1)];
if((i-1)>=0)pos2 = A[(i-1)*numCols+(j)];
if((i-1)>=0 && (j+1)<numCols)pos3 = A[(i-1)*numCols+(j+1)];
if((j-1)>=0)pos4 = A[(i)*numCols+(j-1)];
if((j+1)<numCols)pos5 = A[(i)*numCols+(j+1)];
if((i+1)<numRows && (j-1)>=0)pos6 = A[(i+1)*numCols+(j-1)];
if((i+1)<numRows)pos7 = A[(i+1)*numCols+(j)];
if((i+1)<numRows && (j+1)<numCols)pos8 = A[(i+1)*numCols+(j+1)];
B[i*numCols + j] = (pos1 + pos2 + pos3 +pos4 + pos5 + pos6 + pos7 + pos8)/8;
}
}
|
#include <hip/hip_runtime.h>
__global__ void
convolution2D(float *A,float *B,const int numRows,const int numCols)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i<numRows && j<numCols)
{
float pos1=0,pos2=0,pos3=0,pos4=0,pos5=0,pos6=0,pos7=0,pos8=0;
if((i-1)>=0 && (j-1)>=0)pos1 = A[(i-1)*numCols+(j-1)];
if((i-1)>=0)pos2 = A[(i-1)*numCols+(j)];
if((i-1)>=0 && (j+1)<numCols)pos3 = A[(i-1)*numCols+(j+1)];
if((j-1)>=0)pos4 = A[(i)*numCols+(j-1)];
if((j+1)<numCols)pos5 = A[(i)*numCols+(j+1)];
if((i+1)<numRows && (j-1)>=0)pos6 = A[(i+1)*numCols+(j-1)];
if((i+1)<numRows)pos7 = A[(i+1)*numCols+(j)];
if((i+1)<numRows && (j+1)<numCols)pos8 = A[(i+1)*numCols+(j+1)];
B[i*numCols + j] = (pos1 + pos2 + pos3 +pos4 + pos5 + pos6 + pos7 + pos8)/8;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/* Write GPU code to perform the step(s) involved in counting sort.
Add additional kernels and device functions as needed. */
__global__ void counting_sort_kernel(int *input_array, int *sorted_array, int *histogram, int *scan, int num_elements, int range)
{
extern __shared__ int temp[];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int blockID = threadIdx.x;
int pout = 0, pin = 1;
int n = range + 1;
temp[blockID] = (blockID > 0) ? histogram[blockID - 1] : 0;
int offset;
for (offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout * n + blockID] = temp[pin * n + blockID];
if (blockID >= offset)
temp[pout * n + blockID] += temp[pin * n + blockID - offset];
}
__syncthreads();
scan[blockID] = temp[pout * n + blockID];
int j;
int start_idx = scan[threadID];
if (histogram[threadID] != 0)
for (j = 0; j < histogram[threadID]; j++)
sorted_array[start_idx + j] = threadID;
return;
}
__global__ void histogram_kernel_fast(int *input_data, int *histogram, int num_elements, int histogram_size)
{
extern __shared__ unsigned int s[];
if(threadIdx.x < histogram_size)
s[threadIdx.x] = 0;
__syncthreads();
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (offset < num_elements) {
atomicAdd(&s[input_data[offset]], 1);
offset += stride;
}
__syncthreads();
if(threadIdx.x < histogram_size)
atomicAdd(&(histogram[threadIdx.x]), s[threadIdx.x]);
}
|
#include <hip/hip_runtime.h>
/* Write GPU code to perform the step(s) involved in counting sort.
Add additional kernels and device functions as needed. */
__global__ void counting_sort_kernel(int *input_array, int *sorted_array, int *histogram, int *scan, int num_elements, int range)
{
extern __shared__ int temp[];
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
int blockID = threadIdx.x;
int pout = 0, pin = 1;
int n = range + 1;
temp[blockID] = (blockID > 0) ? histogram[blockID - 1] : 0;
int offset;
for (offset = 1; offset < n; offset *= 2) {
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout * n + blockID] = temp[pin * n + blockID];
if (blockID >= offset)
temp[pout * n + blockID] += temp[pin * n + blockID - offset];
}
__syncthreads();
scan[blockID] = temp[pout * n + blockID];
int j;
int start_idx = scan[threadID];
if (histogram[threadID] != 0)
for (j = 0; j < histogram[threadID]; j++)
sorted_array[start_idx + j] = threadID;
return;
}
__global__ void histogram_kernel_fast(int *input_data, int *histogram, int num_elements, int histogram_size)
{
extern __shared__ unsigned int s[];
if(threadIdx.x < histogram_size)
s[threadIdx.x] = 0;
__syncthreads();
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while (offset < num_elements) {
atomicAdd(&s[input_data[offset]], 1);
offset += stride;
}
__syncthreads();
if(threadIdx.x < histogram_size)
atomicAdd(&(histogram[threadIdx.x]), s[threadIdx.x]);
}
|
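The double-buffered scan in counting_sort_kernel indexes shared memory as temp[pout * n + blockID], so the launch must provide two buffers of n = range + 1 ints in dynamic shared memory. A host-side sketch (this sizing is our reading of the indexing, and the d_-prefixed names are hypothetical):

int n = range + 1;
counting_sort_kernel<<<1, n, 2 * n * sizeof(int)>>>(
    d_input, d_sorted, d_histogram, d_scan, num_elements, range);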
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void BoxReciprocalGPU(double *gpu_prefact, double *gpu_sumRnew, double *gpu_sumInew, double *gpu_energyRecip, int imageSize)
{
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if(threadID >= imageSize)
return;
gpu_energyRecip[threadID] = ((gpu_sumRnew[threadID] * gpu_sumRnew[threadID] +
gpu_sumInew[threadID] * gpu_sumInew[threadID]) *
gpu_prefact[threadID]);
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void BoxReciprocalGPU(double *gpu_prefact, double *gpu_sumRnew, double *gpu_sumInew, double *gpu_energyRecip, int imageSize)
{
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if(threadID >= imageSize)
return;
gpu_energyRecip[threadID] = ((gpu_sumRnew[threadID] * gpu_sumRnew[threadID] +
gpu_sumInew[threadID] * gpu_sumInew[threadID]) *
gpu_prefact[threadID]);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <device_functions.h>
//#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include <iostream>
#include <cstdlib>
#include <fstream>
#include <string>
#include <cmath>
#include <time.h>
using namespace std;
const int range = 128;
const int workPerThread = 64;//cannot be smaller than the range
int halfRange = range / 2;
float cube[range*range*range];
int weight[range*range*range];
bool ou[range][range][range];
//float p = 0.8;
__global__
void calculate(float *cubecu, int *weightcu, float *pixelcu, float *rotcu,float mx,float my,float mz,int range,int workPerThread)// float *mxcu, float *mycu, float *mzcu, int *rangecu)
{
float pos1[3];
float pos2[3];
int pos3[3];
int i, j, k,l,m;
float tmp1;
int posInCube;
int zdim;
__shared__ float rotcus[9];//,range;
__shared__ float pixelcus[10000];
int xtot = 99;
int ytot = 99;
int total = xtot*ytot;
int pixelstart= (threadIdx.y*blockDim.x + threadIdx.x)*64;
int pixelend=pixelstart+64;
if (pixelend > total)
pixelend = total;
for (i=pixelstart;i<pixelend;i++)//parallel fetch the depth image
pixelcus[i] = pixelcu[i];
if ((threadIdx.x == 0) && (threadIdx.y == 0) && (threadIdx.z == 0))
{
for (i = 0; i < 9; i++)
rotcus[i] = rotcu[i];
//for (i = 0; i <total; i++)
//pixelcus[i] = pixelcu[i];
}
__syncthreads();
i = blockDim.x*blockIdx.x + threadIdx.x;
j = blockDim.y*blockIdx.y + threadIdx.y;
for (zdim = 0; zdim < workPerThread; zdim++)
{
//k = blockDim.z*blockIdx.z + threadIdx.z;
k = workPerThread * blockIdx.z + zdim;
//cubecu[k*range*range + j*range + i] = rotcus[0];
pos1[0] = i*0.2 / (range)-0.1 - mx;
pos1[1] = j*0.2 / (range)-my;
pos1[2] = k*0.2 / (range)-0.1 - mz;
for (l = 0; l < 3; l++)
{
pos2[l] = 0;
for (m = 0; m < 3; m++)
{
pos2[l] = pos2[l] + rotcus[l * 3 + m] * pos1[m];
}
}
pos3[0] = int((pos2[0] + 0.1) / 0.002 + 0.5);
pos3[1] = int(pos2[1] / 0.002 + 0.5);
//if ((blockIdx.x == 0) && (blockIdx.y == 0) && (blockIdx.z == 0) && (threadIdx.x == 0) && (threadIdx.y == 0) && (threadIdx.z == 0))
//cubecu[k*range*range + j*range + i]=rotcus[0];
//cubecu[0] = 2.0;
if (((pos3[0] * ytot + pos3[1]) < xtot*ytot) && ((pos3[0] * ytot + pos3[1]) >= 0))
if (pixelcus[pos3[0] * ytot + pos3[1]] > -0.01)
{
//cubecu[k*range*range + j*range + i] = range;
tmp1 = (pixelcus[pos3[0] * ytot + pos3[1]] - pos2[2]) / 0.001;
if ((tmp1 > 1) || (tmp1 < -1))
tmp1 = 0;
posInCube = i*range*range + j*range + k;
cubecu[posInCube] = (cubecu[posInCube] * weightcu[posInCube] + tmp1) / (weightcu[posInCube] + 1);
weightcu[posInCube] = weightcu[posInCube] + 1;
}
}
}
void pro(string a1, float *cubecu, int *weightcu)//calculate rotation matrix and call the 'calculate' function
{
int xtot, ytot;
int i, j;
float mx, my, mz, rx, ry, rz,an;
float rot[9];
float pixel[100 * 100];
float *pixelcu, *rotcu;
a1 += "data.txt";
ifstream fin1(a1);
fin1 >> mx;
fin1 >> my;
fin1 >> mz;
fin1 >> rx;
fin1 >> ry;
fin1 >> rz;
fin1 >> an;
fin1 >> xtot;
fin1 >> ytot;
for (i = 0; i<xtot; i++)
for (j = 0; j<ytot; j++)
{
fin1 >> pixel[j+i*ytot];
}
fin1.close();
rot[0] = 1 - 2 * ry*ry - 2 * rz*rz;
rot[1] = 2 * rx*ry - 2 * rz*an;
rot[2] = 2 * rx*rz + 2 * ry*an;
rot[3] = 2 * rx*ry + 2 * rz*an;
rot[4] = 1 - 2 * rx*rx - 2 * rz*rz;
rot[5] = 2 * ry*rz - 2 * rx*an;
rot[6] = 2 * rx*rz - 2 * ry*an;
rot[7] = 2 * ry*rz + 2 * rx*an;
rot[8] = 1 - 2 * rx*rx - 2 * ry*ry;
dim3 gb(range/32, range/32, range/workPerThread);
dim3 tb(32, 32, 1);//I set the dimension of x and y as 32 because the max threadNum per block is 1024
cudaMalloc((void **)&pixelcu, xtot*ytot * sizeof(float));
cudaMemcpy(pixelcu, pixel, xtot*ytot*sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc((void **)&rotcu, 9 * sizeof(float));
cudaMemcpy(rotcu, rot, 9 * sizeof(float), cudaMemcpyHostToDevice);
calculate <<<gb, tb >>> (cubecu, weightcu, pixelcu, rotcu, mx, my, mz, range,workPerThread);
//cudaFree(&mxcu); cudaFree(&mycu); cudaFree(&mzcu); cudaFree(&rangecu);
cudaThreadSynchronize();
cudaFree(pixelcu);
cudaFree(rotcu);
}
int main()
{
char tmp;
int totalVertexOut;
int i, j, k;
float *cubecu;
int *weightcu;
clock_t startt = clock();
if (cudaSuccess != cudaMalloc((void **)&cubecu, range*range*range * sizeof(float)))
cout << "error1";
if (cudaSuccess != cudaMemset(cubecu, 0, range*range*range * sizeof(float)))
cout << "error2";
if (cudaSuccess != cudaMalloc((void **)&weightcu, range*range*range * sizeof(int)))
cout << "error3";
if (cudaSuccess != cudaMemset(weightcu, 0, range*range*range * sizeof(int)))
cout << "error4";
//----------------load the pixels and calculate the 3D matrix-----------------------
pro("bun000",cubecu,weightcu);
pro("bun090",cubecu,weightcu);
pro("bun180", cubecu, weightcu);
pro("bun270", cubecu, weightcu);
pro("bun045", cubecu, weightcu);
pro("ear_back", cubecu, weightcu);
pro("top2", cubecu, weightcu);
pro("top3", cubecu, weightcu);
pro("bun315", cubecu, weightcu);
pro("chin", cubecu, weightcu);
//-----------------------------------------------------
cudaMemcpy(cube, cubecu, range*range*range * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(weight, weightcu, range*range*range * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(cubecu);
cudaFree(weightcu);
tmp = char(10);
ofstream fout1("bunoutparallel.ply");
totalVertexOut = 0;
//----------------construct the final output model------------------------
for (i = 0; i<range - 1; i++)
for (j = 0; j<range - 1; j++)
for (k = 0; k<range - 1; k++)
{
ou[i][j][k] = false;
if (weight[i*range*range+j*range+k]>0)
{
if (weight[(i+1)*range*range+j*range+k]>0)
if (cube[i*range*range + j*range + k] * cube[(i + 1)*range*range + j*range + k]<0)
ou[i][j][k] = true;
if (weight[i*range*range+(j+1)*range+k]>0)
if (cube[i *range*range + j*range + k] * cube[i*range*range + (j + 1)*range + k]<0)
ou[i][j][k] = true;
if (weight[i*range*range +j*range+(k+1)]>0)
if (cube[i*range*range + j*range + k] * cube[i*range*range + j*range + (k + 1)]<0)
ou[i][j][k] = true;
if (ou[i][j][k] == true)
totalVertexOut++;
}
else
ou[i][j][k] = false;
}
//-----------------------------------------------------------
clock_t endt = clock();
fout1 << "ply" << tmp << "format ascii 1.0" << tmp;
fout1 << "element vertex " << totalVertexOut << tmp;
fout1 << "property float x" << tmp;
fout1 << "property float y" << tmp;
fout1 << "property float z" << tmp;
fout1 << "end_header" << tmp;
for (i = 0; i<range - 1; i++)
for (j = 0; j<range - 1; j++)
for (k = 0; k<range - 1; k++)
if (ou[i][j][k] == true)
{
fout1 << i - halfRange << ' ' << j - halfRange << ' ' << k - halfRange << tmp;
}
fout1.close();
ofstream fout2("calculateTime");
double time = (double)(endt - startt) / CLOCKS_PER_SEC;
fout2 << "cubesize:" << range << '*' << range << '*' << range << '\n';
fout2 << "time used (without initialize and output part):" << time << '\n';
fout2.close();
return 0;
}
|
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <hip/device_functions.h>
//#include "device_launch_parameters.h"
#include <hip/hip_runtime_api.h>
#include <iostream>
#include <cstdlib>
#include <fstream>
#include <string>
#include <cmath>
#include <time.h>
using namespace std;
const int range = 128;
const int workPerThread = 64;//cannot be smaller than the range
int halfRange = range / 2;
float cube[range*range*range];
int weight[range*range*range];
bool ou[range][range][range];
//float p = 0.8;
__global__
void calculate(float *cubecu, int *weightcu, float *pixelcu, float *rotcu,float mx,float my,float mz,int range,int workPerThread)// float *mxcu, float *mycu, float *mzcu, int *rangecu)
{
float pos1[3];
float pos2[3];
int pos3[3];
int i, j, k,l,m;
float tmp1;
int posInCube;
int zdim;
__shared__ float rotcus[9];//,range;
__shared__ float pixelcus[10000];
int xtot = 99;
int ytot = 99;
int total = xtot*ytot;
int pixelstart= (threadIdx.y*blockDim.x + threadIdx.x)*64;
int pixelend=pixelstart+64;
if (pixelend > total)
pixelend = total;
for (i=pixelstart;i<pixelend;i++)//parallel fetch the depth image
pixelcus[i] = pixelcu[i];
if ((threadIdx.x == 0) && (threadIdx.y == 0) && (threadIdx.z == 0))
{
for (i = 0; i < 9; i++)
rotcus[i] = rotcu[i];
//for (i = 0; i <total; i++)
//pixelcus[i] = pixelcu[i];
}
__syncthreads();
i = blockDim.x*blockIdx.x + threadIdx.x;
j = blockDim.y*blockIdx.y + threadIdx.y;
for (zdim = 0; zdim < workPerThread; zdim++)
{
//k = blockDim.z*blockIdx.z + threadIdx.z;
k = workPerThread * blockIdx.z + zdim;
//cubecu[k*range*range + j*range + i] = rotcus[0];
pos1[0] = i*0.2 / (range)-0.1 - mx;
pos1[1] = j*0.2 / (range)-my;
pos1[2] = k*0.2 / (range)-0.1 - mz;
for (l = 0; l < 3; l++)
{
pos2[l] = 0;
for (m = 0; m < 3; m++)
{
pos2[l] = pos2[l] + rotcus[l * 3 + m] * pos1[m];
}
}
pos3[0] = int((pos2[0] + 0.1) / 0.002 + 0.5);
pos3[1] = int(pos2[1] / 0.002 + 0.5);
//if ((blockIdx.x == 0) && (blockIdx.y == 0) && (blockIdx.z == 0) && (threadIdx.x == 0) && (threadIdx.y == 0) && (threadIdx.z == 0))
//cubecu[k*range*range + j*range + i]=rotcus[0];
//cubecu[0] = 2.0;
if (((pos3[0] * ytot + pos3[1]) < xtot*ytot) && ((pos3[0] * ytot + pos3[1]) >= 0))
if (pixelcus[pos3[0] * ytot + pos3[1]] > -0.01)
{
//cubecu[k*range*range + j*range + i] = range;
tmp1 = (pixelcus[pos3[0] * ytot + pos3[1]] - pos2[2]) / 0.001;
if ((tmp1 > 1) || (tmp1 < -1))
tmp1 = 0;
posInCube = i*range*range + j*range + k;
cubecu[posInCube] = (cubecu[posInCube] * weightcu[posInCube] + tmp1) / (weightcu[posInCube] + 1);
weightcu[posInCube] = weightcu[posInCube] + 1;
}
}
}
void pro(string a1, float *cubecu, int *weightcu)//calculate rotation matrix and call the 'calculate' function
{
int xtot, ytot;
int i, j;
float mx, my, mz, rx, ry, rz,an;
float rot[9];
float pixel[100 * 100];
float *pixelcu, *rotcu;
a1 += "data.txt";
ifstream fin1(a1);
fin1 >> mx;
fin1 >> my;
fin1 >> mz;
fin1 >> rx;
fin1 >> ry;
fin1 >> rz;
fin1 >> an;
fin1 >> xtot;
fin1 >> ytot;
for (i = 0; i<xtot; i++)
for (j = 0; j<ytot; j++)
{
fin1 >> pixel[j+i*ytot];
}
fin1.close();
rot[0] = 1 - 2 * ry*ry - 2 * rz*rz;
rot[1] = 2 * rx*ry - 2 * rz*an;
rot[2] = 2 * rx*rz + 2 * ry*an;
rot[3] = 2 * rx*ry + 2 * rz*an;
rot[4] = 1 - 2 * rx*rx - 2 * rz*rz;
rot[5] = 2 * ry*rz - 2 * rx*an;
rot[6] = 2 * rx*rz - 2 * ry*an;
rot[7] = 2 * ry*rz + 2 * rx*an;
rot[8] = 1 - 2 * rx*rx - 2 * ry*ry;
dim3 gb(range/32, range/32, range/workPerThread);
dim3 tb(32, 32, 1);//I set the dimension of x and y as 32 because the max threadNum per block is 1024
hipMalloc((void **)&pixelcu, xtot*ytot * sizeof(float));
hipMemcpy(pixelcu, pixel, xtot*ytot*sizeof(float), hipMemcpyHostToDevice);
hipMalloc((void **)&rotcu, 9 * sizeof(float));
hipMemcpy(rotcu, rot, 9 * sizeof(float), hipMemcpyHostToDevice);
calculate <<<gb, tb >>> (cubecu, weightcu, pixelcu, rotcu, mx, my, mz, range,workPerThread);
//cudaFree(&mxcu); cudaFree(&mycu); cudaFree(&mzcu); cudaFree(&rangecu);
hipDeviceSynchronize();
hipFree(pixelcu);
hipFree(rotcu);
}
int main()
{
char tmp;
int totalVertexOut;
int i, j, k;
float *cubecu;
int *weightcu;
clock_t startt = clock();
if (hipSuccess != hipMalloc((void **)&cubecu, range*range*range * sizeof(float)))
cout << "error1";
if (hipSuccess != hipMemset(cubecu, 0, range*range*range * sizeof(float)))
cout << "error2";
if (hipSuccess != hipMalloc((void **)&weightcu, range*range*range * sizeof(int)))
cout << "error3";
if (hipSuccess != hipMemset(weightcu, 0, range*range*range * sizeof(int)))
cout << "error4";
//----------------load the pixels and calculate the 3D matrix-----------------------
pro("bun000",cubecu,weightcu);
pro("bun090",cubecu,weightcu);
pro("bun180", cubecu, weightcu);
pro("bun270", cubecu, weightcu);
pro("bun045", cubecu, weightcu);
pro("ear_back", cubecu, weightcu);
pro("top2", cubecu, weightcu);
pro("top3", cubecu, weightcu);
pro("bun315", cubecu, weightcu);
pro("chin", cubecu, weightcu);
//-----------------------------------------------------
hipMemcpy(cube, cubecu, range*range*range * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(weight, weightcu, range*range*range * sizeof(int), hipMemcpyDeviceToHost);
hipFree(cubecu);
hipFree(weightcu);
tmp = char(10);
ofstream fout1("bunoutparallel.ply");
totalVertexOut = 0;
//----------------construct the final output model------------------------
for (i = 0; i<range - 1; i++)
for (j = 0; j<range - 1; j++)
for (k = 0; k<range - 1; k++)
{
ou[i][j][k] = false;
if (weight[i*range*range+j*range+k]>0)
{
if (weight[(i+1)*range*range+j*range+k]>0)
if (cube[i*range*range + j*range + k] * cube[(i + 1)*range*range + j*range + k]<0)
ou[i][j][k] = true;
if (weight[i*range*range+(j+1)*range+k]>0)
if (cube[i *range*range + j*range + k] * cube[i*range*range + (j + 1)*range + k]<0)
ou[i][j][k] = true;
if (weight[i*range*range +j*range+(k+1)]>0)
if (cube[i*range*range + j*range + k] * cube[i*range*range + j*range + (k + 1)]<0)
ou[i][j][k] = true;
if (ou[i][j][k] == true)
totalVertexOut++;
}
else
ou[i][j][k] = false;
}
//-----------------------------------------------------------
clock_t endt = clock();
fout1 << "ply" << tmp << "format ascii 1.0" << tmp;
fout1 << "element vertex " << totalVertexOut << tmp;
fout1 << "property float x" << tmp;
fout1 << "property float y" << tmp;
fout1 << "property float z" << tmp;
fout1 << "end_header" << tmp;
for (i = 0; i<range - 1; i++)
for (j = 0; j<range - 1; j++)
for (k = 0; k<range - 1; k++)
if (ou[i][j][k] == true)
{
fout1 << i - halfRange << ' ' << j - halfRange << ' ' << k - halfRange << tmp;
}
fout1.close();
ofstream fout2("calculateTime");
double time = (double)(endt - startt) / CLOCKS_PER_SEC;
fout2 << "cubesize:" << range << '*' << range << '*' << range << '\n';
fout2 << "time used (without initialize and output part):" << time << '\n';
fout2.close();
return 0;
}
|
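The dim3 tb(32, 32, 1) geometry above uses 1024 threads per block, which also matches the per-block limit on current AMD GPUs, so the launch ports unchanged. The limit (and the 64-wide wavefront that replaces the 32-wide warp) can be checked at runtime:

hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("maxThreadsPerBlock=%d wavefront=%d\n",
       prop.maxThreadsPerBlock, prop.warpSize);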
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <math.h>
#include<cuda_profiler_api.h>
//function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
//cudaProfilerStart();
int N = 1<<20; //1M elements
//int N = 100; //100 elements
//Allocate host and device memory (explicit copies; no unified memory here)
float *x, *y, *d_x, *d_y;
x = (float *)malloc(N*sizeof(float));
y = (float *)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
//initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
//Copy
cudaMemcpy(d_x,x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y,y, N*sizeof(float), cudaMemcpyHostToDevice);
// Run kernel on 1M elements on the GPU (a single thread)
add<<<1,1>>>(N, d_x, d_y);
//Wait for GPU to finish before accessing on host
//cudaDeviceSynchronize();
cudaMemcpy(y,d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
//cudaProfilerStop();
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include<hip/hip_runtime_api.h>
//function to add the elements of two arrays
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
int main(void)
{
//cudaProfilerStart();
int N = 1<<20; //1M elements
//int N = 100; //100 elements
//Allocate host and device memory (explicit copies; no unified memory here)
float *x, *y, *d_x, *d_y;
x = (float *)malloc(N*sizeof(float));
y = (float *)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
//initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
//Copy
hipMemcpy(d_x,x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y,y, N*sizeof(float), hipMemcpyHostToDevice);
// Run kernel on 1M elements on the GPU (a single thread)
add<<<1,1>>>(N, d_x, d_y);
//Wait for GPU to finish before accessing on host
//cudaDeviceSynchronize();
hipMemcpy(y,d_y, N*sizeof(float), hipMemcpyDeviceToHost);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
//cudaProfilerStop();
return 0;
}
|
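The add kernel above walks all N elements from a single thread (<<<1,1>>>), so the device executes it serially. A grid-stride variant, a sketch that is not part of the original exercise, parallelizes the same loop:

__global__
void add_parallel(int n, float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (; i < n; i += stride)
        y[i] = x[i] + y[i];
}
// launch: add_parallel<<<(N + 255) / 256, 256>>>(N, d_x, d_y);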
Convert the following CUDA code to AMD GPU code:
cuda
//
// Created by Peter Rigole on 2019-04-17.
//
#include "NeuronProperties.cuh"
NeuronProperties::NeuronProperties() : long_time_lambda(0.5),
medium_time_lambda(0.5) {}
// Copy constructor
NeuronProperties::NeuronProperties(const NeuronProperties &neuronProperties) :
long_time_lambda(neuronProperties.long_time_lambda),
medium_time_lambda(neuronProperties.medium_time_lambda) {}
// Destructor
NeuronProperties::~NeuronProperties() {}
__host__ __device__
float NeuronProperties::getLongTimeLambda() const { return long_time_lambda; }
__host__ __device__
float NeuronProperties::getMediumTimeLambda() const { return medium_time_lambda; }
|
//
// Created by Peter Rigole on 2019-04-17.
//
#include <hip/hip_runtime.h>
#include "NeuronProperties.cuh"
NeuronProperties::NeuronProperties() : long_time_lambda(0.5),
medium_time_lambda(0.5) {}
// Copy constructor
NeuronProperties::NeuronProperties(const NeuronProperties &neuronProperties) :
long_time_lambda(neuronProperties.long_time_lambda),
medium_time_lambda(neuronProperties.medium_time_lambda) {}
// Destructor
NeuronProperties::~NeuronProperties() {}
__host__ __device__
float NeuronProperties::getLongTimeLambda() const { return long_time_lambda; }
__host__ __device__
float NeuronProperties::getMediumTimeLambda() const { return medium_time_lambda; }
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
* BackpropagationCUDA.cu
*
* Created on: Jan 30, 2012
* Author: wchan
*/
/**
* This file is needed because nvcc doesn't support C++0x yet... we can merge it back in later when nvcc adds support for the C++11 standard
*/
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
void mult(double* x, double* y, size_t count) {
thrust::device_ptr<double> x_ptr(x);
thrust::device_ptr<double> y_ptr(y);
thrust::transform(x_ptr, x_ptr + count, y_ptr, x_ptr, thrust::multiplies<double>());
}
|
/*
* BackpropagationCUDA.cu
*
* Created on: Jan 30, 2012
* Author: wchan
*/
/**
* This file is needed because nvcc doesn't support C++0x yet... we can merge it back in later when nvcc adds support for the C++11 standard
*/
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>
void mult(double* x, double* y, size_t count) {
thrust::device_ptr<double> x_ptr(x);
thrust::device_ptr<double> y_ptr(y);
thrust::transform(x_ptr, x_ptr + count, y_ptr, x_ptr, thrust::multiplies<double>());
}
|
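On AMD, this same Thrust code builds against rocThrust, whose headers hipcc resolves from the ROCm installation, so only the hip_runtime include above was needed. A minimal usage sketch, assuming x and y are device allocations of count doubles:

double *x = nullptr, *y = nullptr;
hipMalloc(&x, count * sizeof(double));
hipMalloc(&y, count * sizeof(double));
// ... fill x and y on the device ...
mult(x, y, count); // elementwise: x[i] = x[i] * y[i]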
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void copy_sort_int( const float *orig, const unsigned int *sort_idx, const unsigned int nitems, float *sorted ) {
for( int i = 0; i < nitems; ++ i ) {
sorted[sort_idx[i]] = orig[i];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void copy_sort_int( const float *orig, const unsigned int *sort_idx, const unsigned int nitems, float *sorted ) {
for( int i = 0; i < nitems; ++ i ) {
sorted[sort_idx[i]] = orig[i];
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void LeftRightBound2D(double *Hs, double *Ztopo, double *K2e, double *K2w, int BC2D, int M, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < M) {
// no-flow BCs
if (BC2D == 0) {
Hs[tid*N] = Hs[tid*N+1];
Hs[(tid+1)*N-1] = Hs[(tid+1)*N-2];
} else { // Critical depth flow BCs
Hs[tid*N] = hcri + Ztopo[tid*N];
Hs[(tid+1)*N-1] = hcri + Ztopo[(tid+1)*N-1];
}
K2w[tid*N] = K2w[tid*N+1];
K2e[(tid+1)*N-1] = K2e[(tid+1)*N-2];
tid += blockDim.x * gridDim.x;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void LeftRightBound2D(double *Hs, double *Ztopo, double *K2e, double *K2w, int BC2D, int M, int N) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < M) {
// no-flow BCs
if (BC2D == 0) {
Hs[tid*N] = Hs[tid*N+1];
Hs[(tid+1)*N-1] = Hs[(tid+1)*N-2];
} else { // Critical depth flow BCs
Hs[tid*N] = hcri + Ztopo[tid*N];
Hs[(tid+1)*N-1] = hcri + Ztopo[(tid+1)*N-1];
}
K2w[tid*N] = K2w[tid*N+1];
K2e[(tid+1)*N-1] = K2e[(tid+1)*N-2];
tid += blockDim.x * gridDim.x;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void imgGray(unsigned char * d_image, unsigned char* d_imagegray, int width, int height){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if ((width > col) && (height > row)){
d_imagegray[row*width+col]=d_image[(row*width+col)*3+2]*0.299+d_image[(row*width+col)*3+1]*0.587+d_image[(row*width+col)*3]*0.114;
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void imgGray(unsigned char * d_image, unsigned char* d_imagegray, int width, int height){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if ((width > col) && (height > row)){
d_imagegray[row*width+col]=d_image[(row*width+col)*3+2]*0.299+d_image[(row*width+col)*3+1]*0.587+d_image[(row*width+col)*3]*0.114;
}
}
|
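Note: this pair shows only the kernel; the 2D launch configuration is left to the caller. A typical setup, sketched under the assumption of 16x16 blocks (d_image, d_imagegray, width and height stand in for the caller's variables):

// Round the grid up so every pixel is covered; the kernel's bounds
// check discards the overhanging threads.
dim3 block(16, 16);
dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
imgGray<<<grid, block>>>(d_image, d_imagegray, width, height);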
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include "../include/lglist.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
const int MAX_LENGTH = 170;
__device__ void printList(gpu_linearized_stl::list<float, MAX_LENGTH> &l){
if(l.empty()){
printf("\n===== Empty list =====\n\n");
return;
}
printf("\n===== Showing the list =====\n");
printf("list size = %d \n", l.size());
for(auto i=l.begin(); i!=l.end(); l.increment(i)){
printf("%.2f ", l.at(i));
}
printf("\n\n");
}
__global__ void test(float *output){
gpu_linearized_stl::list<float, MAX_LENGTH> list;
printList(list);
//
for(int i=0;i<5;++i) list.push_back(i*1.7);
printf("for(int i=0;i<5;++i) list.push_back(i*1.7);");
printList(list);
//
for(int i=6;i<10;++i) list.push_front(i*2.5);
printf("for(int i=6;i<11;++i) list.push_front(i*2.5);");
printList(list);
//
for(int i=0;i<2;++i) list.pop_back();
printf("for(int i=0;i<2;++i) list.pop_back();");
printList(list);
//
for(int i=0;i<2;++i) list.pop_front();
printf("for(int i=0;i<2;++i) list.pop_front();");
printList(list);
//
printf("list.at(list.begin())\n");
printf("%.2f\n\n", list.at(list.begin()));
//
printf("list.front()\n");
printf("%.2f\n\n", list.front());
//
auto idx = list.end();
printf("auto idx = list.end(); list.at(list.decrement(idx))\n");
printf("%.2f\n\n", list.at(list.decrement(idx)));
//
printf("list.back()\n");
printf("%.2f\n\n", list.back());
//
list.reverse();
printf("list.reverse();");
printList(list);
//
list.insert(list.begin(), 3.14);
printf("list.insert(list.begin(), 3.14);");
printList(list);
//
list.insert(list.end(), 3.14);
printf("list.insert(list.end(), 3.14);");
printList(list);
//
list.erase(list.begin());
printf("list.erase(list.begin());");
printList(list);
//
printf("list.at(list.begin())\n");
printf("%.2f\n\n", list.at(list.begin()));
//
list.erase(list.end());
printf("list.erase(list.end());");
printList(list);
//
printf("list.back()\n");
printf("%.2f\n\n", list.back());
//
for(auto p=list.begin(); p!=list.end(); list.increment(p)) list.insert(p, 22.22);
printf("for(auto p=list.begin(); p!=list.end(); list.increment(p)) list.insert(p, 22.22);");
printList(list);
//
int p;
while((p=list.find(22.22))!=list.end()) list.erase(p);
printf("while((auto p=list.find(22.22))!=list.end()) list.erase(p);");
printList(list);
gpu_linearized_stl::list<float, MAX_LENGTH> list2;
list2.push_back(list.at(list.begin()));
list.at(list.begin()) = 10086;
printf("Reference checking");
printList(list);
printList(list2);
}
int main(){
def_dvec(float) dev_out(1, 0);
test<<<1, 2>>>(to_ptr(dev_out));
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include "../include/lglist.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
using namespace std;
const int MAX_LENGTH = 170;
__device__ void printList(gpu_linearized_stl::list<float, MAX_LENGTH> &l){
if(l.empty()){
printf("\n===== Empty list =====\n\n");
return;
}
printf("\n===== Showing the list =====\n");
printf("list size = %d \n", l.size());
for(auto i=l.begin(); i!=l.end(); l.increment(i)){
printf("%.2f ", l.at(i));
}
printf("\n\n");
}
__global__ void test(float *output){
gpu_linearized_stl::list<float, MAX_LENGTH> list;
printList(list);
//
for(int i=0;i<5;++i) list.push_back(i*1.7);
printf("for(int i=0;i<5;++i) list.push_back(i*1.7);");
printList(list);
//
for(int i=6;i<10;++i) list.push_front(i*2.5);
printf("for(int i=6;i<11;++i) list.push_front(i*2.5);");
printList(list);
//
for(int i=0;i<2;++i) list.pop_back();
printf("for(int i=0;i<2;++i) list.pop_back();");
printList(list);
//
for(int i=0;i<2;++i) list.pop_front();
printf("for(int i=0;i<2;++i) list.pop_front();");
printList(list);
//
printf("list.at(list.begin())\n");
printf("%.2f\n\n", list.at(list.begin()));
//
printf("list.front()\n");
printf("%.2f\n\n", list.front());
//
auto idx = list.end();
printf("auto idx = list.end(); list.at(list.decrement(idx))\n");
printf("%.2f\n\n", list.at(list.decrement(idx)));
//
printf("list.back()\n");
printf("%.2f\n\n", list.back());
//
list.reverse();
printf("list.reverse();");
printList(list);
//
list.insert(list.begin(), 3.14);
printf("list.insert(list.begin(), 3.14);");
printList(list);
//
list.insert(list.end(), 3.14);
printf("list.insert(list.end(), 3.14);");
printList(list);
//
list.erase(list.begin());
printf("list.erase(list.begin());");
printList(list);
//
printf("list.at(list.begin())\n");
printf("%.2f\n\n", list.at(list.begin()));
//
list.erase(list.end());
printf("list.erase(list.end());");
printList(list);
//
printf("list.back()\n");
printf("%.2f\n\n", list.back());
//
for(auto p=list.begin(); p!=list.end(); list.increment(p)) list.insert(p, 22.22);
printf("for(auto p=list.begin(); p!=list.end(); list.increment(p)) list.insert(p, 22.22);");
printList(list);
//
int p;
while((p=list.find(22.22))!=list.end()) list.erase(p);
printf("while((auto p=list.find(22.22))!=list.end()) list.erase(p);");
printList(list);
gpu_linearized_stl::list<float, MAX_LENGTH> list2;
list2.push_back(list.at(list.begin()));
list.at(list.begin()) = 10086;
printf("Reference checking");
printList(list);
printList(list2);
}
int main(){
def_dvec(float) dev_out(1, 0);
test<<<1, 2>>>(to_ptr(dev_out));
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
const int kThreadsPerBlock = 256;
int nsteps, // Number of time steps
tpoints; // Total points along string
float values[MAXPOINTS + 2]; // Values at time t
void check_param(void) {
char tchar[20];
// check number of points, number of iterations
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
// Device function to calculate new values using wave equation
__device__ float do_math(float value, float oldval) {
float dtime = 0.3;
float c = 1.0;
float dx = 1.0;
float tau = c * dtime / dx;
float sqtau = tau * tau;
return (2.0 * value - oldval + sqtau * (-2.0) * value);
}
// Kernel for computing the value of a point at a specific time with a specific time step
__global__ void init_and_update(float *dValues, int tpoints, int nsteps) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
float fac, x;
float value, oldval, newval;
if (index >= 1 && index <= tpoints) {
// Initialization
fac = 2.0 * PI; // Double stored in float
x = (index - 1.0) / (tpoints - 1.0); // Double stored in float
value = sin((double)(fac * x)); // Force sin calculation in double
oldval = value;
// Update
for (int i = 1; i <= nsteps; i++) {
newval = (index == 1 || index == tpoints)? 0:do_math(value, oldval);
oldval = value;
value = newval;
}
dValues[index] = value;
}
}
void printfinal() {
for (int i = 1; i <= tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 0)
printf("\n");
}
}
int main(int argc, char *argv[]) {
float *dValues; // Values in device
int size = (MAXPOINTS + 2) * sizeof(float); // Size of memory to store values
int numOfBlocks; // Number of blocks used to call kernel
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
cudaMalloc(&dValues, size); // Allocate memory in device
numOfBlocks = (tpoints - 1) / kThreadsPerBlock + 1; // Compute the number of blocks, rounded up
printf("Initializing points on the line...\n");
printf("Updating all points for all time steps...\n");
init_and_update<<<numOfBlocks, kThreadsPerBlock>>>(dValues, tpoints, nsteps);
cudaMemcpy(values, dValues, size, cudaMemcpyDeviceToHost); // Copy result back to main memory
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
cudaFree(dValues); // Free memory in device
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
const int kThreadsPerBlock = 256;
int nsteps, // Number of time steps
tpoints; // Total points along string
float values[MAXPOINTS + 2]; // Values at time t
void check_param(void) {
char tchar[20];
// check number of points, number of iterations
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: ", MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n", MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
// Device function to calculate new values using wave equation
__device__ float do_math(float value, float oldval) {
float dtime = 0.3;
float c = 1.0;
float dx = 1.0;
float tau = c * dtime / dx;
float sqtau = tau * tau;
return (2.0 * value - oldval + sqtau * (-2.0) * value);
}
// Kernel for computing the value of a point at a specific time with a specific time step
__global__ void init_and_update(float *dValues, int tpoints, int nsteps) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
float fac, x;
float value, oldval, newval;
if (index >= 1 && index <= tpoints) {
// Initialization
fac = 2.0 * PI; // Double stored in float
x = (index - 1.0) / (tpoints - 1.0); // Double stored in float
value = sin((double)(fac * x)); // Force sin calculation in double
oldval = value;
// Update
for (int i = 1; i <= nsteps; i++) {
newval = (index == 1 || index == tpoints)? 0:do_math(value, oldval);
oldval = value;
value = newval;
}
dValues[index] = value;
}
}
void printfinal() {
for (int i = 1; i <= tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 0)
printf("\n");
}
}
int main(int argc, char *argv[]) {
float *dValues; // Values in device
int size = (MAXPOINTS + 2) * sizeof(float); // Size of memory to store values
int numOfBlocks; // Number of blocks used to call kernel
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
hipMalloc(&dValues, size); // Allocate memory in device
numOfBlocks = (tpoints - 1) / kThreadsPerBlock + 1; // Compute the number of blocks, rounded up
printf("Initializing points on the line...\n");
printf("Updating all points for all time steps...\n");
init_and_update<<<numOfBlocks, kThreadsPerBlock>>>(dValues, tpoints, nsteps);
hipMemcpy(values, dValues, size, hipMemcpyDeviceToHost); // Copy result back to main memory
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
hipFree(dValues); // Free memory in device
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void bcnn_backward_upsample_cuda_kernel(size_t dst_sz, float *src, int w, int h, int c, int n, int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int in_w = dst_w / size;
int in_h = dst_h / size;
int in_c = dst_c;
int src_idx = b * w * h * c + in_c * w * h + in_h * w + in_w;
src[src_idx] += dst[dst_idx];
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void bcnn_backward_upsample_cuda_kernel(size_t dst_sz, float *src, int w, int h, int c, int n, int size, float *dst) {
size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (i >= dst_sz) {
return;
}
int dst_idx = i;
int dst_w = i % (w * size);
i = i / (w * size);
int dst_h = i % (h * size);
i = i / (h * size);
int dst_c = i % c;
i = i / c;
int b = i % n;
int in_w = dst_w / size;
int in_h = dst_h / size;
int in_c = dst_c;
int src_idx = b * w * h * c + in_c * w * h + in_h * w + in_w;
src[src_idx] += dst[dst_idx];
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
* reduce an array of 1's by the sum
*/
#include <stdio.h>
#include <stdlib.h>
void nonCudaReduce(float* out, float *in, int size);
void startClock(char*);
void stopClock(char*);
void printClock(char*);
int main(int argc, char** argv) {
if (argc < 2) {
printf("Usage: %s #-of-floats\n",argv[0]);
exit(1);
}
int size = atoi(argv[1]);
printf("size = %d\n",size);
float *h_in;
float h_out;
h_in = (float*) malloc(size*sizeof(float));
for (int i = 0; i < size; i++) {
h_in[i] = 1;
}
startClock("compute");
nonCudaReduce(&h_out,h_in,size);
stopClock("compute");
printf("The sum is %f\n",h_out);
free(h_in);
printClock("compute");
}
void nonCudaReduce(float* out, float* in, int size) {
*out = 0.0;
for (int i = 0; i < size; i++) {
*out += in[i];
}
}
|
/*
* reduce an array of 1's by the sum
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
void nonCudaReduce(float* out, float *in, int size);
void startClock(char*);
void stopClock(char*);
void printClock(char*);
int main(int argc, char** argv) {
if (argc < 2) {
printf("Usage: %s #-of-floats\n",argv[0]);
exit(1);
}
int size = atoi(argv[1]);
printf("size = %d\n",size);
float *h_in;
float h_out;
h_in = (float*) malloc(size*sizeof(float));
for (int i = 0; i < size; i++) {
h_in[i] = 1;
}
startClock("compute");
nonCudaReduce(&h_out,h_in,size);
stopClock("compute");
printf("The sum is %f\n",h_out);
free(h_in);
printClock("compute");
}
void nonCudaReduce(float* out, float* in, int size) {
*out = 0.0;
for (int i = 0; i < size; i++) {
*out += in[i];
}
}
|
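Note: startClock, stopClock and printClock are declared but never defined in this file, so the program will not link as shown. A minimal sketch of what they might look like, purely as an assumption (the real helpers could use any timer; this version also needs <time.h>):

// Hypothetical single-timer helpers; only printClock uses the label.
#include <time.h>
static clock_t t_begin, t_end;
void startClock(char *label) { (void)label; t_begin = clock(); }
void stopClock(char *label)  { (void)label; t_end = clock(); }
void printClock(char *label) {
    printf("%s: %f s\n", label, (double)(t_end - t_begin) / CLOCKS_PER_SEC);
}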
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void repack_input_kernel(float *input, float *re_packed_input, int w, int h, int c)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int items_per_channel = w * h;
int c_pack = index % 32;
int chan_index = index / 32;
int chan = (chan_index * 32) % c;
int i = (chan_index * 32) / c;
//for (chan = 0; chan < c; chan += 32)
{
//for (i = 0; i < items_per_channel; ++i)
if(i < items_per_channel)
{
//for (c_pack = 0; c_pack < 32; ++c_pack)
{
float src = input[(chan + c_pack)*items_per_channel + i];
re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
}
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void repack_input_kernel(float *input, float *re_packed_input, int w, int h, int c)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
const int items_per_channel = w * h;
int c_pack = index % 32;
int chan_index = index / 32;
int chan = (chan_index * 32) % c;
int i = (chan_index * 32) / c;
//for (chan = 0; chan < c; chan += 32)
{
//for (i = 0; i < items_per_channel; ++i)
if(i < items_per_channel)
{
//for (c_pack = 0; c_pack < 32; ++c_pack)
{
float src = input[(chan + c_pack)*items_per_channel + i];
re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
}
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
void cpu()
{
printf("cpu\n");
}
__global__ void gpu()
{
printf("gpu\n");
}
int main()
{
cpu();
gpu<<<1,1>>>();
cudaDeviceSynchronize();
}
|
#include <hip/hip_runtime.h>
#include<stdio.h>
void cpu()
{
printf("cpu\n");
}
__global__ void gpu()
{
printf("gpu\n");
}
int main()
{
cpu();
gpu<<<1,1>>>();
hipDeviceSynchronize();
}
|
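Note: hipcc accepts the gpu<<<1,1>>>() syntax unchanged, as the conversion above relies on. The macro form below is the equivalent launch that some HIP codebases prefer for toolchains without triple-chevron support:

// Equivalent launch: kernel, grid, block, dynamic shared-memory bytes, stream.
hipLaunchKernelGGL(gpu, dim3(1), dim3(1), 0, 0);
hipDeviceSynchronize();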
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void reduceSmemDyn(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int smem[];
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void reduceSmemDyn(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int smem[];
// set thread ID
unsigned int tid = threadIdx.x;
int *idata = g_idata + blockIdx.x * blockDim.x;
// set to smem by each threads
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in global memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
|
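Note: reduceSmemDyn declares extern __shared__ storage, so its launch must pass the dynamic shared-memory size as the third launch parameter, exactly as in CUDA. A hedged launch sketch (d_in, d_out, the block size, and the assumption that n is a multiple of the block size are all illustrative):

// One int of dynamic shared memory per thread in the block.
int blockSize = 512;
int gridSize = n / blockSize;
reduceSmemDyn<<<gridSize, blockSize, blockSize * sizeof(int)>>>(d_in, d_out, n);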
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#include<iostream>
#include<fstream>
#include<cuda.h>
#include<time.h>
#include<sys/time.h>
#define IMAGE_LENGTH 2000
#define KERNEL_LENGTH 5
#define MAX_NUMBER 12
#define NumOfBlocks IMAGE_LENGTH/16
#define NumOfThreads 16
using namespace std;
ofstream fs("convolucion.txt");
void print_Matrix(int** matrix, int n, int m){
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
cout<<" "<<matrix[i][j];
}
cout<<endl;
}
}
int** create_Matrix(int n, int m){
int **matrix;
matrix = (int **)malloc(n*sizeof(int *));
for(int i=0; i < n; i++) {
matrix[i] = (int *)malloc(m*sizeof(int));
}
return matrix;
}
int** gpu_create_Matrix(int n, int m){
int **matrix;
cudaMalloc((void***)&matrix, n*sizeof(int*));
for(int i=0; i < n; i++) {
cudaMalloc((void**) &(matrix[i]), m*sizeof(int));
}
return matrix;
}
int** gpu_copy_Matrix(int **host, int n, int m){
int** device = (int **)malloc(n * sizeof(int *));
int **aux = (int **)malloc(n * sizeof(int *));
cudaMalloc((void***)&device, n*sizeof(int*));
for(int i=0; i<n; i++) {
cudaMalloc((void**) &(aux[i]), m*sizeof(int));
cudaMemcpy (aux[i], host[i], m*sizeof(float), cudaMemcpyHostToDevice);
}
cudaMemcpy(device, aux, n*sizeof(int *), cudaMemcpyHostToDevice);
return device;
}
int** cpu_copy_Matrix(int **device, int n, int m){
int** host = (int **)malloc(n * sizeof(int *));
int **aux = (int **)malloc(n * sizeof(int *));
cudaMalloc((void***)&host, n*sizeof(int*));
for(int i=0; i<n; i++) {
cudaMalloc((void**) &(aux[i]), m*sizeof(int));
cudaMemcpy (aux[i], device[i], m*sizeof(float), cudaMemcpyDeviceToHost);
}
cudaMemcpy(host, aux, n*sizeof(int *), cudaMemcpyDeviceToHost);
return host;
}
int** copy_Matrix(int **orig, int n, int m){
int** cpy = create_Matrix(n,m);
for(int i=0; i<n; i++) {
for(int j=0; j<m; j++){
cpy[i][j] = orig[i][j];
}
}
return cpy;
}
void generate_Kernel_Matrix(int** a, int n, int m){
srand( (unsigned)time( NULL ) );
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
//a[i][j] = (rand()%MAX_NUMBER)+1;;
a[i][j] = 1;
}
}
}
void generate_Image_Matrix(int** a, int n, int m, int index){
srand( (unsigned)time( NULL ) );
for(int i=0;i<index;i++){
for(int j=0;j<m;j++){
a[i][j] = 0;
}
}
for(int i=0;i<n-index;i++){
for(int j=0;j<index;j++){
a[i][j] = 0;
}
}
for(int i=n-index;i<n;i++){
for(int j=0;j<m;j++){
a[i][j] = 0;
}
}
for(int i=0;i<n;i++){
for(int j=m-index;j<m;j++){
a[i][j] = 0;
}
}
for(int i=index; i<n-index;i++){
for(int j=index;j<m-index;j++){
a[i][j] = (rand()%MAX_NUMBER)+1;
//a[i][j] = j;
}
}
}
void init_Matrix(int** a,int n, int m){
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
a[i][j]=0;
}
}
}
bool compare_Matrix(int** A, int** B, int n, int m){
bool same = true;
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
if(A[i][j]!=B[i][j]){
same = false;
break;
}
}
}
return same;
}
int sumTermsMatrix(int** a, int n, int m){
int suma = 0;
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
suma+=a[i][j];
}
}
return suma;
}
void convolucion(int** kernel, int** image, int** result, int KERNELCOUNT){
int i, j, n, m;
int acumulador=0;
for (i=0; i < IMAGE_LENGTH; i++){
for (j=0; j < IMAGE_LENGTH; j++){
for (n = 0; n < KERNEL_LENGTH; n++){
for (m = 0; m < KERNEL_LENGTH; m++){
acumulador += image[i + n][j + m] * kernel[n][m];
}
}
result[i][j] = acumulador/KERNELCOUNT;
acumulador = 0;
}
}
}
__global__ void convolucionCUDA(int** kernel, int** image, int** result, int KERNELCOUNT){
int i, j, n, m;
int acumulador=0;
for (i=0; i < IMAGE_LENGTH; i++){
for (j=0; j < IMAGE_LENGTH; j++){
for (n = 0; n < KERNEL_LENGTH; n++){
for (m = 0; m < KERNEL_LENGTH; m++){
acumulador += image[i + n][j + m] * kernel[n][m];
}
}
result[i][j] = acumulador/KERNELCOUNT;
acumulador = 0;
}
}
}
__global__ void convKernel(int *inData, int *filter, int dataCol, int dataRow, int filRowRad, int filColRad,
int *outData)
{
__shared__ int padRect[2*1024];
int i, col, row, sum = 0;
int globalCol = threadIdx.x + blockIdx.x * blockDim.x;
int globalRow = threadIdx.y + blockIdx.y * blockDim.y;
int globalIdx = globalCol * dataRow + globalRow;
int localIdx = threadIdx.x * blockDim.y + threadIdx.y;
int localCells = blockDim.x * blockDim.y;
int padRectCol = threadIdx.x + filColRad;
int padRectRow = threadIdx.y + filRowRad;
int padRectOffset = 2*filRowRad + blockDim.y;
int padRectCells = padRectOffset * (blockDim.x + 2*filColRad);
int *padRectOut = (int*)&padRect[((padRectCells-1)/32 + 1) * 32]; //Padding up with 32
padRectOut[localIdx] = 0;
int filOffset = filRowRad*2 + 1;
int filCells = filOffset * (filColRad*2 + 1);
int *localFilter = (int *)&padRectOut[((localCells-1)/32 + 1) * 32]; //Padding up with 32
// Copying the filter elements to shared memory
for(i = 0; i < (filCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < filCells) {
localFilter[index] = filter[index];
}
}
// Copying the Data elements to padded shared memory
for(i = 0; i < (padRectCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < padRectCells) {
int prCol = index / padRectOffset;
int prRow = index % padRectOffset;
int glCol = prCol + blockIdx.x*blockDim.x - filColRad;
int glRow = prRow + blockIdx.y*blockDim.y - filRowRad;
int glIdx = glCol * dataRow + glRow;
if(glRow >= 0 && glRow < dataRow && glCol >= 0 && glCol < dataCol)
padRect[index] = inData[glIdx];
else
padRect[index] = 0;
}
}
__syncthreads();
//Taking Convolution
for(col = -filColRad; col <= filColRad; col++) {
for(row = -filRowRad; row <= filRowRad; row++) {
int filCol = filColRad - col;
int filRow = filRowRad - row;
int filIdx = filCol*filOffset + filRow;
int filVal = localFilter[filIdx];
int prCol = padRectCol + col;
int prRow = padRectRow + row;
int prIdx = prCol*padRectOffset + prRow;
sum += filVal * padRect[prIdx];
}
}
padRectOut[localIdx] = sum;
__syncthreads();
outData[globalIdx] = padRectOut[localIdx];
}
int main(){
float parallelTime, serialTime;
cudaEvent_t tStart, tStop;
cudaEventCreate(&tStart, 0);
cudaEventCreate(&tStop, 0);
int realKernelLength=0, index = 0;
if(KERNEL_LENGTH%2!=0){
realKernelLength = KERNEL_LENGTH - 1;
index = KERNEL_LENGTH/2;
} else{
cout << "Matrix Convolution siempre debe ser 2*N+1"<<endl;
exit(0);
}
int** kernel = create_Matrix(KERNEL_LENGTH, KERNEL_LENGTH);
int** image = create_Matrix(IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength);
int** result = create_Matrix(IMAGE_LENGTH, IMAGE_LENGTH);
int** resultCUDA = create_Matrix(IMAGE_LENGTH, IMAGE_LENGTH);
generate_Kernel_Matrix(kernel, KERNEL_LENGTH,KERNEL_LENGTH);
generate_Image_Matrix(image, IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength, index);
init_Matrix(result, IMAGE_LENGTH, IMAGE_LENGTH);
int KERNELCOUNT = sumTermsMatrix(kernel,KERNEL_LENGTH,KERNEL_LENGTH);
cudaEventRecord(tStart, 0);
convolucion(kernel, image, result, KERNELCOUNT);
cudaEventRecord(tStop, 0);
cudaEventSynchronize(tStop);
cudaEventElapsedTime(&serialTime, tStart, tStop);
cudaEventDestroy(tStart);
cudaEventDestroy(tStop);
if(IMAGE_LENGTH<10){
cout << "Image: "<<endl;
print_Matrix(image, IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength);
cout << endl << endl;
cout << "Kernel: "<<endl;
print_Matrix(kernel, KERNEL_LENGTH, KERNEL_LENGTH);
cout << endl << endl;
cout << "Result: "<<endl;
print_Matrix(result, IMAGE_LENGTH, IMAGE_LENGTH);
}
cout << "El tiempo en realizar la convolución en tiempo secuencial es: " << serialTime/1000 << endl;
// int ns[] = {1, 2, 4, 8};
fs << "ImgLenght KerLenght numthd serialTime parallelTime eff speedUp"<< endl;
// ----
int** gpu_kernel = copy_Matrix(kernel, KERNEL_LENGTH, KERNEL_LENGTH);
int** gpu_image = copy_Matrix(image, IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength);
init_Matrix(resultCUDA, IMAGE_LENGTH, IMAGE_LENGTH);
int** gpu_result = copy_Matrix(resultCUDA, IMAGE_LENGTH, IMAGE_LENGTH);
cudaEventCreate(&tStart, 0);
cudaEventCreate(&tStop, 0);
cudaEventRecord(tStart, 0);
dim3 num_threads(NumOfThreads, 16, 1);
dim3 numOfBlocks(NumOfBlocks, 1, 1);
//convolucionCUDA<<<numOfBlocks, num_threads>>>(gpu_image, gpu_kernel, gpu_result, KERNELCOUNT);
cudaEventRecord(tStop, 0);
cudaEventSynchronize(tStop);
cudaEventElapsedTime(&parallelTime, tStart, tStop);
cudaEventDestroy(tStart);
cudaEventDestroy(tStop);
//resultCUDA = cpu_copy_Matrix(gpu_result, IMAGE_LENGTH, IMAGE_LENGTH);
cout << "Son iguales: "<< compare_Matrix(result, resultCUDA, IMAGE_LENGTH, IMAGE_LENGTH) << endl;
cout << "El tiempo en realizar la convolución en tiempo Paralelo con " << NumOfBlocks << " bloques y " << NumOfThreads <<" hilos es: " << parallelTime << endl;
cout << "Speed UP: "<< serialTime/(parallelTime) << endl;
cout << "Eficiencia: "<< serialTime/(parallelTime*NumOfThreads) << endl;
fs << IMAGE_LENGTH << " " << KERNEL_LENGTH << " " << NumOfThreads <<" " << serialTime <<" " << parallelTime << " " << serialTime/(parallelTime*NumOfThreads)<<" "<< serialTime/(parallelTime) << endl;
cudaFree(gpu_image);
cudaFree(gpu_result);
cudaFree(gpu_kernel);
free(image);
free(result);
free(kernel);
return 0;
}
|
#include<stdio.h>
#include<iostream>
#include<fstream>
#include<hip/hip_runtime.h>
#include<time.h>
#include<sys/time.h>
#define IMAGE_LENGTH 2000
#define KERNEL_LENGTH 5
#define MAX_NUMBER 12
#define NumOfBlocks IMAGE_LENGTH/16
#define NumOfThreads 16
using namespace std;
ofstream fs("convolucion.txt");
void print_Matrix(int** matrix, int n, int m){
for(int i = 0; i < n; i++){
for(int j = 0; j < m; j++){
cout<<" "<<matrix[i][j];
}
cout<<endl;
}
}
int** create_Matrix(int n, int m){
int **matrix;
matrix = (int **)malloc(n*sizeof(int *));
for(int i=0; i < n; i++) {
matrix[i] = (int *)malloc(m*sizeof(int));
}
return matrix;
}
int** gpu_create_Matrix(int n, int m){
int **matrix;
hipMalloc((void***)&matrix, n*sizeof(int*));
for(int i=0; i < n; i++) {
hipMalloc((void**) &(matrix[i]), m*sizeof(int));
}
return matrix;
}
int** gpu_copy_Matrix(int **host, int n, int m){
int** device = (int **)malloc(n * sizeof(int *));
int **aux = (int **)malloc(n * sizeof(int *));
hipMalloc((void***)&device, n*sizeof(int*));
for(int i=0; i<n; i++) {
hipMalloc((void**) &(aux[i]), m*sizeof(int));
hipMemcpy (aux[i], host[i], m*sizeof(float), hipMemcpyHostToDevice);
}
hipMemcpy(device, aux, n*sizeof(int *), hipMemcpyHostToDevice);
return device;
}
int** cpu_copy_Matrix(int **device, int n, int m){
int** host = (int **)malloc(n * sizeof(int *));
int **aux = (int **)malloc(n * sizeof(int *));
hipMalloc((void***)&host, n*sizeof(int*));
for(int i=0; i<n; i++) {
hipMalloc((void**) &(aux[i]), m*sizeof(int));
hipMemcpy (aux[i], device[i], m*sizeof(float), hipMemcpyDeviceToHost);
}
hipMemcpy(host, aux, n*sizeof(int *), hipMemcpyDeviceToHost);
return host;
}
int** copy_Matrix(int **orig, int n, int m){
int** cpy = create_Matrix(n,m);
for(int i=0; i<n; i++) {
for(int j=0; j<m; j++){
cpy[i][j] = orig[i][j];
}
}
return cpy;
}
void generate_Kernel_Matrix(int** a, int n, int m){
srand( (unsigned)time( NULL ) );
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
//a[i][j] = (rand()%MAX_NUMBER)+1;;
a[i][j] = 1;
}
}
}
void generate_Image_Matrix(int** a, int n, int m, int index){
srand( (unsigned)time( NULL ) );
for(int i=0;i<index;i++){
for(int j=0;j<m;j++){
a[i][j] = 0;
}
}
for(int i=0;i<n-index;i++){
for(int j=0;j<index;j++){
a[i][j] = 0;
}
}
for(int i=n-index;i<n;i++){
for(int j=0;j<m;j++){
a[i][j] = 0;
}
}
for(int i=0;i<n;i++){
for(int j=m-index;j<m;j++){
a[i][j] = 0;
}
}
for(int i=index; i<n-index;i++){
for(int j=index;j<m-index;j++){
a[i][j] = (rand()%MAX_NUMBER)+1;
//a[i][j] = j;
}
}
}
void init_Matrix(int** a,int n, int m){
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
a[i][j]=0;
}
}
}
bool compare_Matrix(int** A, int** B, int n, int m){
bool same = true;
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
if(A[i][j]!=B[i][j]){
same = false;
break;
}
}
}
return same;
}
int sumTermsMatrix(int** a, int n, int m){
int suma = 0;
for(int i=0; i<n;i++){
for(int j=0;j<m;j++){
suma+=a[i][j];
}
}
return suma;
}
void convolucion(int** kernel, int** image, int** result, int KERNELCOUNT){
int i, j, n, m;
int acumulador=0;
for (i=0; i < IMAGE_LENGTH; i++){
for (j=0; j < IMAGE_LENGTH; j++){
for (n = 0; n < KERNEL_LENGTH; n++){
for (m = 0; m < KERNEL_LENGTH; m++){
acumulador += image[i + n][j + m] * kernel[n][m];
}
}
result[i][j] = acumulador/KERNELCOUNT;
acumulador = 0;
}
}
}
__global__ void convolucionCUDA(int** kernel, int** image, int** result, int KERNELCOUNT){
int i, j, n, m;
int acumulador=0;
for (i=0; i < IMAGE_LENGTH; i++){
for (j=0; j < IMAGE_LENGTH; j++){
for (n = 0; n < KERNEL_LENGTH; n++){
for (m = 0; m < KERNEL_LENGTH; m++){
acumulador += image[i + n][j + m] * kernel[n][m];
}
}
result[i][j] = acumulador/KERNELCOUNT;
acumulador = 0;
}
}
}
__global__ void convKernel(int *inData, int *filter, int dataCol, int dataRow, int filRowRad, int filColRad,
int *outData)
{
__shared__ int padRect[2*1024];
int i, col, row, sum = 0;
int globalCol = threadIdx.x + blockIdx.x * blockDim.x;
int globalRow = threadIdx.y + blockIdx.y * blockDim.y;
int globalIdx = globalCol * dataRow + globalRow;
int localIdx = threadIdx.x * blockDim.y + threadIdx.y;
int localCells = blockDim.x * blockDim.y;
int padRectCol = threadIdx.x + filColRad;
int padRectRow = threadIdx.y + filRowRad;
int padRectOffset = 2*filRowRad + blockDim.y;
int padRectCells = padRectOffset * (blockDim.x + 2*filColRad);
int *padRectOut = (int*)&padRect[((padRectCells-1)/32 + 1) * 32]; //Padding up with 32
padRectOut[localIdx] = 0;
int filOffset = filRowRad*2 + 1;
int filCells = filOffset * (filColRad*2 + 1);
int *localFilter = (int *)&padRectOut[((localCells-1)/32 + 1) * 32]; //Padding up with 32
// Copying the filter elements to shared memory
for(i = 0; i < (filCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < filCells) {
localFilter[index] = filter[index];
}
}
// Copying the Data elements to padded shared memory
for(i = 0; i < (padRectCells/localCells) + 1; i++) {
int index = i*localCells + localIdx;
if(index < padRectCells) {
int prCol = index / padRectOffset;
int prRow = index % padRectOffset;
int glCol = prCol + blockIdx.x*blockDim.x - filColRad;
int glRow = prRow + blockIdx.y*blockDim.y - filRowRad;
int glIdx = glCol * dataRow + glRow;
if(glRow >= 0 && glRow < dataRow && glCol >= 0 && glCol < dataCol)
padRect[index] = inData[glIdx];
else
padRect[index] = 0;
}
}
__syncthreads();
//Taking Convolution
for(col = -filColRad; col <= filColRad; col++) {
for(row = -filRowRad; row <= filRowRad; row++) {
int filCol = filColRad - col;
int filRow = filRowRad - row;
int filIdx = filCol*filOffset + filRow;
int filVal = localFilter[filIdx];
int prCol = padRectCol + col;
int prRow = padRectRow + row;
int prIdx = prCol*padRectOffset + prRow;
sum += filVal * padRect[prIdx];
}
}
padRectOut[localIdx] = sum;
__syncthreads();
outData[globalIdx] = padRectOut[localIdx];
}
int main(){
float parallelTime, serialTime;
hipEvent_t tStart, tStop;
hipEventCreateWithFlags(&tStart, 0);
hipEventCreateWithFlags(&tStop, 0);
int realKernelLength=0, index = 0;
if(KERNEL_LENGTH%2!=0){
realKernelLength = KERNEL_LENGTH - 1;
index = KERNEL_LENGTH/2;
} else{
cout << "Matrix Convolution siempre debe ser 2*N+1"<<endl;
exit(0);
}
int** kernel = create_Matrix(KERNEL_LENGTH, KERNEL_LENGTH);
int** image = create_Matrix(IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength);
int** result = create_Matrix(IMAGE_LENGTH, IMAGE_LENGTH);
int** resultCUDA = create_Matrix(IMAGE_LENGTH, IMAGE_LENGTH);
generate_Kernel_Matrix(kernel, KERNEL_LENGTH,KERNEL_LENGTH);
generate_Image_Matrix(image, IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength, index);
init_Matrix(result, IMAGE_LENGTH, IMAGE_LENGTH);
int KERNELCOUNT = sumTermsMatrix(kernel,KERNEL_LENGTH,KERNEL_LENGTH);
hipEventRecord(tStart, 0);
convolucion(kernel, image, result, KERNELCOUNT);
hipEventRecord(tStop, 0);
hipEventSynchronize(tStop);
hipEventElapsedTime(&serialTime, tStart, tStop);
hipEventDestroy(tStart);
hipEventDestroy(tStop);
if(IMAGE_LENGTH<10){
cout << "Image: "<<endl;
print_Matrix(image, IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength);
cout << endl << endl;
cout << "Kernel: "<<endl;
print_Matrix(kernel, KERNEL_LENGTH, KERNEL_LENGTH);
cout << endl << endl;
cout << "Result: "<<endl;
print_Matrix(result, IMAGE_LENGTH, IMAGE_LENGTH);
}
cout << "El tiempo en realizar la convolución en tiempo secuencial es: " << serialTime/1000 << endl;
// int ns[] = {1, 2, 4, 8};
fs << "ImgLenght KerLenght numthd serialTime parallelTime eff speedUp"<< endl;
// ----
int** gpu_kernel = copy_Matrix(kernel, KERNEL_LENGTH, KERNEL_LENGTH);
int** gpu_image = copy_Matrix(image, IMAGE_LENGTH+realKernelLength, IMAGE_LENGTH+realKernelLength);
init_Matrix(resultCUDA, IMAGE_LENGTH, IMAGE_LENGTH);
int** gpu_result = copy_Matrix(resultCUDA, IMAGE_LENGTH, IMAGE_LENGTH);
hipEventCreateWithFlags(&tStart, 0);
hipEventCreateWithFlags(&tStop, 0);
hipEventRecord(tStart, 0);
dim3 num_threads(NumOfThreads, 16, 1);
dim3 numOfBlocks(NumOfBlocks, 1, 1);
//convolucionCUDA<<<numOfBlocks, num_threads>>>(gpu_image, gpu_kernel, gpu_result, KERNELCOUNT);
hipEventRecord(tStop, 0);
hipEventSynchronize(tStop);
hipEventElapsedTime(&parallelTime, tStart, tStop);
hipEventDestroy(tStart);
hipEventDestroy(tStop);
//resultCUDA = cpu_copy_Matrix(gpu_result, IMAGE_LENGTH, IMAGE_LENGTH);
cout << "Son iguales: "<< compare_Matrix(result, resultCUDA, IMAGE_LENGTH, IMAGE_LENGTH) << endl;
cout << "El tiempo en realizar la convolución en tiempo Paralelo con " << NumOfBlocks << " bloques y " << NumOfThreads <<" hilos es: " << parallelTime << endl;
cout << "Speed UP: "<< serialTime/(parallelTime) << endl;
cout << "Eficiencia: "<< serialTime/(parallelTime*NumOfThreads) << endl;
fs << IMAGE_LENGTH << " " << KERNEL_LENGTH << " " << NumOfThreads <<" " << serialTime <<" " << parallelTime << " " << serialTime/(parallelTime*NumOfThreads)<<" "<< serialTime/(parallelTime) << endl;
hipFree(gpu_image);
hipFree(gpu_result);
hipFree(gpu_kernel);
free(image);
free(result);
free(kernel);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void ReturnFloat( float *sum, float *out, const float *pIn )
{
out[threadIdx.x] = atomicAdd( &out[threadIdx.x], pIn[threadIdx.x] );
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void ReturnFloat( float *sum, float *out, const float *pIn )
{
out[threadIdx.x] = atomicAdd( &out[threadIdx.x], pIn[threadIdx.x] );
}
|
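Note: the conversion above is faithful, but it preserves a quirk worth flagging: atomicAdd returns the value held before the addition, so assigning that return value back to out[threadIdx.x] overwrites the accumulated sum. If plain accumulation is the intent, the usual pattern looks like the sketch below (a guess at the intent, dropping the unused sum parameter):

// Accumulate pIn into out; the old value returned by atomicAdd is discarded.
__global__ void AccumulateFloat(float *out, const float *pIn)
{
    atomicAdd(&out[threadIdx.x], pIn[threadIdx.x]);
}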
Convert the following CUDA code to AMD GPU code:
cuda
// https://devblogs.nvidia.com/even-easier-introduction-cuda
// https://devblogs.nvidia.com/unified-memory-cuda-beginners
#include <iostream>
#include <math.h>
// cuda kernel
__global__
void add(size_t num_elements, const float* x, float* result)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t stride = blockDim.x * gridDim.x;
for (size_t i = index; i < num_elements; i += stride)
{
result[i] += x[i];
}
}
int main()
{
const size_t num_elements = 1 << 20; // 1M elements
float* x;
float* y;
cudaMallocManaged(&x, num_elements * sizeof(float));
cudaMallocManaged(&y, num_elements * sizeof(float));
for (size_t i = 0; i < num_elements; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// prefetch data to GPU
int device = -1;
cudaGetDevice(&device);
cudaMemPrefetchAsync(x, num_elements * sizeof(float), device, NULL);
cudaMemPrefetchAsync(y, num_elements * sizeof(float), device, NULL);
size_t block_size = 256;
size_t num_blocks = (num_elements + block_size - 1) / block_size;
// run kernel on GPU
add<<<num_blocks, block_size>>>(num_elements, x, y);
// wait for GPU to finish
cudaDeviceSynchronize();
float max_error = 0.0f;
for (size_t i = 0; i < num_elements; i++)
{
max_error = fmax(max_error, fabs(y[i] - 3.0f));
}
std::cout << "max error: " << max_error << std::endl;
cudaFree(x);
cudaFree(y);
}
|
// https://devblogs.nvidia.com/even-easier-introduction-cuda
// https://devblogs.nvidia.com/unified-memory-cuda-beginners
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
// cuda kernel
__global__
void add(size_t num_elements, const float* x, float* result)
{
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t stride = blockDim.x * gridDim.x;
for (size_t i = index; i < num_elements; i += stride)
{
result[i] += x[i];
}
}
int main()
{
const size_t num_elements = 1 << 20; // 1M elements
float* x;
float* y;
hipMallocManaged(&x, num_elements * sizeof(float));
hipMallocManaged(&y, num_elements * sizeof(float));
for (size_t i = 0; i < num_elements; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
// prefetch data to GPU
int device = -1;
hipGetDevice(&device);
hipMemPrefetchAsync(x, num_elements * sizeof(float), device, NULL);
hipMemPrefetchAsync(y, num_elements * sizeof(float), device, NULL);
size_t block_size = 256;
size_t num_blocks = (num_elements + block_size - 1) / block_size;
// run kernel on GPU
add<<<num_blocks, block_size>>>(num_elements, x, y);
// wait for GPU to finish
hipDeviceSynchronize();
float max_error = 0.0f;
for (size_t i = 0; i < num_elements; i++)
{
max_error = fmax(max_error, fabs(y[i] - 3.0f));
}
std::cout << "max error: " << max_error << std::endl;
hipFree(x);
hipFree(y);
}
|
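Note: hipMallocManaged and hipMemPrefetchAsync map one-to-one from their CUDA counterparts, but managed memory is not available on every AMD device and driver. A defensive sketch that queries support first, reusing the device variable obtained above (the fallback comment is an assumption about what the caller would do):

// Skip the managed-memory path when the device does not support it.
int managed = 0;
hipDeviceGetAttribute(&managed, hipDeviceAttributeManagedMemory, device);
if (!managed) {
    // fall back to explicit hipMalloc + hipMemcpy staging
}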
Convert the following CUDA code to AMD GPU code:
cuda
__global__ void
reduction_sum(float *A,int num_elements){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<num_elements){
for(int stride = 1;stride<num_elements;stride*=2){
__syncthreads();
if(i%(2*stride) == 0){
float temp = 0;
if(i+stride<num_elements)temp = A[i + stride];
float partial_sum = A[i] + temp;
A[i] = partial_sum;
}
}
}
}
|
#include <hip/hip_runtime.h>
__global__ void
reduction_sum(float *A,int num_elements){
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<num_elements){
for(int stride = 1;stride<num_elements;stride*=2){
__syncthreads();
if(i%(2*stride) == 0){
float temp = 0;
if(i+stride<num_elements)temp = A[i + stride];
float partial_sum = A[i] + temp;
A[i] = partial_sum;
}
}
}
}
|
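Note: the conversion is mechanical, but the kernel itself carries a hazard on both vendors: __syncthreads() sits inside if(i<num_elements), so whenever num_elements is not a multiple of the block size some threads skip the barrier, which is undefined behavior. A safer shape, sketched below while keeping the original's naive algorithm (which still only synchronizes within one block), hoists the barrier out of the divergent branch:

// Every thread reaches the barrier; only in-range threads touch memory.
__global__ void reduction_sum_safe(float *A, int num_elements) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    for (int stride = 1; stride < num_elements; stride *= 2) {
        __syncthreads();
        if (i < num_elements && i % (2 * stride) == 0 && i + stride < num_elements)
            A[i] += A[i + stride];
    }
}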
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda.h>
#include <iostream>
using namespace std;
/**
* C = A + B (one element per thread)
*/
__global__
void addMatricesElt(float* C, const float* A, const float* B, int dim)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(col < dim && row < dim) {
int i = row*dim + col;
C[i] = A[i] + B[i];
}
}
/**
* C = A + B (one row per thread)
*/
__global__
void addMatricesRow(float* C, const float* A, const float* B, int dim)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < dim) {
int rowdim = row*dim;
for (int col = 0; col < dim; ++col) {
int i = rowdim + col;
C[i] = A[i] + B[i];
}
}
}
/**
* C = A + B (one column per thread)
*/
__global__
void addMatricesCol(float* C, const float* A, const float* B, int dim)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < dim) {
for (int row = 0; row < dim; ++row) {
int i = row*dim + col;
C[i] = A[i] + B[i];
}
}
}
int main(int argc, char* argv[])
{
int dim = atoi(argv[1]);
int size = dim*dim;
// creating matrices on host side
float* h_A = new float[size];
float* h_B = new float[size];
for (int i = 0; i < size; ++i) {
h_A[i] = 1.0f;
h_B[i] = 2.0f;
}
// Copy matrices on device side
float* d_A;
cudaMalloc((void**)&d_A, size*sizeof(float));
cudaMemcpy((void*)d_A, (void*)h_A, size*sizeof(float), cudaMemcpyHostToDevice);
float* d_B;
cudaMalloc((void**)&d_B, size*sizeof(float));
cudaMemcpy((void*)d_B, (void*)h_B, size*sizeof(float), cudaMemcpyHostToDevice);
// Allocate C matrix on device
float* d_C;
cudaMalloc((void**)&d_C, size*sizeof(float));
// call Kernel
int type = atoi(argv[2]);
if (type == 1) { // one element per thread
dim3 dimGrid(ceil(dim/16.0f), ceil(dim/16.0f), 1);
dim3 dimBlock(16, 16, 1);
addMatricesElt<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
}
else if (type == 2) { // one row per thread
dim3 dimGrid(1, ceil(dim/256.0f), 1);
dim3 dimBlock(1, 256, 1);
addMatricesRow<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
}
else if (type == 3) { // one column per thread
dim3 dimGrid(ceil(dim/256.0f), 1, 1);
dim3 dimBlock(256, 1, 1);
addMatricesCol<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
}
else
cout << "invalid argument!" << endl;
// Recover C matrix from device to host
float* h_C = new float[size];
cudaMemcpy((void*)h_C, (void*)d_C, size*sizeof(float), cudaMemcpyDeviceToHost);
// Check results
for (int i = 0; i < size; ++i) {
if (fabs(h_C[i] - 3.0f) > 0.0001f) {
cout << "ERROR: something is not right." << endl;
break;
}
}
// Finalize storage
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
delete [] h_A;
delete [] h_B;
delete [] h_C;
cout << "Closing..." << endl;
return 0;
}
|
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
/**
* C = A + B (one element per thread)
*/
__global__
void addMatricesElt(float* C, const float* A, const float* B, int dim)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if(col < dim && row < dim) {
int i = row*dim + col;
C[i] = A[i] + B[i];
}
}
/**
* C = A + B (one row per thread)
*/
__global__
void addMatricesRow(float* C, const float* A, const float* B, int dim)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < dim) {
int rowdim = row*dim;
for (int col = 0; col < dim; ++col) {
int i = rowdim + col;
C[i] = A[i] + B[i];
}
}
}
/**
* C = A + B (one column per thread)
*/
__global__
void addMatricesCol(float* C, const float* A, const float* B, int dim)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < dim) {
for (int row = 0; row < dim; ++row) {
int i = row*dim + col;
C[i] = A[i] + B[i];
}
}
}
int main(int argc, char* argv[])
{
int dim = atoi(argv[1]);
int size = dim*dim;
// creating matrices on host side
float* h_A = new float[size];
float* h_B = new float[size];
for (int i = 0; i < size; ++i) {
h_A[i] = 1.0f;
h_B[i] = 2.0f;
}
// Copy matrices on device side
float* d_A;
hipMalloc((void**)&d_A, size*sizeof(float));
hipMemcpy((void*)d_A, (void*)h_A, size*sizeof(float), hipMemcpyHostToDevice);
float* d_B;
hipMalloc((void**)&d_B, size*sizeof(float));
hipMemcpy((void*)d_B, (void*)h_B, size*sizeof(float), hipMemcpyHostToDevice);
// Allocate C matrix on device
float* d_C;
hipMalloc((void**)&d_C, size*sizeof(float));
// call Kernel
int type = atoi(argv[2]);
if (type == 1) { // one element per thread
dim3 dimGrid(ceil(dim/16.0f), ceil(dim/16.0f), 1);
dim3 dimBlock(16, 16, 1);
addMatricesElt<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
}
else if (type == 2) { // one row per thread
dim3 dimGrid(1, ceil(dim/256.0f), 1);
dim3 dimBlock(1, 256, 1);
addMatricesRow<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
}
else if (type == 3) { // one column per thread
dim3 dimGrid(ceil(dim/256.0f), 1, 1);
dim3 dimBlock(256, 1, 1);
addMatricesCol<<<dimGrid, dimBlock>>> (d_C, d_A, d_B, dim);
}
else
cout << "invalid argument!" << endl;
// Recover C matrix from device to host
float* h_C = new float[size];
hipMemcpy((void*)h_C, (void*)d_C, size*sizeof(float), hipMemcpyDeviceToHost);
// Check results
for (int i = 0; i < size; ++i) {
if (fabs(h_C[i] - 3.0f) > 0.0001f) {
cout << "ERROR: something is not right." << endl;
break;
}
}
// Finalize storage
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
delete [] h_A;
delete [] h_B;
delete [] h_C;
cout << "Closing..." << endl;
return 0;
}
|
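Note: main() above dereferences argv[1] and argv[2] without checking argc, so a missing argument crashes the program before any GPU work starts. A small guard, sketched here (the usage text is invented):

// Validate the command line before touching argv.
if (argc < 3) {
    cout << "usage: " << argv[0] << " <dim> <type 1|2|3>" << endl;
    return 1;
}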
Convert the following CUDA code to AMD GPU code:
cuda
extern "C" __global__ void
shift(float2* arr, int resx, int resy, int resz, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int idx = resx / 2;
int idy = resy / 2;
int idz = resz / 2;
float2 tmp;
if (i / resy / resz < idx) {
if (i / resz % resy < idy) {
if (i % resz < idz) {
//0
tmp = arr[i];
arr[i] = arr[i + idz + idy * resz + idx * resy * resz];
arr[i + idz + idy * resz + idx * resy * resz] = tmp;
}
else {
//1
tmp = arr[i];
arr[i] = arr[i - idz + idy * resz + idx * resy * resz];
arr[i - idz + idy * resz + idx * resy * resz] = tmp;
}
}
else {
if (i % resz < idz) {
//2
tmp = arr[i];
arr[i] = arr[i + idz - idy * resz + idx * resy * resz];
arr[i + idz - idy * resz + idx * resy * resz] = tmp;
}
else {
//3
tmp = arr[i];
arr[i] = arr[i - idz - idy * resz + idx * resy * resz];
arr[i - idz - idy * resz + idx * resy * resz] = tmp;
}
}
}
}
|
#include <hip/hip_runtime.h>
extern "C" __global__ void
shift(float2* arr, int resx, int resy, int resz, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int idx = resx / 2;
int idy = resy / 2;
int idz = resz / 2;
float2 tmp;
if (i / resy / resz < idx) {
if (i / resz % resy < idy) {
if (i % resz < idz) {
//0
tmp = arr[i];
arr[i] = arr[i + idz + idy * resz + idx * resy * resz];
arr[i + idz + idy * resz + idx * resy * resz] = tmp;
}
else {
//1
tmp = arr[i];
arr[i] = arr[i - idz + idy * resz + idx * resy * resz];
arr[i - idz + idy * resz + idx * resy * resz] = tmp;
}
}
else {
if (i % resz < idz) {
//2
tmp = arr[i];
arr[i] = arr[i + idz - idy * resz + idx * resy * resz];
arr[i + idz - idy * resz + idx * resy * resz] = tmp;
}
else {
//3
tmp = arr[i];
arr[i] = arr[i - idz - idy * resz + idx * resy * resz];
arr[i - idz - idy * resz + idx * resy * resz] = tmp;
}
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void breadth_first_search_csr_gpu(unsigned int* cum_row_indexes, unsigned int* column_indexes, int* matrix_data, unsigned int* in_infections, unsigned int* out_infections, unsigned int rows) {
unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < rows) {
if (in_infections[row] == 1) {
out_infections[row] = 1;
unsigned int row_start = cum_row_indexes[row];
unsigned int row_end = cum_row_indexes[row+1];
for (int i = row_start; i < row_end; i++) {
int timesteps_to_transmission = matrix_data[i];
if (timesteps_to_transmission != 0) {
if (timesteps_to_transmission == 1) {
out_infections[column_indexes[i]] = 1;
}
matrix_data[i] -= 1;
}
}
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void breadth_first_search_csr_gpu(unsigned int* cum_row_indexes, unsigned int* column_indexes, int* matrix_data, unsigned int* in_infections, unsigned int* out_infections, unsigned int rows) {
unsigned int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < rows) {
if (in_infections[row] == 1) {
out_infections[row] = 1;
unsigned int row_start = cum_row_indexes[row];
unsigned int row_end = cum_row_indexes[row+1];
for (int i = row_start; i < row_end; i++) {
int timesteps_to_transmission = matrix_data[i];
if (timesteps_to_transmission != 0) {
if (timesteps_to_transmission == 1) {
out_infections[column_indexes[i]] = 1;
}
matrix_data[i] -= 1;
}
}
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void ForwardLinear(float *A, float *W, float *b, int nRowsW, int nColsW, int nColsA, float *Z)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float ZValue = 0;
if (row < nRowsW && col < nColsA)
{
for (int i = 0; i < nColsW; i++)
{
ZValue += W[row * nColsW + i] * A[i * nColsA + col];
}
Z[row * nColsA + col] = ZValue + b[row];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void ForwardLinear(float *A, float *W, float *b, int nRowsW, int nColsW, int nColsA, float *Z)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float ZValue = 0;
if (row < nRowsW && col < nColsA)
{
for (int i = 0; i < nColsW; i++)
{
ZValue += W[row * nColsW + i] * A[i * nColsA + col];
}
Z[row * nColsA + col] = ZValue + b[row];
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
// Maximum value of the matrix element
#define MAX 100
#define MAX_ITER 100
#define TOL 0.000001
// Generate a random float number with the maximum value of max
float rand_float(int max) {
return ((float)rand() / (float)(RAND_MAX)) * max;
}
// Calculates the space for the matrices (bytes)
int calc_mem_size(int n, int m) {
return (n * m * sizeof(float));
}
// Calculates the sum of a given flat matrix (array)
float calc_mat_sum(float **mat, int n, int m) {
float sum = 0.0f;
for (long i = 0; i < (n*m); i++) {
sum += (*mat)[i];
}
return sum;
}
// Allocate 2D matrix in the host
void alloc_host_matrix(float **mat, int n, int m, bool must_init) {
*mat = (float *) malloc(n * m * sizeof(float));
// In case of initializing the matrix with the initial values
if (must_init) {
for (int i = 0; i < (n*m); i++) {
(*mat)[i] = rand_float(MAX);
}
}
}
// Allocate 2D matrix in the device
void alloc_dev_matrix(float **mat, int n, int m) {
size_t memSize = (n * m * sizeof(float));
cudaMalloc((void **) mat, memSize);
}
// Write the time results into a CSV file
void write_to_file(int n, int num_blocks, int num_threads, float total_time, float exec_time) {
FILE *f;
char* file_name = "results.csv";
if (access(file_name, F_OK) == -1) {
f = fopen(file_name, "a");
fprintf(f, "Matrix size;Blocks;Threads per block;Total time;Operations time;\n");
}
else {
f = fopen(file_name, "a");
}
fprintf(f, "%d;%d;%d;%f;%f;\n", n, num_blocks, num_threads, total_time, exec_time);
fclose(f);
}
// Solver (executed by each thread)
__global__ void solver(float **mat, float **mat_diff, int n) {
// Original position that this thread is assigned
int i_org = (blockDim.x * blockIdx.x) + threadIdx.x;
// Real position that this thread is going to compute
int i = i_org;
i = i + n; // VIP: The threads must avoid first row
i = i + 1; // VIP: The threads must avoid first column
// In case the thread is leftover
if (i >= ((n*n) - n - 1)) {
return;
}
float temp;
float diff = 0;
int cnt_iter = 0;
const int pos_up = i - n;
const int pos_do = i + n;
const int pos_le = i - 1;
const int pos_ri = i + 1;
while (cnt_iter < MAX_ITER) {
temp = (*mat)[i];
(*mat)[i] = 0.2 * ((*mat)[i] + (*mat)[pos_le] + (*mat)[pos_up] + (*mat)[pos_ri] + (*mat)[pos_do]);
// The LAST difference between the prev value and the new value is stored
diff = abs((*mat)[i] - temp);
cnt_iter ++;
}
// Finally the difference is stored in its corresponding cell
// VIP: Use '=' not '+=' to avoid non-zero values on the first func call
(*mat_diff)[i_org] = diff;
}
int main(int argc, char *argv[]) {
if (argc < 3) {
printf("Call this program with two parameters:\n");
printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n");
printf("\t threads_per_block: Better a power of 2 (e.g. : 16, 32, 64)\n");
exit(1);
}
int n = atoi(argv[1]);
int threads_per_block = atoi(argv[2]);
printf("Matrix size = %d\n", n);
printf("Threads per block = %d\n", threads_per_block);
// Start recording the time
clock_t i_total_t = clock();
float *host_mat_vals;
float *host_mat_diff;
float *dev_mat_vals;
float *dev_mat_diff;
// Allocating matrices space both in host and device
alloc_host_matrix(&host_mat_vals, n, n, true);
alloc_host_matrix(&host_mat_diff, n-2, n-2, false);
alloc_dev_matrix(&dev_mat_vals, n, n);
alloc_dev_matrix(&dev_mat_diff, n-2, n-2);
// Calculating the memory size for allocating (bytes)
size_t all_mat_size = calc_mem_size(n, n);
size_t core_mat_size = calc_mem_size(n-2, n-2);
// Passing data from host to device
cudaMemcpy(dev_mat_vals, host_mat_vals, all_mat_size, cudaMemcpyHostToDevice);
// Calculate the number of threads to launch (1 per core cell)
int core_dim = (n-2) * (n-2);
// Given a constant number of threads per block, determine the blocks
int num_blocks = (int) ceil(core_dim / threads_per_block);
dim3 dimGrid(num_blocks);
dim3 dimBlock(threads_per_block);
// Time before the execution
clock_t i_exec_t = clock();
while (true) {
// Make all the threads synchronous
solver<<< dimGrid, dimBlock >>>(&dev_mat_vals, &dev_mat_diff, n);
// The ThreadSynchronize would be necessary in case Memcpy is not done
// However, as it is called later on, the following line is commented
// cudaThreadSynchronize();
// Passing the differential data back from the device to the host
cudaMemcpy(host_mat_diff, dev_mat_diff, core_mat_size, cudaMemcpyDeviceToHost);
// Breaks in case of reaching the TOL threshold
float diffs_sum = calc_mat_sum(&host_mat_diff, n-2, n-2);
float diffs_mean = diffs_sum / (n-2) / (n-2);
if (diffs_mean < TOL) {
break;
}
}
// Time before the execution
clock_t f_exec_t = clock();
// Passing data back from the device to the host
cudaMemcpy(host_mat_vals, dev_mat_vals, all_mat_size, cudaMemcpyDeviceToHost);
// Finally, the matrices are freed
cudaFree(dev_mat_vals);
cudaFree(dev_mat_diff);
free(host_mat_vals);
free(host_mat_diff);
// Finish recording the time
clock_t f_total_t = clock();
float total_time = (float)(f_total_t - i_total_t) / CLOCKS_PER_SEC;
float exec_time = (float)(f_exec_t - i_exec_t) / CLOCKS_PER_SEC;
printf("Total time: %f\n", total_time);
printf("Operations time: %f\n", exec_time);
write_to_file(n, num_blocks, threads_per_block, total_time, exec_time);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
// Maximum value of the matrix element
#define MAX 100
#define MAX_ITER 100
#define TOL 0.000001
// Generate a random float number with the maximum value of max
float rand_float(int max) {
return ((float)rand() / (float)(RAND_MAX)) * max;
}
// Calculates the space for the matrices (bytes)
int calc_mem_size(int n, int m) {
return (n * m * sizeof(float));
}
// Calculates the sum of a given flat matrix (array)
float calc_mat_sum(float **mat, int n, int m) {
float sum = 0.0f;
for (long i = 0; i < (n*m); i++) {
sum += (*mat)[i];
}
return sum;
}
// Allocate 2D matrix in the host
void alloc_host_matrix(float **mat, int n, int m, bool must_init) {
*mat = (float *) malloc(n * m * sizeof(float));
// In case of initializing the matrix with the initial values
if (must_init) {
for (int i = 0; i < (n*m); i++) {
(*mat)[i] = rand_float(MAX);
}
}
}
// Allocate 2D matrix in the device
void alloc_dev_matrix(float **mat, int n, int m) {
size_t memSize = (n * m * sizeof(float));
hipMalloc(&mat, memSize);
}
// Write the time results into a CSV file
void write_to_file(int n, int num_blocks, int num_threads, float total_time, float exec_time) {
FILE *f;
char* file_name = "results.csv";
if (access(file_name, F_OK) == -1) {
f = fopen(file_name, "a");
fprintf(f, "Matrix size;Blocks;Threads per block;Total time;Operations time;\n");
}
else {
f = fopen(file_name, "a");
}
fprintf(f, "%d;%d;%d;%f;%f;\n", n, num_blocks, num_threads, total_time, exec_time);
fclose(f);
}
// Solver (executed by each thread)
__global__ void solver(float **mat, float **mat_diff, int n) {
// Original position that this thread is assigned
int i_org = (blockDim.x * blockIdx.x) + threadIdx.x;
// Real position that this thread is going to compute
int i = i_org;
i = i + n; // VIP: The threads must avoid first row
i = i + 1; // VIP: The threads must avoid first column
// In case the thread is leftover
if (i >= ((n*n) - n - 1)) {
return;
}
float temp;
float diff = 0;
int cnt_iter = 0;
const int pos_up = i - n;
const int pos_do = i + n;
const int pos_le = i - 1;
const int pos_ri = i + 1;
while (cnt_iter < MAX_ITER) {
temp = (*mat)[i];
(*mat)[i] = 0.2 * ((*mat)[i] + (*mat)[pos_le] + (*mat)[pos_up] + (*mat)[pos_ri] + (*mat)[pos_do]);
// The LAST difference between the prev value and the new value is stored
diff = abs((*mat)[i] - temp);
cnt_iter ++;
}
// Finally the difference is stored in its corresponding cell
// VIP: Use '=' not '+=' to avoid leftover non-zero values on the first call
(*mat_diff)[i_org] = diff;
}
int main(int argc, char *argv[]) {
if (argc < 3) {
printf("Call this program with two parameters:\n");
printf("\t matrix_size: Add 2 to a power of 2 (e.g. : 18, 1026)\n");
printf("\t threads_per_block: Better a power of 2 (e.g. : 16, 32, 64)\n");
exit(1);
}
int n = atoi(argv[1]);
int threads_per_block = atoi(argv[2]);
printf("Matrix size = %d\n", n);
printf("Threads per block = %d\n", threads_per_block);
// Start recording the time
clock_t i_total_t = clock();
float *host_mat_vals;
float *host_mat_diff;
float *dev_mat_vals;
float *dev_mat_diff;
// Allocating matrices space both in host and device
alloc_host_matrix(&host_mat_vals, n, n, true);
alloc_host_matrix(&host_mat_diff, n-2, n-2, false);
alloc_dev_matrix(&dev_mat_vals, n, n);
alloc_dev_matrix(&dev_mat_diff, n-2, n-2);
// Calculating the memory size for allocating (bytes)
size_t all_mat_size = calc_mem_size(n, n);
size_t core_mat_size = calc_mem_size(n-2, n-2);
// Passing data from host to device
hipMemcpy(dev_mat_vals, host_mat_vals, all_mat_size, hipMemcpyHostToDevice);
// Calculate the number of threads to launch (1 per core cell)
int core_dim = (n-2) * (n-2);
// Given a constant number of threads per block, determine the blocks
int num_blocks = (core_dim + threads_per_block - 1) / threads_per_block; // integer ceil
dim3 dimGrid(num_blocks);
dim3 dimBlock(threads_per_block);
// Time before the execution
clock_t i_exec_t = clock();
while (true) {
// Launch one solver sweep over the core cells (the launch is asynchronous)
solver<<< dimGrid, dimBlock >>>(&dev_mat_vals, &dev_mat_diff, n);
// A device synchronize would be necessary if no Memcpy followed;
// as the blocking Memcpy below provides it, the call stays commented
// hipDeviceSynchronize();
// Passing the differential data back from the device to the host
hipMemcpy(host_mat_diff, dev_mat_diff, core_mat_size, hipMemcpyDeviceToHost);
// Breaks in case of reaching the TOL threshold
float diffs_sum = calc_mat_sum(&host_mat_diff, n-2, n-2);
float diffs_mean = diffs_sum / (n-2) / (n-2);
if (diffs_mean < TOL) {
break;
}
}
// Time after the execution
clock_t f_exec_t = clock();
// Passing data back from the device to the host
hipMemcpy(host_mat_vals, dev_mat_vals, all_mat_size, hipMemcpyDeviceToHost);
// Finally, the matrices are freed
hipFree(dev_mat_vals);
hipFree(dev_mat_diff);
free(host_mat_vals);
free(host_mat_diff);
// Finish recording the time
clock_t f_total_t = clock();
float total_time = (float)(f_total_t - i_total_t) / CLOCKS_PER_SEC;
float exec_time = (float)(f_exec_t - i_exec_t) / CLOCKS_PER_SEC;
printf("Total time: %f\n", total_time);
printf("Operations time: %f\n", exec_time);
write_to_file(n, num_blocks, threads_per_block, total_time, exec_time);
return 0;
}
|
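Note on this pair: the HIP answer faithfully carries over two pitfalls already present in the CUDA source. alloc_dev_matrix calls hipMalloc(&mat, memSize), which writes the allocation into a local copy of the parameter instead of through it, and the kernel is launched with &dev_mat_vals, a host address that the device cannot safely dereference as float **. A minimal sketch of how the helper and launch could look once corrected (hypothetical, not part of the recorded answer):
// Hypothetical fix: write through the caller's pointer
void alloc_dev_matrix(float **mat, int n, int m) {
size_t memSize = n * m * sizeof(float);
hipMalloc((void **)mat, memSize); // mat, not &mat
}
// ...and pass the device pointers to the kernel by value:
// __global__ void solver(float *mat, float *mat_diff, int n);
// solver<<< dimGrid, dimBlock >>>(dev_mat_vals, dev_mat_diff, n);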
Convert the following CUDA code to AMD GPU code:
cuda
//nvcc -ptx EM5.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
__device__ void EM1( double *r,
double *z,
double * ar0,
double * br0,
double * az0,
double * bz0,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz ) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum ){
return;
}
double r0 = r[n];
double z0 = z[n];
int ar,az,a1,a2,a3,a4;
double b1,b2,b3,b4;
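// a1..a4 hold 1-based flat indices of the four grid nodes surrounding the
// particle; b1..b4 are the matching bilinear weights (edges are clamped below)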
// Er
ar = floor(r0/dr-0.5);
az = floor(z0/dz);
a1 = ar + az * gridR + 1;
a2 = (ar+1) + az * gridR + 1;
a3 = ar + (az+1) * gridR + 1;
a4 = (ar+1) + (az+1) * gridR + 1;
if( ar<0 ){
b1 = 0;
b2 = (az*dz+dz-z0)/dz;
b3 = 0;
b4 = (z0-az*dz)/dz;
a1 = 1;
a3 = 1;
}
else if( ar >= gridR-1 ){
b1 = (az*dz+dz-z0)/dz;
b2 = 0;
b3 = (z0-az*dz)/dz;
b4 = 0;
a2 = 1;
a4 = 1;
}
else{
b1 = ((ar+1.5)*dr-r0)*((az+1)*dz-z0)/(dr*dz);
b2 = (r0-(ar+0.5)*dr)*((az+1)*dz-z0)/(dr*dz);
b3 = ((ar+1.5)*dr-r0)*(z0-az*dz)/(dr*dz);
b4 = (r0-(ar+0.5)*dr)*(z0-az*dz)/(dr*dz);
}
ar0[n] = a1;
ar0[n+parNum] = a2;
ar0[n+2*parNum] = a3;
ar0[n+3*parNum] = a4;
br0[n] = b1;
br0[n+parNum] = b2;
br0[n+2*parNum] = b3;
br0[n+3*parNum] = b4;
// Ez
ar = floor(r0/dr);
az = floor(z0/dz-0.5);
a1 = ar + az * (gridR+1) + 1;
a2 = (ar+1) + az * (gridR+1) + 1;
a3 = ar + (az+1) * (gridR+1) + 1;
a4 = (ar+1) + (az+1) * (gridR+1) + 1;
if( az<0 ){
b1 = 0;
b2 = 0;
b3 = (ar*dr+dr-r0)/dr;
b4 = (r0-ar*dr)/dr;
a1 = 1;
a2 = 1;
}
else if( az >= gridZ-1 ){
b1 = (ar*dr+dr-r0)/dr;
b2 = (r0-ar*dr)/dr;
b3 = 0;
b4 = 0;
a3 = 1;
a4 = 1;
}
else{
b1 = ((ar+1)*dr-r0)*((az+1.5)*dz-z0)/(dr*dz);
b2 = (r0-ar*dr)*((az+1.5)*dz-z0)/(dr*dz);
b3 = ((ar+1)*dr-r0)*(z0-(az+0.5)*dz)/(dr*dz);
b4 = (r0-ar*dr)*(z0-(az+0.5)*dz)/(dr*dz);
}
az0[n] = a1;
az0[n+parNum] = a2;
az0[n+2*parNum] = a3;
az0[n+3*parNum] = a4;
bz0[n] = b1;
bz0[n+parNum] = b2;
bz0[n+2*parNum] = b3;
bz0[n+3*parNum] = b4;
}
__global__ void processMandelbrotElement(
double *r,
double *z,
double * ar0,
double * br0,
double * az0,
double * bz0,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz ) {
EM1(r, z, ar0, br0, az0, bz0, parNum, gridR, gridZ, dr, dz);
}
|
#include <hip/hip_runtime.h>
//nvcc -ptx EM5.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
__device__ void EM1( double *r,
double *z,
double * ar0,
double * br0,
double * az0,
double * bz0,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz ) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum ){
return;
}
double r0 = r[n];
double z0 = z[n];
int ar,az,a1,a2,a3,a4;
double b1,b2,b3,b4;
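// a1..a4 hold 1-based flat indices of the four grid nodes surrounding the
// particle; b1..b4 are the matching bilinear weights (edges are clamped below)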
// Er
ar = floor(r0/dr-0.5);
az = floor(z0/dz);
a1 = ar + az * gridR + 1;
a2 = (ar+1) + az * gridR + 1;
a3 = ar + (az+1) * gridR + 1;
a4 = (ar+1) + (az+1) * gridR + 1;
if( ar<0 ){
b1 = 0;
b2 = (az*dz+dz-z0)/dz;
b3 = 0;
b4 = (z0-az*dz)/dz;
a1 = 1;
a3 = 1;
}
else if( ar >= gridR-1 ){
b1 = (az*dz+dz-z0)/dz;
b2 = 0;
b3 = (z0-az*dz)/dz;
b4 = 0;
a2 = 1;
a4 = 1;
}
else{
b1 = ((ar+1.5)*dr-r0)*((az+1)*dz-z0)/(dr*dz);
b2 = (r0-(ar+0.5)*dr)*((az+1)*dz-z0)/(dr*dz);
b3 = ((ar+1.5)*dr-r0)*(z0-az*dz)/(dr*dz);
b4 = (r0-(ar+0.5)*dr)*(z0-az*dz)/(dr*dz);
}
ar0[n] = a1;
ar0[n+parNum] = a2;
ar0[n+2*parNum] = a3;
ar0[n+3*parNum] = a4;
br0[n] = b1;
br0[n+parNum] = b2;
br0[n+2*parNum] = b3;
br0[n+3*parNum] = b4;
// Ez
ar = floor(r0/dr);
az = floor(z0/dz-0.5);
a1 = ar + az * (gridR+1) + 1;
a2 = (ar+1) + az * (gridR+1) + 1;
a3 = ar + (az+1) * (gridR+1) + 1;
a4 = (ar+1) + (az+1) * (gridR+1) + 1;
if( az<0 ){
b1 = 0;
b2 = 0;
b3 = (ar*dr+dr-r0)/dr;
b4 = (r0-ar*dr)/dr;
a1 = 1;
a2 = 1;
}
else if( az >= gridZ-1 ){
b1 = (ar*dr+dr-r0)/dr;
b2 = (r0-ar*dr)/dr;
b3 = 0;
b4 = 0;
a3 = 1;
a4 = 1;
}
else{
b1 = ((ar+1)*dr-r0)*((az+1.5)*dz-z0)/(dr*dz);
b2 = (r0-ar*dr)*((az+1.5)*dz-z0)/(dr*dz);
b3 = ((ar+1)*dr-r0)*(z0-(az+0.5)*dz)/(dr*dz);
b4 = (r0-ar*dr)*(z0-(az+0.5)*dz)/(dr*dz);
}
az0[n] = a1;
az0[n+parNum] = a2;
az0[n+2*parNum] = a3;
az0[n+3*parNum] = a4;
bz0[n] = b1;
bz0[n+parNum] = b2;
bz0[n+2*parNum] = b3;
bz0[n+3*parNum] = b4;
}
__global__ void processMandelbrotElement(
double *r,
double *z,
double * ar0,
double * br0,
double * az0,
double * bz0,
const int parNum,
const int gridR,
const int gridZ,
const double dr,
const double dz ) {
EM1(r, z, ar0, br0, az0, bz0, parNum, gridR, gridZ, dr, dz);
}
|
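Note on this pair: apart from the added <hip/hip_runtime.h> include, the kernel compiles unchanged, because __device__, __global__ and the threadIdx/blockIdx/blockDim/gridDim built-ins are source-compatible in HIP, and hipcc accepts the triple-chevron launch syntax. As a hedged sketch, assuming hypothetical device buffers d_r, d_z, d_ar0, d_br0, d_az0, d_bz0 and host-side launch dimensions, the macro alternative to <<< >>> would be:
// hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...)
hipLaunchKernelGGL(processMandelbrotElement, dim3(numBlocks), dim3(threadsPerBlock), 0, 0,
d_r, d_z, d_ar0, d_br0, d_az0, d_bz0, parNum, gridR, gridZ, dr, dz);
The //nvcc -ptx build comment carried over from the source would become a hipcc invocation on AMD.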
Convert the following CUDA code to AMD GPU code:
cuda
/************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Example : multiple-cuda-streams.cu
Objective : Objective is to demonstrate multiple streams for
addition of two vectors
Input : None
Output : Time of cuda event elapsed time.
Created : August-2013
E-mail : [email protected]
**************************************************************************/
#include<stdio.h>
void CUDA_SAFE_CALL(cudaError_t call)
{
cudaError_t ret = call;
switch(ret)
{
case cudaSuccess:
break;
default:
{
printf(" ERROR at line :%i.%d' '%s\n",__LINE__,ret,cudaGetErrorString(ret));
exit(-1);
break;
}
}
}
#define N (1024*1024) /* N = Vector Size */
#define FULL_DATA_SIZE (N*20)
__global__ void kernel( int *a, int *b, int *c ) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
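// idx1/idx2 fold into the first 256 elements; each output blends the
// three-point means of a and b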
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
int main( void ) {
cudaDeviceProp prop;
int whichDevice;
CUDA_SAFE_CALL( cudaGetDevice( &whichDevice ) );
CUDA_SAFE_CALL( cudaGetDeviceProperties( &prop, whichDevice ) );
if (!prop.deviceOverlap) {
printf( "Device will not handle overlaps, so no speed up from streams\n" );
return 0;
}
if( (prop.concurrentKernels == 0 )) //check concurrent kernel support
{
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
if(prop.asyncEngineCount == 0) //check concurrent data transfer support
{
printf("GPU does not support concurrent Data transer and overlaping of kernel execution & data transfer\n");
printf("Mem copy call will be blocking calls\n");
}
cudaEvent_t start, stop;
float elapsedTime;
cudaStream_t stream0, stream1;
int *host_a, *host_b, *host_c;
int *dev_a0, *dev_b0, *dev_c0;
int *dev_a1, *dev_b1, *dev_c1;
// start the timers
CUDA_SAFE_CALL( cudaEventCreate( &start ) );
CUDA_SAFE_CALL( cudaEventCreate( &stop ) );
// initialize the streams
CUDA_SAFE_CALL( cudaStreamCreate( &stream0 ) );
CUDA_SAFE_CALL( cudaStreamCreate( &stream1 ) );
// allocate the memory on the GPU
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_a0,
N * sizeof(int) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_b0,
N * sizeof(int) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_c0,
N * sizeof(int) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_a1,
N * sizeof(int) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_b1,
N * sizeof(int) ) );
CUDA_SAFE_CALL( cudaMalloc( (void**)&dev_c1,
N * sizeof(int) ) );
// allocate host locked memory, used to stream
CUDA_SAFE_CALL( cudaHostAlloc( (void**)&host_a,
FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault ) );
CUDA_SAFE_CALL( cudaHostAlloc( (void**)&host_b,
FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault ) );
CUDA_SAFE_CALL( cudaHostAlloc( (void**)&host_c,
FULL_DATA_SIZE * sizeof(int),
cudaHostAllocDefault ) );
for (int i=0; i<FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
CUDA_SAFE_CALL( cudaEventRecord( start, 0 ) );
// now loop over full data, in bite-sized chunks
for (int i=0; i<FULL_DATA_SIZE; i+= N*2) {
// enqueue copies of a in stream0 and stream1
CUDA_SAFE_CALL( cudaMemcpyAsync( dev_a0, host_a+i,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream0 ) );
CUDA_SAFE_CALL( cudaMemcpyAsync( dev_a1, host_a+i+N,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream1 ) );
// enqueue copies of b in stream0 and stream1
CUDA_SAFE_CALL( cudaMemcpyAsync( dev_b0, host_b+i,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream0 ) );
CUDA_SAFE_CALL( cudaMemcpyAsync( dev_b1, host_b+i+N,
N * sizeof(int),
cudaMemcpyHostToDevice,
stream1 ) );
// enqueue kernels in stream0 and stream1
kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 );
kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 );
// enqueue copies of c from device to locked memory
CUDA_SAFE_CALL( cudaMemcpyAsync( host_c+i, dev_c0,
N * sizeof(int),
cudaMemcpyDeviceToHost,
stream0 ) );
CUDA_SAFE_CALL( cudaMemcpyAsync( host_c+i+N, dev_c1,
N * sizeof(int),
cudaMemcpyDeviceToHost,
stream1 ) );
}
CUDA_SAFE_CALL( cudaStreamSynchronize( stream0 ) );
CUDA_SAFE_CALL( cudaStreamSynchronize( stream1 ) );
CUDA_SAFE_CALL( cudaEventRecord( stop, 0 ) );
CUDA_SAFE_CALL( cudaEventSynchronize( stop ) );
CUDA_SAFE_CALL( cudaEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time taken: %3.1f ms\n", elapsedTime );
// cleanup the streams and memory
CUDA_SAFE_CALL( cudaFreeHost( host_a ) );
CUDA_SAFE_CALL( cudaFreeHost( host_b ) );
CUDA_SAFE_CALL( cudaFreeHost( host_c ) );
CUDA_SAFE_CALL( cudaFree( dev_a0 ) );
CUDA_SAFE_CALL( cudaFree( dev_b0 ) );
CUDA_SAFE_CALL( cudaFree( dev_c0 ) );
CUDA_SAFE_CALL( cudaFree( dev_a1 ) );
CUDA_SAFE_CALL( cudaFree( dev_b1 ) );
CUDA_SAFE_CALL( cudaFree( dev_c1 ) );
CUDA_SAFE_CALL( cudaStreamDestroy( stream0 ) );
CUDA_SAFE_CALL( cudaStreamDestroy( stream1 ) );
return 0;
}
|
/************************************************************************
C-DAC Tech Workshop : hyPACK-2013
October 15-18, 2013
Example : multiple-cuda-streams.cu
Objective : Objective is to demonstrate multiple streams for
addition of two vectors
Input : None
Output : Time of cuda event elapsed time.
Created : August-2013
E-mail : [email protected]
**************************************************************************/
#include <hip/hip_runtime.h>
#include<stdio.h>
void CUDA_SAFE_CALL(hipError_t call)
{
hipError_t ret = call;
switch(ret)
{
case hipSuccess:
break;
default:
{
printf(" ERROR at line :%i.%d' '%s\n",__LINE__,ret,hipGetErrorString(ret));
exit(-1);
break;
}
}
}
#define N (1024*1024) /* N = Vector Size */
#define FULL_DATA_SIZE (N*20)
__global__ void kernel( int *a, int *b, int *c ) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
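// idx1/idx2 fold into the first 256 elements; each output blends the
// three-point means of a and b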
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
int main( void ) {
hipDeviceProp_t prop;
int whichDevice;
CUDA_SAFE_CALL( hipGetDevice( &whichDevice ) );
CUDA_SAFE_CALL( hipGetDeviceProperties( &prop, whichDevice ) );
if (!prop.deviceOverlap) {
printf( "Device will not handle overlaps, so no speed up from streams\n" );
return 0;
}
if( (prop.concurrentKernels == 0 )) //check concurrent kernel support
{
printf("> GPU does not support concurrent kernel execution\n");
printf(" CUDA kernel runs will be serialized\n");
}
if(prop.asyncEngineCount == 0) //check concurrent data transfer support
{
printf("GPU does not support concurrent Data transer and overlaping of kernel execution & data transfer\n");
printf("Mem copy call will be blocking calls\n");
}
hipEvent_t start, stop;
float elapsedTime;
hipStream_t stream0, stream1;
int *host_a, *host_b, *host_c;
int *dev_a0, *dev_b0, *dev_c0;
int *dev_a1, *dev_b1, *dev_c1;
// start the timers
CUDA_SAFE_CALL( hipEventCreate( &start ) );
CUDA_SAFE_CALL( hipEventCreate( &stop ) );
// initialize the streams
CUDA_SAFE_CALL( hipStreamCreate( &stream0 ) );
CUDA_SAFE_CALL( hipStreamCreate( &stream1 ) );
// allocate the memory on the GPU
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_a0,
N * sizeof(int) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_b0,
N * sizeof(int) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_c0,
N * sizeof(int) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_a1,
N * sizeof(int) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_b1,
N * sizeof(int) ) );
CUDA_SAFE_CALL( hipMalloc( (void**)&dev_c1,
N * sizeof(int) ) );
// allocate host locked memory, used to stream
CUDA_SAFE_CALL( hipHostAlloc( (void**)&host_a,
FULL_DATA_SIZE * sizeof(int),
hipHostMallocDefault ) );
CUDA_SAFE_CALL( hipHostAlloc( (void**)&host_b,
FULL_DATA_SIZE * sizeof(int),
hipHostMallocDefault ) );
CUDA_SAFE_CALL( hipHostAlloc( (void**)&host_c,
FULL_DATA_SIZE * sizeof(int),
hipHostMallocDefault ) );
for (int i=0; i<FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
CUDA_SAFE_CALL( hipEventRecord( start, 0 ) );
// now loop over full data, in bite-sized chunks
for (int i=0; i<FULL_DATA_SIZE; i+= N*2) {
// enqueue copies of a in stream0 and stream1
CUDA_SAFE_CALL( hipMemcpyAsync( dev_a0, host_a+i,
N * sizeof(int),
hipMemcpyHostToDevice,
stream0 ) );
CUDA_SAFE_CALL( hipMemcpyAsync( dev_a1, host_a+i+N,
N * sizeof(int),
hipMemcpyHostToDevice,
stream1 ) );
// enqueue copies of b in stream0 and stream1
CUDA_SAFE_CALL( hipMemcpyAsync( dev_b0, host_b+i,
N * sizeof(int),
hipMemcpyHostToDevice,
stream0 ) );
CUDA_SAFE_CALL( hipMemcpyAsync( dev_b1, host_b+i+N,
N * sizeof(int),
hipMemcpyHostToDevice,
stream1 ) );
// enqueue kernels in stream0 and stream1
kernel<<<N/256,256,0,stream0>>>( dev_a0, dev_b0, dev_c0 );
kernel<<<N/256,256,0,stream1>>>( dev_a1, dev_b1, dev_c1 );
// enqueue copies of c from device to locked memory
CUDA_SAFE_CALL( hipMemcpyAsync( host_c+i, dev_c0,
N * sizeof(int),
hipMemcpyDeviceToHost,
stream0 ) );
CUDA_SAFE_CALL( hipMemcpyAsync( host_c+i+N, dev_c1,
N * sizeof(int),
hipMemcpyDeviceToHost,
stream1 ) );
}
CUDA_SAFE_CALL( hipStreamSynchronize( stream0 ) );
CUDA_SAFE_CALL( hipStreamSynchronize( stream1 ) );
CUDA_SAFE_CALL( hipEventRecord( stop, 0 ) );
CUDA_SAFE_CALL( hipEventSynchronize( stop ) );
CUDA_SAFE_CALL( hipEventElapsedTime( &elapsedTime,
start, stop ) );
printf( "Time taken: %3.1f ms\n", elapsedTime );
// cleanup the streams and memory
CUDA_SAFE_CALL( hipHostFree( host_a ) );
CUDA_SAFE_CALL( hipHostFree( host_b ) );
CUDA_SAFE_CALL( hipHostFree( host_c ) );
CUDA_SAFE_CALL( hipFree( dev_a0 ) );
CUDA_SAFE_CALL( hipFree( dev_b0 ) );
CUDA_SAFE_CALL( hipFree( dev_c0 ) );
CUDA_SAFE_CALL( hipFree( dev_a1 ) );
CUDA_SAFE_CALL( hipFree( dev_b1 ) );
CUDA_SAFE_CALL( hipFree( dev_c1 ) );
CUDA_SAFE_CALL( hipStreamDestroy( stream0 ) );
CUDA_SAFE_CALL( hipStreamDestroy( stream1 ) );
return 0;
}
|
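Note on this pair: hipHostAlloc is the direct rename of cudaHostAlloc, but recent HIP releases deprecate it in favour of hipHostMalloc, which takes the same arguments and the same hipHostMallocDefault flag. A sketch of the preferred spelling for one of the pinned allocations above:
CUDA_SAFE_CALL( hipHostMalloc( (void**)&host_a,
FULL_DATA_SIZE * sizeof(int),
hipHostMallocDefault ) );
The chunked loop itself translates one-to-one: the two streams still overlap host-to-device copies, kernel launches, and device-to-host copies in N-sized halves.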
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Global Memory(GB): %lu\n", prop.totalGlobalMem / (1024 * 1024 * 1024));
printf(" Shared Memory per Block: %d\n", prop.sharedMemPerBlock);
printf(" Registers per Block: %d\n", prop.regsPerBlock);
printf(" Max Threads per Block: %d\n", prop.maxThreadsPerBlock);
printf(" Multiprocessor Count: %d\n", prop.multiProcessorCount);
printf(" Concurrent Kernels: %d\n", prop.concurrentKernels);
printf(" L2 Cache Size: %d\n", prop.l2CacheSize);
}
/*cudaError_t err;
int *edgeArray, numEdges = 4;
err = cudaMalloc((void **)&edgeArray, (numEdges + 1) * sizeof(int));
if( err != cudaSuccess) {
printf("CUDA error: %s ** at Line %d\n", cudaGetErrorString(err), __LINE__);
return EXIT_FAILURE;
}*/
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
printf(" Global Memory(GB): %lu\n", prop.totalGlobalMem / (1024 * 1024 * 1024));
printf(" Shared Memory per Block: %d\n", prop.sharedMemPerBlock);
printf(" Registers per Block: %d\n", prop.regsPerBlock);
printf(" Max Threads per Block: %d\n", prop.maxThreadsPerBlock);
printf(" Multiprocessor Count: %d\n", prop.multiProcessorCount);
printf(" Concurrent Kernels: %d\n", prop.concurrentKernels);
printf(" L2 Cache Size: %d\n", prop.l2CacheSize);
}
/*cudaError_t err;
int *edgeArray, numEdges = 4;
err = cudaMalloc((void **)&edgeArray, (numEdges + 1) * sizeof(int));
if( err != cudaSuccess) {
printf("CUDA error: %s ** at Line %d\n", cudaGetErrorString(err), __LINE__);
return EXIT_FAILURE;
}*/
}
|
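Note on this pair: hipDeviceProp_t keeps the CUDA field names (memoryClockRate, memoryBusWidth, totalGlobalMem, sharedMemPerBlock, and so on), so only the struct type and the query functions are renamed. The sample ignores the status returned by the query; a minimal check, shown here as a hypothetical addition rather than part of the recorded answer, could be:
hipError_t err = hipGetDeviceProperties(&prop, i);
if (err != hipSuccess) {
printf("hipGetDeviceProperties failed: %s\n", hipGetErrorString(err));
}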
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include <time.h>
#include <errno.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o culinear culinear.cu -lm
*
* To run:
* ./culinear
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{83.12,144.47},{65.27,114.80},{65.17,89.01},{68.57,122.90},
{77.57,136.93},{79.84,146.56},{84.42,123.51},{65.34,106.22},
{82.20,120.33},{65.35,142.11},{24.06,53.94},{35.61,87.53},
{ 2.02,22.75},{44.01,89.41},{85.58,141.52},{54.14,88.90},
{35.94,84.11},{22.86,45.76},{75.88,111.25},{54.49,105.83},
{94.65,139.29},{74.97,140.29},{46.31,94.00},{48.12,108.88},
{99.29,146.97},{86.76,135.87},{70.11,120.41},{ 5.01,35.32},
{84.56,147.46},{ 0.19,39.41},{13.16,49.52},{34.11,93.57},
{78.99,108.24},{38.38,81.59},{79.20,115.25},{84.38,146.00},
{92.49,166.93},{19.70,61.69},{23.14,82.49},{13.97,44.80},
{ 2.30,51.01},{15.33,34.49},{64.82,106.29},{39.99,76.65},
{85.93,162.61},{95.23,172.35},{11.05,60.11},{53.84,106.95},
{71.11,135.65},{33.67,88.76},{ 2.41,41.07},{52.19,108.83},
{30.21,57.75},{69.24,132.80},{96.44,157.86},{87.85,133.87},
{15.51,56.56},{53.81,106.32},{50.03,77.59},{77.05,136.93},
{37.29,81.30},{41.74,95.49},{53.91,109.94},{41.20,67.23},
{76.87,124.78},{39.99,82.29},{21.12,55.37},{34.62,65.13},
{20.91,51.88},{76.70,118.05},{ 4.76,45.66},{ 2.29,26.88},
{27.19,59.89},{ 6.82,36.36},{32.36,78.26},{48.72,99.14},
{80.55,127.01},{91.69,150.94},{ 9.68,29.41},{90.74,165.08},
{35.58,70.65},{90.86,166.10},{99.52,157.98},{15.66,47.55},
{45.23,88.34},{63.46,112.27},{64.21,115.27},{86.10,146.87},
{72.98,119.38},{31.78,67.38},{73.97,135.76},{24.43,70.15},
{74.86,135.38},{18.98,50.05},{49.32,106.88},{93.39,154.91},
{ 1.29,39.63},{10.92,61.03},{35.04,64.55},{57.66,111.38},
{42.04,96.64},{ 8.79,40.02},{92.43,147.28},{49.08,85.76},
{30.62,85.66},{51.41,97.98},{88.25,141.92},{27.07,61.14},
{34.88,83.12},{90.82,151.63},{55.07,106.28},{25.73,62.03},
{34.53,63.56},{ 6.61,34.03},{15.62,50.85},{15.32,67.76},
{69.03,114.54},{32.46,56.91},{69.37,123.90},{10.78,57.26},
{10.53,31.37},{53.23,109.49},{ 7.26,44.18},{15.90,63.21},
{ 8.53,36.85},{57.16,109.43},{80.74,122.57},{ 7.25,44.88},
{87.53,144.92},{90.70,165.27},{61.17,108.23},{53.14,111.23},
{94.75,138.45},{ 7.60,42.08},{18.83,76.22},{13.48,71.77},
{ 0.66,39.45},{35.94,87.05},{88.24,169.85},{22.00,70.26},
{93.97,144.15},{93.09,164.94},{41.88,90.98},{35.68,63.90},
{93.69,160.24},{22.20,53.28},{79.69,118.82},{27.57,57.90},
{24.98,72.67},{86.50,133.90},{40.28,86.21},{14.60,48.01},
{72.54,139.19},{55.30,79.54},{ 3.81,33.25},{ 5.68,53.66},
{17.39,44.50},{82.43,123.95},{26.21,57.88},{50.93,102.91},
{41.54,78.81},{36.41,65.17},{39.67,84.96},{74.19,130.02},
{79.23,147.24},{ 5.43,43.11},{59.04,92.40},{ 4.77,21.65},
{62.12,113.31},{80.55,133.55},{42.32,75.65},{83.01,131.90},
{39.06,88.34},{98.75,175.85},{31.87,62.41},{58.73,96.47},
{10.18,53.65},{12.05,47.02},{77.15,116.12},{17.71,57.77},
{82.98,134.75},{18.11,37.59},{32.30,74.54},{81.96,143.75},
{11.77,47.90},{24.43,78.01},{60.70,116.42},{72.05,123.46},
{42.29,75.74},{ 9.64,53.11},{ 3.20,41.20},{75.68,127.51},
{ 7.67,38.82},{ 9.55,45.92},{ 6.22,55.99},{15.01,53.21},
{ 2.50,17.99},{30.97,64.75},{15.92,58.06},{39.77,79.31},
{30.30,80.76},{75.71,133.13},{18.68,54.70},{14.33,48.80},
{65.29,112.12},{85.98,156.29},{68.20,115.16},{76.18,127.58},
{12.05,52.54},{ 1.45,26.32},{51.07,91.58},{70.45,131.48},
{46.34,110.44},{86.40,140.67},{62.22,107.05},{39.48,96.73},
{59.28,114.38},{85.33,140.73},{21.85,63.28},{55.32,96.88},
{54.90,99.09},{81.45,134.43},{94.99,152.75},{60.61,91.15},
{85.61,132.87},{54.72,105.30},{ 9.85,37.72},{85.74,133.99},
{30.19,79.45},{87.18,142.65},{27.50,68.06},{48.21,81.13},
{89.60,139.38},{20.45,61.03},{60.56,101.17},{88.41,139.78},
{84.60,146.42},{25.34,45.91},{32.69,104.43},{13.63,53.03},
{80.26,124.62},{97.15,147.49},{99.16,177.78},{81.31,127.71},
{88.58,136.47},{24.77,59.82},{96.93,160.71},{51.92,102.46},
{27.33,67.99},{92.40,156.65},{87.22,135.40},{ 8.66,33.01},
{79.02,137.74},{92.16,158.93},{70.14,117.38},{31.39,83.34},
{98.54,150.47},{81.39,145.14},{32.19,90.89},{49.53,82.60},
{83.19,147.94},{65.68,121.26},{19.73,73.98},{19.26,39.84},
{68.81,127.82},{21.93,64.48},{22.98,67.44},{ 8.19,35.21},
{83.08,134.02},{69.30,124.24},{19.40,46.96},{64.13,120.93},
{61.91,118.90},{31.92,72.59},{97.06,157.02},{69.68,131.99},
{64.02,120.20},{86.75,141.47},{48.62,98.35},{62.34,118.54},
{23.10,73.71},{ 3.22,24.94},{47.03,98.28},{86.10,129.82},
{17.62,41.43},{20.60,62.70},{25.56,79.02},{98.74,168.44},
{25.25,68.33},{ 0.26,17.74},{73.72,125.70},{62.70,101.61},
{86.10,144.15},{ 7.59,38.21},{65.71,118.18},{57.83,104.28},
{48.00,91.86},{59.53,110.64},{75.08,131.55},{66.96,113.45},
{23.44,41.93},{ 7.22,33.51},{22.13,70.49},{20.24,70.87},
{36.57,59.85},{22.89,50.80},{88.83,128.03},{54.08,109.80},
{20.87,65.63},{80.15,132.14},{91.71,142.11},{12.37,46.56},
{31.09,82.71},{ 9.54,28.65},{16.74,44.18},{37.07,73.24},
{ 1.67,41.10},{ 0.29,12.09},{34.05,80.10},{64.07,112.30},
{64.66,110.15},{21.74,62.28},{74.39,129.73},{53.67,90.13},
{75.14,147.83},{42.98,82.02},{66.29,121.10},{57.34,102.40},
{96.75,152.13},{13.36,48.35},{21.05,73.53},{81.77,135.48},
{88.21,171.75},{51.53,98.91},{21.88,63.71},{89.27,145.47},
{67.70,125.26},{72.69,126.45},{27.77,58.71},{69.38,115.18},
{ 2.59,19.50},{93.93,149.24},{ 4.84,44.09},{19.21,43.14},
{10.58,38.47},{41.51,82.49},{88.02,148.21},{55.22,114.17},
{12.69,79.85},{91.81,160.45},{99.68,162.60},{62.74,103.63},
{10.21,47.93},{ 5.21,28.37},{89.57,148.01},{28.42,54.46},
{61.03,88.74},{73.04,120.93},{71.30,131.03},{ 6.42,27.57},
{82.06,114.82},{50.07,89.66},{76.06,137.34},{69.25,116.77},
{72.62,110.20},{ 8.88,48.25},{24.03,73.68},{52.59,102.23},
{84.77,139.15},{96.75,154.31},{70.15,122.87},{93.18,166.62},
{ 6.17,58.46},{92.22,158.34},{74.61,131.25},{67.46,119.20},
{22.98,57.20},{37.45,86.95},{ 1.97,39.59},{48.29,116.20},
{52.60,109.07},{24.17,56.13},{58.56,116.56},{32.87,65.50},
{ 0.34,43.67},{87.72,142.21},{37.41,62.88},{64.08,127.92},
{42.54,79.79},{35.53,88.48},{ 2.57,23.24},{77.80,122.09},
{ 4.19,35.89},{11.53,28.55},{62.03,82.21},{55.15,93.33},
{63.96,120.79},{73.17,129.77},{57.12,113.60},{32.89,92.86},
{27.89,70.41},{39.21,74.83},{77.58,129.76},{77.44,149.05},
{ 2.87,10.13},{11.11,44.31},{77.46,144.46},{45.30,100.95},
{ 4.69,30.94},{89.47,157.53},{ 7.61,44.77},{23.09,74.16},
{91.49,156.06},{11.20,52.40},{21.47,77.05},{86.58,141.10},
{24.07,57.57},{76.46,137.23},{84.23,120.97},{96.42,157.37},
{98.02,155.25},{99.42,159.62},{12.67,68.56},{36.27,92.72},
{16.08,50.55},{29.05,58.27},{24.65,58.31},{22.59,71.18},
{54.34,115.03},{44.53,96.50},{50.73,109.29},{10.75,45.32},
{62.06,126.81},{12.61,62.62},{21.94,50.52},{86.83,160.25},
{ 9.03,51.65},{73.37,127.89},{54.41,107.85},{95.96,172.35},
{69.67,130.26},{48.73,103.54},{62.30,113.08},{19.39,78.51},
{77.40,124.44},{ 1.63,34.05},{90.02,152.89},{64.47,110.81},
{47.10,103.92},{64.92,116.32},{42.67,73.30},{48.06,76.96},
{35.45,65.22},{98.35,158.55},{17.10,60.38},{29.75,70.75},
{85.75,135.77},{48.27,88.32},{42.05,73.57},{88.04,146.92},
{ 9.72,34.51},{66.61,120.50},{52.60,91.06},{78.80,127.29},
{11.69,48.24},{ 2.59,39.39},{84.26,130.65},{10.82,43.81},
{97.33,173.24},{95.78,157.66},{51.35,81.72},{83.75,136.31},
{72.98,114.92},{70.67,120.19},{90.19,147.54},{39.23,71.88},
{35.17,78.15},{84.31,136.47},{ 4.96,37.06},{13.96,55.78},
{51.70,107.90},{48.21,98.95},{90.61,142.67},{ 4.39,50.63},
{76.09,120.85},{72.86,132.97},{69.73,118.54},{60.33,93.71},
{ 5.07,42.46},{20.73,60.27},{42.45,89.87},{80.47,166.56},
{16.49,68.34},{97.12,153.22},{19.75,50.44},{75.75,121.87},
{16.84,69.99},{16.59,56.79},{22.78,65.78},{78.48,135.35},
{70.14,122.63},{39.36,74.32},{21.60,75.60},{66.51,101.96},
{62.88,107.89},{50.24,88.20},{60.77,106.24},{86.21,148.74},
{ 9.38,44.95},{87.93,141.50},{13.25,49.13},{50.99,106.87},
{84.74,145.24},{91.76,140.41},{81.99,130.91},{58.39,94.20},
{84.02,153.63},{55.36,92.79},{ 2.69,36.03},{65.84,115.04},
{52.09,98.57},{16.14,46.02},{18.37,39.39},{49.37,96.53},
{43.87,80.59},{80.77,130.01},{45.87,98.61},{10.53,37.07},
{46.18,93.03},{24.75,71.96},{85.19,138.24},{66.97,129.60},
{ 2.19,44.38},{68.15,89.75},{60.75,117.13},{15.45,62.88},
{59.82,93.68},{14.43,51.77},{46.38,75.94},{86.99,133.36},
{80.16,115.98},{71.51,113.22},{ 8.43,45.23},{36.84,81.44},
{99.22,143.60},{26.46,59.92},{92.97,161.39},{81.44,120.67},
{ 4.33,31.81},{81.67,130.81},{34.26,76.67},{76.71,150.31},
{77.99,131.09},{45.96,90.46},{25.87,59.28},{51.79,104.69},
{14.95,41.47},{22.07,67.88},{84.04,152.63},{63.10,114.30},
{94.30,147.86},{56.55,108.74},{ 8.29,55.81},{30.76,84.68},
{68.20,133.71},{ 3.29,50.95},{89.16,145.76},{31.10,67.81},
{ 0.88,41.80},{ 7.31,39.34},{51.82,103.09},{13.69,35.21},
{54.12,109.39},{41.60,79.94},{44.78,91.74},{ 0.83,42.82},
{88.24,138.49},{62.16,110.68},{ 7.00,25.60},{80.07,157.43},
{19.82,51.33},{11.07,53.28},{77.57,133.32},{94.77,146.08},
{19.43,67.02},{99.17,165.99},{32.86,70.06},{75.29,142.96},
{37.18,96.22},{37.29,112.25},{84.78,143.59},{93.33,138.44},
{74.44,121.57},{19.51,51.21},{82.81,123.17},{14.24,68.89},
{ 3.64,29.43},{18.79,56.15},{97.75,161.17},{71.42,119.80},
{ 5.68,42.40},{65.07,120.59},{53.09,109.96},{64.88,117.08},
{64.22,114.47},{22.87,69.56},{26.46,54.11},{38.98,79.57},
{89.71,145.31},{50.80,98.09},{50.17,95.25},{22.41,62.02},
{38.83,81.99},{ 4.82,22.56},{15.01,52.96},{41.12,76.82},
{ 5.14,35.46},{40.40,78.76},{76.89,122.53},{99.60,164.21},
{17.56,69.70},{15.47,67.74},{79.33,143.39},{61.38,106.24},
{77.09,145.58},{22.38,57.87},{77.00,146.86},{85.47,139.32},
{78.29,125.77},{56.09,113.82},{29.85,57.95},{68.02,114.98},
{99.80,152.56},{56.13,99.68},{50.87,96.14},{70.92,118.34},
{18.13,52.54},{ 9.65,52.74},{21.14,64.53},{ 5.85,35.25},
{ 3.90,35.84},{57.70,113.74},{32.65,79.44},{30.78,57.23},
{15.93,47.90},{94.54,158.57},{15.99,48.42},{54.03,97.67},
{94.56,145.55},{48.42,92.14},{33.50,75.93},{75.31,134.44},
{ 7.53,33.84},{48.48,81.91},{62.78,135.05},{22.56,62.72},
{31.12,58.49},{30.90,48.51},{48.27,107.01},{29.57,56.55},
{31.84,67.56},{63.07,115.38},{96.22,146.90},{75.96,125.90},
{78.48,132.71},{ 4.47,19.69},{56.83,94.99},{90.74,136.22},
{18.37,45.45},{43.37,88.50},{75.13,127.54},{91.84,139.83},
{66.99,114.37},{35.62,97.15},{14.32,40.17},{35.62,77.26},
{98.70,157.47},{14.60,46.19},{27.33,82.11},{15.48,46.49},
{82.71,139.29},{17.78,59.32},{37.39,90.82},{29.65,66.51},
{14.27,48.09},{38.27,74.89},{69.32,120.78},{ 3.72,41.25},
{ 6.44,62.75},{29.18,70.64},{46.02,71.57},{57.14,115.12},
{45.49,85.00},{38.75,82.52},{58.52,107.65},{54.88,99.55},
{71.98,123.01},{37.71,68.39},{43.32,82.62},{79.11,142.63},
{34.48,81.63},{73.53,130.77},{10.70,50.84},{23.54,68.26},
{63.75,124.89},{ 4.50,31.46},{55.35,99.71},{ 2.26, 1.63},
{65.48,121.04},{65.51,130.58},{74.76,130.05},{61.96,113.45},
{22.75,76.09},{12.11,56.20},{60.19,102.29},{27.93,78.04},
{14.21,40.49},{80.85,130.02},{98.75,163.54},{39.58,101.41},
{75.84,132.72},{ 2.21,14.08},{22.68,65.37},{81.91,138.57},
{71.29,114.89},{90.83,164.22},{94.44,151.59},{82.04,131.07},
{13.66,63.96},{48.38,87.90},{46.38,87.25},{22.28,63.31},
{ 2.87,32.37},{10.02,58.24},{49.16,100.16},{86.62,135.56},
{39.26,90.93},{78.34,133.91},{82.53,139.45},{59.77,112.37},
{70.98,130.76},{66.60,114.24},{35.82,90.20},{30.53,71.96},
{69.51,139.87},{94.56,173.33},{21.42,59.83},{58.70,111.28},
{37.44,94.48},{31.15,63.11},{23.53,63.70},{ 5.11,63.57},
{55.81,123.51},{15.80,42.37},{83.53,149.47},{80.35,153.86},
{37.73,102.20},{95.31,133.18},{97.78,155.11},{59.12,116.15},
{10.35,41.60},{65.22,107.71},{54.83,108.60},{91.01,151.20},
{78.63,147.74},{51.16,110.76},{70.28,106.57},{70.08,129.60},
{47.41,99.55},{ 0.52,21.99},{54.85,94.95},{93.87,153.82},
{40.84,67.40},{57.23,116.36},{76.08,140.72},{62.88,107.11},
{23.52,58.75},{86.76,141.34},{76.61,131.49},{69.97,129.62},
{ 6.16,24.48},{61.86,114.65},{30.69,88.16},{89.57,147.12},
{42.47,86.94},{29.92,69.93},{36.03,83.92},{90.74,139.60},
{32.22,73.11},{10.79,57.18},{28.87,59.02},{47.85,109.31},
{44.50,87.53},{10.85,44.35},{45.82,85.17},{43.53,93.85},
{57.17,103.94},{86.07,142.47},{97.68,151.83},{85.74,147.44},
{ 4.78,35.45},{97.96,154.43},{99.31,154.34},{ 6.00,45.64},
{56.05,115.48},{24.98,66.31},{86.32,152.64},{ 1.08,40.11},
{42.92,80.64},{79.59,132.72},{71.87,107.43},{19.35,47.20},
{38.09,92.45},{18.94,60.66},{30.15,60.80},{19.43,53.20},
{63.91,129.49},{54.38,113.42},{42.06,91.30},{ 1.98,41.20},
{ 5.47,23.84},{84.77,133.67},{ 4.93,38.23},{84.19,147.77},
{38.91,67.06},{25.87,60.48},{62.61,110.60},{28.58,84.46},
{92.31,152.06},{61.23,92.60},{82.96,125.80},{15.59,59.43},
{34.88,70.07},{13.29,35.70},{30.92,61.47},{93.31,141.05},
{68.91,126.91},{26.63,59.73},{37.41,72.67},{15.63,44.98},
{27.66,76.55},{99.90,164.33},{87.52,144.03},{ 4.42,29.79},
{30.91,59.24},{ 6.37,47.74},{78.59,133.51},{50.65,94.09},
{69.79,136.05},{60.30,120.16},{53.64,109.72},{ 9.80,62.05},
{84.72,134.75},{90.92,131.16},{70.20,126.34},{19.16,45.57},
{52.85,98.88},{69.27,123.71},{99.94,161.32},{92.46,161.95},
{94.75,159.49},{72.82,126.08},{92.27,145.98},{ 5.93,28.08},
{33.26,72.26},{ 2.12,39.38},{12.99,47.88},{57.53,112.68},
{46.70,94.90},{81.13,126.83},{12.80,69.03},{30.96,68.96},
{24.18,59.11},{ 2.27,41.30},{49.74,82.50},{62.55,126.09},
{48.84,95.14},{72.25,120.77},{ 3.22,24.46},{99.21,167.11},
{87.37,133.05},{82.33,144.86},{95.53,163.89},{94.11,145.19},
{13.11,35.64},{59.44,116.19},{24.27,62.07},{91.53,145.26},
{46.43,82.98},{99.89,151.74},{66.41,102.58},{56.46,114.65},
{62.68,99.59},{77.05,132.15},{47.38,81.81},{64.85,107.58},
{91.24,145.20},{65.69,126.13},{66.98,136.61},{ 4.95,29.94},
{75.39,156.04},{ 7.55,35.93},{29.83,62.85},{91.79,140.73},
{66.56,129.57},{36.16,67.39},{41.25,86.72},{94.82,156.68},
{24.15,66.85},{44.28,97.11},{31.82,69.41},{13.75,53.07},
{81.76,135.27},{23.72,77.94},{24.53,53.47},{23.66,67.62},
{21.90,56.35},{31.58,75.84},{31.28,70.78},{42.78,78.57},
{12.46,42.74},{74.68,148.57},{ 2.58,19.05},{91.39,147.46},
{56.50,121.13},{21.06,54.11},{27.09,57.00},{46.82,87.12},
{45.76,90.04},{85.87,149.19},{40.52,84.52},{72.24,118.46},
{ 3.34,27.96},{24.68,51.90},{45.54,98.75},{ 9.05,54.03},
{84.14,127.96},{73.69,129.22},{22.43,56.43},{20.47,67.18},
{21.36,81.39},{88.61,147.49},{88.78,126.41},{36.54,90.18},
{23.39,47.90},{16.16,53.46},{34.88,76.16},{75.58,140.32},
{33.45,88.12},{89.01,142.71},{46.57,96.54},{25.00,56.85},
{99.78,171.85},{82.58,152.64},{13.94,52.87},{46.61,112.56},
{64.76,116.36},{31.86,63.96},{69.61,120.21},{53.72,100.82},
{81.88,142.33},{29.39,66.57},{86.67,143.51},{ 4.13,31.53},
{22.34,58.49},{64.54,116.47},{68.08,129.02},{34.02,98.04},
{55.23,104.11},{19.59,64.50},{84.85,156.51},{94.41,142.74},
{12.49,49.71},{27.81,63.84},{53.94,107.71},{92.25,147.58},
{87.89,148.18},{21.02,69.44},{57.05,97.23},{48.46,94.85},
{ 3.81,37.26},{89.90,156.01},{57.31,88.22},{78.39,140.66},
{77.93,149.82},{23.15,62.96},{25.77,55.58},{74.11,141.26},
{21.31,64.10},{46.04,79.80},{65.78,117.56},{41.04,79.20},
{94.38,143.18},{81.52,133.84},{86.12,146.57},{39.38,85.36},
{63.01,110.79},{42.25,92.03},{48.83,86.99},{19.09,65.04}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c){
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data)
{
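// One thread per data point: each thread stores one squared residual;
// the host launches 100 blocks x 10 threads to cover the 1000 points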
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
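// om/oc enumerate the 8 compass directions around the base point in (m,c) space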
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
//Device variables
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be = rms_error(bm, bc);
error = cudaMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Allocate memory for d_dc
error = cudaMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Allocate memory for d_data
error = cudaMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
//Copy memory for dm to d_dm
error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
cudaGetErrorString(error));
}
//Copy memory for dc to d_dc
error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
cudaGetErrorString(error));
}
//Copy memory for data to d_data
error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
cudaGetErrorString(error));
}
for(i=0;i<8;i++) {
//Host variable storing the array returned from the kernel function.
double h_error_sum_arr[1000];
//Stores the total sum of the values from the error sum array.
double error_sum_total = 0;
//Stores the mean of the total sum of the error sums.
double error_sum_mean;
//Call the rms_error function using 100 blocks and 10 threads.
d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
cudaThreadSynchronize();
//Copy memory for d_error_sum_arr
error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
cudaGetErrorString(error));
}
//Loop through the error sum array returned from the kernel function
for(int j=0; j<n_data; j++) {
//Add each error sum to the error sum total.
error_sum_total += h_error_sum_arr[j];
}
//Calculate the mean for the error sum.
error_sum_mean = error_sum_total / n_data;
//Calculate the square root for the error sum mean.
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
//Reset the error sum total.
error_sum_total = 0;
}
//printf("best m,c is %lf,%lf with error %lf in direction %d\n",
//dm[best_error_i], dc[best_error_i], best_error, best_error_i);
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
//Free memory for d_dm
error = cudaFree(d_dm);
if(error){
fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Free memory for d_dc
error = cudaFree(d_dc);
if(error){
fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Free memory for d_data
error = cudaFree(d_data);
if(error){
fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
//Free memory for d_error_sum_arr
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
//Get the system time after we have run the linear regression function.
clock_gettime(CLOCK_MONOTONIC, &finish);
//Calculate the time spent between the start time and end time.
time_difference(&start, &finish, &time_elapsed);
//Output the time spent running the program.
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include <time.h>
#include <errno.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* hipcc -o culinear culinear.cu -lm
*
* To run:
* ./culinear
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{83.12,144.47},{65.27,114.80},{65.17,89.01},{68.57,122.90},
{77.57,136.93},{79.84,146.56},{84.42,123.51},{65.34,106.22},
{82.20,120.33},{65.35,142.11},{24.06,53.94},{35.61,87.53},
{ 2.02,22.75},{44.01,89.41},{85.58,141.52},{54.14,88.90},
{35.94,84.11},{22.86,45.76},{75.88,111.25},{54.49,105.83},
{94.65,139.29},{74.97,140.29},{46.31,94.00},{48.12,108.88},
{99.29,146.97},{86.76,135.87},{70.11,120.41},{ 5.01,35.32},
{84.56,147.46},{ 0.19,39.41},{13.16,49.52},{34.11,93.57},
{78.99,108.24},{38.38,81.59},{79.20,115.25},{84.38,146.00},
{92.49,166.93},{19.70,61.69},{23.14,82.49},{13.97,44.80},
{ 2.30,51.01},{15.33,34.49},{64.82,106.29},{39.99,76.65},
{85.93,162.61},{95.23,172.35},{11.05,60.11},{53.84,106.95},
{71.11,135.65},{33.67,88.76},{ 2.41,41.07},{52.19,108.83},
{30.21,57.75},{69.24,132.80},{96.44,157.86},{87.85,133.87},
{15.51,56.56},{53.81,106.32},{50.03,77.59},{77.05,136.93},
{37.29,81.30},{41.74,95.49},{53.91,109.94},{41.20,67.23},
{76.87,124.78},{39.99,82.29},{21.12,55.37},{34.62,65.13},
{20.91,51.88},{76.70,118.05},{ 4.76,45.66},{ 2.29,26.88},
{27.19,59.89},{ 6.82,36.36},{32.36,78.26},{48.72,99.14},
{80.55,127.01},{91.69,150.94},{ 9.68,29.41},{90.74,165.08},
{35.58,70.65},{90.86,166.10},{99.52,157.98},{15.66,47.55},
{45.23,88.34},{63.46,112.27},{64.21,115.27},{86.10,146.87},
{72.98,119.38},{31.78,67.38},{73.97,135.76},{24.43,70.15},
{74.86,135.38},{18.98,50.05},{49.32,106.88},{93.39,154.91},
{ 1.29,39.63},{10.92,61.03},{35.04,64.55},{57.66,111.38},
{42.04,96.64},{ 8.79,40.02},{92.43,147.28},{49.08,85.76},
{30.62,85.66},{51.41,97.98},{88.25,141.92},{27.07,61.14},
{34.88,83.12},{90.82,151.63},{55.07,106.28},{25.73,62.03},
{34.53,63.56},{ 6.61,34.03},{15.62,50.85},{15.32,67.76},
{69.03,114.54},{32.46,56.91},{69.37,123.90},{10.78,57.26},
{10.53,31.37},{53.23,109.49},{ 7.26,44.18},{15.90,63.21},
{ 8.53,36.85},{57.16,109.43},{80.74,122.57},{ 7.25,44.88},
{87.53,144.92},{90.70,165.27},{61.17,108.23},{53.14,111.23},
{94.75,138.45},{ 7.60,42.08},{18.83,76.22},{13.48,71.77},
{ 0.66,39.45},{35.94,87.05},{88.24,169.85},{22.00,70.26},
{93.97,144.15},{93.09,164.94},{41.88,90.98},{35.68,63.90},
{93.69,160.24},{22.20,53.28},{79.69,118.82},{27.57,57.90},
{24.98,72.67},{86.50,133.90},{40.28,86.21},{14.60,48.01},
{72.54,139.19},{55.30,79.54},{ 3.81,33.25},{ 5.68,53.66},
{17.39,44.50},{82.43,123.95},{26.21,57.88},{50.93,102.91},
{41.54,78.81},{36.41,65.17},{39.67,84.96},{74.19,130.02},
{79.23,147.24},{ 5.43,43.11},{59.04,92.40},{ 4.77,21.65},
{62.12,113.31},{80.55,133.55},{42.32,75.65},{83.01,131.90},
{39.06,88.34},{98.75,175.85},{31.87,62.41},{58.73,96.47},
{10.18,53.65},{12.05,47.02},{77.15,116.12},{17.71,57.77},
{82.98,134.75},{18.11,37.59},{32.30,74.54},{81.96,143.75},
{11.77,47.90},{24.43,78.01},{60.70,116.42},{72.05,123.46},
{42.29,75.74},{ 9.64,53.11},{ 3.20,41.20},{75.68,127.51},
{ 7.67,38.82},{ 9.55,45.92},{ 6.22,55.99},{15.01,53.21},
{ 2.50,17.99},{30.97,64.75},{15.92,58.06},{39.77,79.31},
{30.30,80.76},{75.71,133.13},{18.68,54.70},{14.33,48.80},
{65.29,112.12},{85.98,156.29},{68.20,115.16},{76.18,127.58},
{12.05,52.54},{ 1.45,26.32},{51.07,91.58},{70.45,131.48},
{46.34,110.44},{86.40,140.67},{62.22,107.05},{39.48,96.73},
{59.28,114.38},{85.33,140.73},{21.85,63.28},{55.32,96.88},
{54.90,99.09},{81.45,134.43},{94.99,152.75},{60.61,91.15},
{85.61,132.87},{54.72,105.30},{ 9.85,37.72},{85.74,133.99},
{30.19,79.45},{87.18,142.65},{27.50,68.06},{48.21,81.13},
{89.60,139.38},{20.45,61.03},{60.56,101.17},{88.41,139.78},
{84.60,146.42},{25.34,45.91},{32.69,104.43},{13.63,53.03},
{80.26,124.62},{97.15,147.49},{99.16,177.78},{81.31,127.71},
{88.58,136.47},{24.77,59.82},{96.93,160.71},{51.92,102.46},
{27.33,67.99},{92.40,156.65},{87.22,135.40},{ 8.66,33.01},
{79.02,137.74},{92.16,158.93},{70.14,117.38},{31.39,83.34},
{98.54,150.47},{81.39,145.14},{32.19,90.89},{49.53,82.60},
{83.19,147.94},{65.68,121.26},{19.73,73.98},{19.26,39.84},
{68.81,127.82},{21.93,64.48},{22.98,67.44},{ 8.19,35.21},
{83.08,134.02},{69.30,124.24},{19.40,46.96},{64.13,120.93},
{61.91,118.90},{31.92,72.59},{97.06,157.02},{69.68,131.99},
{64.02,120.20},{86.75,141.47},{48.62,98.35},{62.34,118.54},
{23.10,73.71},{ 3.22,24.94},{47.03,98.28},{86.10,129.82},
{17.62,41.43},{20.60,62.70},{25.56,79.02},{98.74,168.44},
{25.25,68.33},{ 0.26,17.74},{73.72,125.70},{62.70,101.61},
{86.10,144.15},{ 7.59,38.21},{65.71,118.18},{57.83,104.28},
{48.00,91.86},{59.53,110.64},{75.08,131.55},{66.96,113.45},
{23.44,41.93},{ 7.22,33.51},{22.13,70.49},{20.24,70.87},
{36.57,59.85},{22.89,50.80},{88.83,128.03},{54.08,109.80},
{20.87,65.63},{80.15,132.14},{91.71,142.11},{12.37,46.56},
{31.09,82.71},{ 9.54,28.65},{16.74,44.18},{37.07,73.24},
{ 1.67,41.10},{ 0.29,12.09},{34.05,80.10},{64.07,112.30},
{64.66,110.15},{21.74,62.28},{74.39,129.73},{53.67,90.13},
{75.14,147.83},{42.98,82.02},{66.29,121.10},{57.34,102.40},
{96.75,152.13},{13.36,48.35},{21.05,73.53},{81.77,135.48},
{88.21,171.75},{51.53,98.91},{21.88,63.71},{89.27,145.47},
{67.70,125.26},{72.69,126.45},{27.77,58.71},{69.38,115.18},
{ 2.59,19.50},{93.93,149.24},{ 4.84,44.09},{19.21,43.14},
{10.58,38.47},{41.51,82.49},{88.02,148.21},{55.22,114.17},
{12.69,79.85},{91.81,160.45},{99.68,162.60},{62.74,103.63},
{10.21,47.93},{ 5.21,28.37},{89.57,148.01},{28.42,54.46},
{61.03,88.74},{73.04,120.93},{71.30,131.03},{ 6.42,27.57},
{82.06,114.82},{50.07,89.66},{76.06,137.34},{69.25,116.77},
{72.62,110.20},{ 8.88,48.25},{24.03,73.68},{52.59,102.23},
{84.77,139.15},{96.75,154.31},{70.15,122.87},{93.18,166.62},
{ 6.17,58.46},{92.22,158.34},{74.61,131.25},{67.46,119.20},
{22.98,57.20},{37.45,86.95},{ 1.97,39.59},{48.29,116.20},
{52.60,109.07},{24.17,56.13},{58.56,116.56},{32.87,65.50},
{ 0.34,43.67},{87.72,142.21},{37.41,62.88},{64.08,127.92},
{42.54,79.79},{35.53,88.48},{ 2.57,23.24},{77.80,122.09},
{ 4.19,35.89},{11.53,28.55},{62.03,82.21},{55.15,93.33},
{63.96,120.79},{73.17,129.77},{57.12,113.60},{32.89,92.86},
{27.89,70.41},{39.21,74.83},{77.58,129.76},{77.44,149.05},
{ 2.87,10.13},{11.11,44.31},{77.46,144.46},{45.30,100.95},
{ 4.69,30.94},{89.47,157.53},{ 7.61,44.77},{23.09,74.16},
{91.49,156.06},{11.20,52.40},{21.47,77.05},{86.58,141.10},
{24.07,57.57},{76.46,137.23},{84.23,120.97},{96.42,157.37},
{98.02,155.25},{99.42,159.62},{12.67,68.56},{36.27,92.72},
{16.08,50.55},{29.05,58.27},{24.65,58.31},{22.59,71.18},
{54.34,115.03},{44.53,96.50},{50.73,109.29},{10.75,45.32},
{62.06,126.81},{12.61,62.62},{21.94,50.52},{86.83,160.25},
{ 9.03,51.65},{73.37,127.89},{54.41,107.85},{95.96,172.35},
{69.67,130.26},{48.73,103.54},{62.30,113.08},{19.39,78.51},
{77.40,124.44},{ 1.63,34.05},{90.02,152.89},{64.47,110.81},
{47.10,103.92},{64.92,116.32},{42.67,73.30},{48.06,76.96},
{35.45,65.22},{98.35,158.55},{17.10,60.38},{29.75,70.75},
{85.75,135.77},{48.27,88.32},{42.05,73.57},{88.04,146.92},
{ 9.72,34.51},{66.61,120.50},{52.60,91.06},{78.80,127.29},
{11.69,48.24},{ 2.59,39.39},{84.26,130.65},{10.82,43.81},
{97.33,173.24},{95.78,157.66},{51.35,81.72},{83.75,136.31},
{72.98,114.92},{70.67,120.19},{90.19,147.54},{39.23,71.88},
{35.17,78.15},{84.31,136.47},{ 4.96,37.06},{13.96,55.78},
{51.70,107.90},{48.21,98.95},{90.61,142.67},{ 4.39,50.63},
{76.09,120.85},{72.86,132.97},{69.73,118.54},{60.33,93.71},
{ 5.07,42.46},{20.73,60.27},{42.45,89.87},{80.47,166.56},
{16.49,68.34},{97.12,153.22},{19.75,50.44},{75.75,121.87},
{16.84,69.99},{16.59,56.79},{22.78,65.78},{78.48,135.35},
{70.14,122.63},{39.36,74.32},{21.60,75.60},{66.51,101.96},
{62.88,107.89},{50.24,88.20},{60.77,106.24},{86.21,148.74},
{ 9.38,44.95},{87.93,141.50},{13.25,49.13},{50.99,106.87},
{84.74,145.24},{91.76,140.41},{81.99,130.91},{58.39,94.20},
{84.02,153.63},{55.36,92.79},{ 2.69,36.03},{65.84,115.04},
{52.09,98.57},{16.14,46.02},{18.37,39.39},{49.37,96.53},
{43.87,80.59},{80.77,130.01},{45.87,98.61},{10.53,37.07},
{46.18,93.03},{24.75,71.96},{85.19,138.24},{66.97,129.60},
{ 2.19,44.38},{68.15,89.75},{60.75,117.13},{15.45,62.88},
{59.82,93.68},{14.43,51.77},{46.38,75.94},{86.99,133.36},
{80.16,115.98},{71.51,113.22},{ 8.43,45.23},{36.84,81.44},
{99.22,143.60},{26.46,59.92},{92.97,161.39},{81.44,120.67},
{ 4.33,31.81},{81.67,130.81},{34.26,76.67},{76.71,150.31},
{77.99,131.09},{45.96,90.46},{25.87,59.28},{51.79,104.69},
{14.95,41.47},{22.07,67.88},{84.04,152.63},{63.10,114.30},
{94.30,147.86},{56.55,108.74},{ 8.29,55.81},{30.76,84.68},
{68.20,133.71},{ 3.29,50.95},{89.16,145.76},{31.10,67.81},
{ 0.88,41.80},{ 7.31,39.34},{51.82,103.09},{13.69,35.21},
{54.12,109.39},{41.60,79.94},{44.78,91.74},{ 0.83,42.82},
{88.24,138.49},{62.16,110.68},{ 7.00,25.60},{80.07,157.43},
{19.82,51.33},{11.07,53.28},{77.57,133.32},{94.77,146.08},
{19.43,67.02},{99.17,165.99},{32.86,70.06},{75.29,142.96},
{37.18,96.22},{37.29,112.25},{84.78,143.59},{93.33,138.44},
{74.44,121.57},{19.51,51.21},{82.81,123.17},{14.24,68.89},
{ 3.64,29.43},{18.79,56.15},{97.75,161.17},{71.42,119.80},
{ 5.68,42.40},{65.07,120.59},{53.09,109.96},{64.88,117.08},
{64.22,114.47},{22.87,69.56},{26.46,54.11},{38.98,79.57},
{89.71,145.31},{50.80,98.09},{50.17,95.25},{22.41,62.02},
{38.83,81.99},{ 4.82,22.56},{15.01,52.96},{41.12,76.82},
{ 5.14,35.46},{40.40,78.76},{76.89,122.53},{99.60,164.21},
{17.56,69.70},{15.47,67.74},{79.33,143.39},{61.38,106.24},
{77.09,145.58},{22.38,57.87},{77.00,146.86},{85.47,139.32},
{78.29,125.77},{56.09,113.82},{29.85,57.95},{68.02,114.98},
{99.80,152.56},{56.13,99.68},{50.87,96.14},{70.92,118.34},
{18.13,52.54},{ 9.65,52.74},{21.14,64.53},{ 5.85,35.25},
{ 3.90,35.84},{57.70,113.74},{32.65,79.44},{30.78,57.23},
{15.93,47.90},{94.54,158.57},{15.99,48.42},{54.03,97.67},
{94.56,145.55},{48.42,92.14},{33.50,75.93},{75.31,134.44},
{ 7.53,33.84},{48.48,81.91},{62.78,135.05},{22.56,62.72},
{31.12,58.49},{30.90,48.51},{48.27,107.01},{29.57,56.55},
{31.84,67.56},{63.07,115.38},{96.22,146.90},{75.96,125.90},
{78.48,132.71},{ 4.47,19.69},{56.83,94.99},{90.74,136.22},
{18.37,45.45},{43.37,88.50},{75.13,127.54},{91.84,139.83},
{66.99,114.37},{35.62,97.15},{14.32,40.17},{35.62,77.26},
{98.70,157.47},{14.60,46.19},{27.33,82.11},{15.48,46.49},
{82.71,139.29},{17.78,59.32},{37.39,90.82},{29.65,66.51},
{14.27,48.09},{38.27,74.89},{69.32,120.78},{ 3.72,41.25},
{ 6.44,62.75},{29.18,70.64},{46.02,71.57},{57.14,115.12},
{45.49,85.00},{38.75,82.52},{58.52,107.65},{54.88,99.55},
{71.98,123.01},{37.71,68.39},{43.32,82.62},{79.11,142.63},
{34.48,81.63},{73.53,130.77},{10.70,50.84},{23.54,68.26},
{63.75,124.89},{ 4.50,31.46},{55.35,99.71},{ 2.26, 1.63},
{65.48,121.04},{65.51,130.58},{74.76,130.05},{61.96,113.45},
{22.75,76.09},{12.11,56.20},{60.19,102.29},{27.93,78.04},
{14.21,40.49},{80.85,130.02},{98.75,163.54},{39.58,101.41},
{75.84,132.72},{ 2.21,14.08},{22.68,65.37},{81.91,138.57},
{71.29,114.89},{90.83,164.22},{94.44,151.59},{82.04,131.07},
{13.66,63.96},{48.38,87.90},{46.38,87.25},{22.28,63.31},
{ 2.87,32.37},{10.02,58.24},{49.16,100.16},{86.62,135.56},
{39.26,90.93},{78.34,133.91},{82.53,139.45},{59.77,112.37},
{70.98,130.76},{66.60,114.24},{35.82,90.20},{30.53,71.96},
{69.51,139.87},{94.56,173.33},{21.42,59.83},{58.70,111.28},
{37.44,94.48},{31.15,63.11},{23.53,63.70},{ 5.11,63.57},
{55.81,123.51},{15.80,42.37},{83.53,149.47},{80.35,153.86},
{37.73,102.20},{95.31,133.18},{97.78,155.11},{59.12,116.15},
{10.35,41.60},{65.22,107.71},{54.83,108.60},{91.01,151.20},
{78.63,147.74},{51.16,110.76},{70.28,106.57},{70.08,129.60},
{47.41,99.55},{ 0.52,21.99},{54.85,94.95},{93.87,153.82},
{40.84,67.40},{57.23,116.36},{76.08,140.72},{62.88,107.11},
{23.52,58.75},{86.76,141.34},{76.61,131.49},{69.97,129.62},
{ 6.16,24.48},{61.86,114.65},{30.69,88.16},{89.57,147.12},
{42.47,86.94},{29.92,69.93},{36.03,83.92},{90.74,139.60},
{32.22,73.11},{10.79,57.18},{28.87,59.02},{47.85,109.31},
{44.50,87.53},{10.85,44.35},{45.82,85.17},{43.53,93.85},
{57.17,103.94},{86.07,142.47},{97.68,151.83},{85.74,147.44},
{ 4.78,35.45},{97.96,154.43},{99.31,154.34},{ 6.00,45.64},
{56.05,115.48},{24.98,66.31},{86.32,152.64},{ 1.08,40.11},
{42.92,80.64},{79.59,132.72},{71.87,107.43},{19.35,47.20},
{38.09,92.45},{18.94,60.66},{30.15,60.80},{19.43,53.20},
{63.91,129.49},{54.38,113.42},{42.06,91.30},{ 1.98,41.20},
{ 5.47,23.84},{84.77,133.67},{ 4.93,38.23},{84.19,147.77},
{38.91,67.06},{25.87,60.48},{62.61,110.60},{28.58,84.46},
{92.31,152.06},{61.23,92.60},{82.96,125.80},{15.59,59.43},
{34.88,70.07},{13.29,35.70},{30.92,61.47},{93.31,141.05},
{68.91,126.91},{26.63,59.73},{37.41,72.67},{15.63,44.98},
{27.66,76.55},{99.90,164.33},{87.52,144.03},{ 4.42,29.79},
{30.91,59.24},{ 6.37,47.74},{78.59,133.51},{50.65,94.09},
{69.79,136.05},{60.30,120.16},{53.64,109.72},{ 9.80,62.05},
{84.72,134.75},{90.92,131.16},{70.20,126.34},{19.16,45.57},
{52.85,98.88},{69.27,123.71},{99.94,161.32},{92.46,161.95},
{94.75,159.49},{72.82,126.08},{92.27,145.98},{ 5.93,28.08},
{33.26,72.26},{ 2.12,39.38},{12.99,47.88},{57.53,112.68},
{46.70,94.90},{81.13,126.83},{12.80,69.03},{30.96,68.96},
{24.18,59.11},{ 2.27,41.30},{49.74,82.50},{62.55,126.09},
{48.84,95.14},{72.25,120.77},{ 3.22,24.46},{99.21,167.11},
{87.37,133.05},{82.33,144.86},{95.53,163.89},{94.11,145.19},
{13.11,35.64},{59.44,116.19},{24.27,62.07},{91.53,145.26},
{46.43,82.98},{99.89,151.74},{66.41,102.58},{56.46,114.65},
{62.68,99.59},{77.05,132.15},{47.38,81.81},{64.85,107.58},
{91.24,145.20},{65.69,126.13},{66.98,136.61},{ 4.95,29.94},
{75.39,156.04},{ 7.55,35.93},{29.83,62.85},{91.79,140.73},
{66.56,129.57},{36.16,67.39},{41.25,86.72},{94.82,156.68},
{24.15,66.85},{44.28,97.11},{31.82,69.41},{13.75,53.07},
{81.76,135.27},{23.72,77.94},{24.53,53.47},{23.66,67.62},
{21.90,56.35},{31.58,75.84},{31.28,70.78},{42.78,78.57},
{12.46,42.74},{74.68,148.57},{ 2.58,19.05},{91.39,147.46},
{56.50,121.13},{21.06,54.11},{27.09,57.00},{46.82,87.12},
{45.76,90.04},{85.87,149.19},{40.52,84.52},{72.24,118.46},
{ 3.34,27.96},{24.68,51.90},{45.54,98.75},{ 9.05,54.03},
{84.14,127.96},{73.69,129.22},{22.43,56.43},{20.47,67.18},
{21.36,81.39},{88.61,147.49},{88.78,126.41},{36.54,90.18},
{23.39,47.90},{16.16,53.46},{34.88,76.16},{75.58,140.32},
{33.45,88.12},{89.01,142.71},{46.57,96.54},{25.00,56.85},
{99.78,171.85},{82.58,152.64},{13.94,52.87},{46.61,112.56},
{64.76,116.36},{31.86,63.96},{69.61,120.21},{53.72,100.82},
{81.88,142.33},{29.39,66.57},{86.67,143.51},{ 4.13,31.53},
{22.34,58.49},{64.54,116.47},{68.08,129.02},{34.02,98.04},
{55.23,104.11},{19.59,64.50},{84.85,156.51},{94.41,142.74},
{12.49,49.71},{27.81,63.84},{53.94,107.71},{92.25,147.58},
{87.89,148.18},{21.02,69.44},{57.05,97.23},{48.46,94.85},
{ 3.81,37.26},{89.90,156.01},{57.31,88.22},{78.39,140.66},
{77.93,149.82},{23.15,62.96},{25.77,55.58},{74.11,141.26},
{21.31,64.10},{46.04,79.80},{65.78,117.56},{41.04,79.20},
{94.38,143.18},{81.52,133.84},{86.12,146.57},{39.38,85.36},
{63.01,110.79},{42.25,92.03},{48.83,86.99},{19.09,65.04}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c){
double e = (m * x) + c - y;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0 ) {
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main() {
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
//Device variables
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be = rms_error(bm, bc);
error = hipMalloc(&d_dm, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Allocate memory for d_dc
error = hipMalloc(&d_dc, (sizeof(double) * 8));
if(error){
fprintf(stderr, "hipMalloc on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
error = hipMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
if(error){
fprintf(stderr, "hipMalloc on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Allocate memory for d_data
error = hipMalloc(&d_data, sizeof(data));
if(error){
fprintf(stderr, "hipMalloc on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i] = bc + (oc[i] * step);
}
//Copy memory for dm to d_dm
error = hipMemcpy(d_dm, dm, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dm returned %d %s\n", error,
hipGetErrorString(error));
}
//Copy memory for dc to d_dc
error = hipMemcpy(d_dc, dc, (sizeof(double) * 8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_dc returned %d %s\n", error,
hipGetErrorString(error));
}
//Copy memory for data to d_data
error = hipMemcpy(d_data, data, sizeof(data), hipMemcpyHostToDevice);
if(error){
fprintf(stderr, "hipMemcpy to d_data returned %d %s\n", error,
hipGetErrorString(error));
}
for(i=0;i<8;i++) {
//Host variable storing the array returned from the kernel function.
double h_error_sum_arr[1000];
//Stores the total sum of the values from the error sum array.
double error_sum_total = 0;
//Stores the mean of the total sum of the error sums.
double error_sum_mean;
//Call the rms_error function using 100 blocks and 10 threads.
d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
hipDeviceSynchronize();
//Copy memory for d_error_sum_arr
error = hipMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), hipMemcpyDeviceToHost);
if(error){
fprintf(stderr, "hipMemcpy to error_sum returned %d %s\n", error,
hipGetErrorString(error));
}
//Loop through the error sum array returned from the kernel function
for(int j=0; j<n_data; j++) {
//Add each error sum to the error sum total.
error_sum_total += h_error_sum_arr[j];
}
//Calculate the mean for the error sum.
error_sum_mean = error_sum_total / n_data;
//Calculate the square root for the error sum mean.
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error) {
best_error = e[i];
best_error_i = i;
}
//Reset the error sum total.
error_sum_total = 0;
}
//printf("best m,c is %lf,%lf with error %lf in direction %d\n",
//dm[best_error_i], dc[best_error_i], best_error, best_error_i);
if(best_error < be) {
be = best_error;
bm = dm[best_error_i];
bc = dc[best_error_i];
} else {
minimum_found = 1;
}
}
//Free memory for d_dm
error = hipFree(d_dm);
if(error){
fprintf(stderr, "hipFree on d_dm returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Free memory for d_dc
error = hipFree(d_dc);
if(error){
fprintf(stderr, "hipFree on d_dc returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Free memory for d_data
error = hipFree(d_data);
if(error){
fprintf(stderr, "hipFree on d_data returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
//Free memory for d_error_sum_arr
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr, "hipFree on d_error_sum_arr returned %d %s\n", error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
//Get the system time after we have run the linear regression function.
clock_gettime(CLOCK_MONOTONIC, &finish);
//Calculate the time spent between the start time and end time.
time_difference(&start, &finish, &time_elapsed);
//Output the time spent running the program.
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
|
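The triple-chevron launch used above (d_rms_error <<<100,10>>>) compiles directly under hipcc. HIP also provides an explicit launch macro; a minimal sketch of the equivalent call, with the same 100-block, 10-thread configuration, would be:

// hipLaunchKernelGGL takes the kernel, grid dim, block dim, dynamic
// shared-memory bytes, and stream ahead of the kernel arguments.
hipLaunchKernelGGL(d_rms_error, dim3(100), dim3(10), 0, 0,
                   &d_dm[i], &d_dc[i], d_error_sum_arr, d_data);

The two forms are interchangeable; the macro is handy when a file must also build with host compilers that do not understand chevron syntax.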
Convert the following CUDA code to AMD GPU code:
cuda
#include <iostream>
#include <math.h>
#include <vector>
#include <string>
#include <algorithm>
#include <random>
#include <chrono>
#include <stdio.h>
#define NB_THREADS 1024
#define NB_NUMBERS 200
#define NB_EXPERIMENTS 100
void print_vector(int* array, int k) {
for(size_t i=0; i < k; i++) {
printf("%d ", array[i]);
}
printf("\n");
}
bool compare_vectors(int *v1, std::vector<int> v2) {
for(int i=0; i < v2.size(); i++) {
if (v1[i] != v2[i]) {
printf("wrong at %d %d vs %d\n", i, v1[i], v2[i]);
return false;
}
}
return true;
}
std::vector<int> create_vector(int N) {
std::vector<int> res;
for(int i=0; i < N; i++) {
res.push_back(i);
}
return res;
}
__global__
void bubble_sort(int *A, int n) {
__shared__ int end[NB_THREADS]; // termination condition for each thread
__shared__ int race[NB_NUMBERS]; // race condition for each element of input array
for (int u=0; u<NB_THREADS; u++) { end[u] = 0; }
for (int v=0; v<NB_NUMBERS; v++) { race[v] = 1; }
int temp;
int index = threadIdx.x;
while (1) {
end[index] = 1;
for (int i=0; i<n; i++) {
while (1) {
if ((race[i] == 1 && race[i-1] == 1) || (i == 0 && race[i] == 1)) {
if (A[i-1] > A[i]) {
// Block race condition
race[i] = 0;
race[i-1] = 0;
// Swap
temp = A[i-1];
A[i-1] = A[i];
A[i] = temp;
// Release race condition
end[index] = 0;
race[i] = 1;
race[i-1] = 1;
}
break;
}
}
}
if (end[index] == 1) { break; }
}
}
int main(void) {
for (int e=0; e<NB_EXPERIMENTS; e++) {
// Init random input array
int range = NB_NUMBERS;
std::vector<int> v = create_vector(range);
std::vector<int> v_orig = create_vector(range);
std::random_shuffle ( v.begin(), v.end() );
std::random_shuffle ( v.begin(), v.end() );
int *x;
cudaMallocManaged(&x, range*sizeof(int));
for (int i = 0; i < range; i++) {
x[i] = v[i];
printf("%d %d\n", x[i], v[i]);
}
print_vector(x, range);
// Sort
bubble_sort<<<1, NB_THREADS>>>(x, range);
cudaDeviceSynchronize();
// Check output
if (!compare_vectors(x, v_orig)) {
printf("%s\n", "Wrong algorithm");
print_vector(x, range);
break;
} else { printf("%s %d\n", "True algorithm", e); }
// Free memory
cudaFree(x);
}
}
|
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include <vector>
#include <string>
#include <algorithm>
#include <random>
#include <chrono>
#include <stdio.h>
#define NB_THREADS 1024
#define NB_NUMBERS 200
#define NB_EXPERIMENTS 100
void print_vector(int* array, int k) {
for(size_t i=0; i < k; i++) {
printf("%d ", array[i]);
}
printf("\n");
}
bool compare_vectors(int *v1, std::vector<int> v2) {
for(int i=0; i < v2.size(); i++) {
if (v1[i] != v2[i]) {
printf("wrong at %d %d vs %d\n", i, v1[i], v2[i]);
return false;
}
}
return true;
}
std::vector<int> create_vector(int N) {
std::vector<int> res;
for(int i=0; i < N; i++) {
res.push_back(i);
}
return res;
}
__global__
void bubble_sort(int *A, int n) {
__shared__ int end[NB_THREADS]; // termination condition for each thread
__shared__ int race[NB_NUMBERS]; // race condition for each element of input array
for (int u=0; u<NB_THREADS; u++) { end[u] = 0; }
for (int v=0; v<NB_NUMBERS; v++) { race[v] = 1; }
int temp;
int index = threadIdx.x;
while (1) {
end[index] = 1;
for (int i=0; i<n; i++) {
while (1) {
if ((race[i] == 1 && race[i-1] == 1) || (i == 0 && race[i] == 1)) {
if (A[i-1] > A[i]) {
// Block race condition
race[i] = 0;
race[i-1] = 0;
// Swap
temp = A[i-1];
A[i-1] = A[i];
A[i] = temp;
// Release race condition
end[index] = 0;
race[i] = 1;
race[i-1] = 1;
}
break;
}
}
}
if (end[index] == 1) { break; }
}
}
int main(void) {
for (int e=0; e<NB_EXPERIMENTS; e++) {
// Init random input array
int range = NB_NUMBERS;
std::vector<int> v = create_vector(range);
std::vector<int> v_orig = create_vector(range);
std::random_shuffle ( v.begin(), v.end() );
std::random_shuffle ( v.begin(), v.end() );
int *x;
hipMallocManaged(&x, range*sizeof(int));
for (int i = 0; i < range; i++) {
x[i] = v[i];
printf("%d %d\n", x[i], v[i]);
}
print_vector(x, range);
// Sort
bubble_sort<<<1, NB_THREADS>>>(x, range);
hipDeviceSynchronize();
// Check output
if (!compare_vectors(x, v_orig)) {
printf("%s\n", "Wrong algorithm");
print_vector(x, range);
break;
} else { printf("%s %d\n", "True algorithm", e); }
// Free memory
hipFree(x);
}
}
|
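Two caveats on this pair. First, the kernel dereferences race[i-1] and A[i-1] on the i == 0 path, an out-of-bounds access inherited verbatim from the CUDA original, so the conversion is faithful but the algorithm itself is unsafe. Second, hipMallocManaged returns a hipError_t that the example discards; managed memory depends on runtime and hardware support (e.g. XNACK on some AMD GPUs), so it is worth checking. A defensive variant of the allocation, as an illustrative sketch rather than part of the original:

int *x = nullptr;
hipError_t merr = hipMallocManaged(&x, range * sizeof(int));
if (merr != hipSuccess) {
    // Fall back or abort; hipGetErrorString gives a readable reason.
    fprintf(stderr, "hipMallocManaged failed: %s\n", hipGetErrorString(merr));
    return 1;
}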
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cooperative_groups.h>
//#include <helper_cuda.h>
#define N_INPUTS 32
#define N_ARITH 74
__global__ void
ac(float *A, const int *B, const int *C, const int *op_sel, int n_inputs, const int n_arith, int thresh, int iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
int idx_off= i*n_inputs;
float val_31, val_32, val_33, val_34, val_35, val_36, val_37, val_38, val_39, val_40, val_41, val_42, val_43, val_44, val_45, val_46, val_47, val_48, val_49, val_50, val_51, val_52, val_53, val_54, val_55, val_56, val_57, val_58, val_59, val_60, val_61, val_62, val_63, val_64, val_65, val_66, val_67, val_68, val_69, val_70, val_71, val_72, val_73, val_74, val_75, val_76, val_77, val_78, val_79, val_80, val_81, val_82, val_83, val_84, val_85, val_86, val_87, val_88, val_89, val_90, val_91, val_92, val_93, val_94, val_95, val_96, val_97, val_98, val_99, val_100, val_101, val_102, val_103, val_104;
float *val= &A[idx_off];
for (int k=0; k<iter; k++) {
val_31 = A[idx_off + 5] * val[29];
val_32 = val[6] * val[30];
val_33 = val[3] * val[29];
val_34 = val[4] * val[30];
val_35 = val_31 + val_32;
val_36 = val_33 + val_34;
val_37 = val[0] * val[15];
val_38 = val[1] * val[16];
val_39 = val[3] * val_37;
val_40 = val[4] * val_37;
val_41 = val[0] * val_38;
val_42 = val[1] * val_38;
val_43 = val_39 + val_41;
val_44 = val_40 + val_42;
val_45 = val[12] * val[19];
val_46 = val[13] * val[19];
val_47 = val[11] * val[20];
val_48 = val[14] * val[20];
val_49 = val[10] * val[19];
val_50 = val[11] * val[19];
val_51 = val[9] * val[20];
val_52 = val[12] * val[20];
val_53 = val_45 + val_47;
val_54 = val_46 + val_48;
val_55 = val_49 + val_51;
val_56 = val_50 + val_52;
val_57 = val[27] * val_43;
val_58 = val[28] * val_44;
val_59 = val_57 + val_58;
val_60 = val[2] * val[25];
val_61 = val[2] * val[26];
val_62 = val[7] * val_60;
val_63 = val[9] * val_61;
val_64 = val[8] * val_60;
val_65 = val[10] * val_61;
val_66 = val[11] * val_62;
val_67 = val[0] * val_63;
val_68 = val[12] * val_62;
val_69 = val[1] * val_63;
val_70 = val[11] * val_64;
val_71 = val[0] * val_65;
val_72 = val[12] * val_64;
val_73 = val[1] * val_65;
val_74 = val_66 + val_67;
val_75 = val_68 + val_69;
val_76 = val_70 + val_71;
val_77 = val_72 + val_73;
val_78 = val[21] * val_35;
val_79 = val[22] * val_36;
val_80 = val_53 * val_78;
val_81 = val_54 * val_79;
val_82 = val_55 * val_78;
val_83 = val_56 * val_79;
val_84 = val_59 * val_80;
val_85 = val_57 * val_80;
val_86 = val_58 * val_81;
val_87 = val_59 * val_82;
val_88 = val_57 * val_82;
val_89 = val_58 * val_83;
val_90 = val_85 + val_86;
val_91 = val_88 + val_89;
val_92 = val[17] * val_74;
val_93 = val[17] * val_75;
val_94 = val[18] * val_76;
val_95 = val[18] * val_77;
val_96 = val_84 * val_92;
val_97 = val_90 * val_93;
val_98 = val_87 * val_94;
val_99 = val_91 * val_95;
val_100 = val_96 + val_98;
val_101 = val_97 + val_99;
val_102 = val[23] * val_100;
val_103 = val[24] * val_101;
val_104 = val_102 + val_103;
A[i*n_inputs+5] += val_104;
}
A[i*n_inputs]= val_104;
}
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
const int n_inputs= N_INPUTS;
const int n_arith= N_ARITH;
const int batch_size= 128;
const int iter= 1;
const int thresh= n_arith/3;
size_t size= batch_size * (n_inputs) * sizeof(float);
size_t size_idx= n_arith * sizeof(int);
float *h_A= (float *)malloc(size);
int *h_B= (int *)malloc(size_idx);
int *h_C= (int *)malloc(size_idx);
int *h_op_sel= (int *) malloc(size_idx);
// Initialize the host input vectors
for (int i = 0; i < n_arith; ++i)
{
if (i < thresh) {
h_B[i] = rand() % (n_inputs);
h_C[i] = rand() % (n_inputs);
}
else{
h_B[i] = rand() % (i);
h_C[i] = rand() % (i);
}
h_op_sel[i]= rand() % 2;
}
for (int i= 0; i < n_inputs; ++i) {
for (int b =0; b< batch_size; ++b) {
//h_A[b* n_inputs + i]= float(rand());
h_A[b* n_inputs + i]= 0.5;
}
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_B = NULL;
err = cudaMalloc((void **)&d_B, size_idx);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_C = NULL;
err = cudaMalloc((void **)&d_C, size_idx);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_op_sel = NULL;
err = cudaMalloc((void **)&d_op_sel, size_idx);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size_idx, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_C, h_C, size_idx, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_op_sel, h_op_sel, size_idx, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 32;
int blocksPerGrid= (batch_size + threadsPerBlock -1)/ threadsPerBlock;
struct timeval t1, t2;
// Perform Warmup
ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
// Finish execution of kernel
cudaDeviceSynchronize();
gettimeofday(&t1, 0);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
// Finish execution of kernel
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Time of kernel: %3.4f ms \n", time);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Throughput: %.3f Gops/sec\n", (((1.0*batch_size*iter*n_arith))/time)/10E6);
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//for (int i=0; i<numElements; i++) {
for (int i=0; i<32; i++) {
printf("%d : %f,", i, h_A[i*n_inputs]);
}
err = cudaFree(d_A);
err = cudaFree(d_B);
err = cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
printf("Done!\n");
return 0;
}
|
#include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <hip/hip_cooperative_groups.h>
//#include <helper_cuda.h>
#define N_INPUTS 32
#define N_ARITH 74
__global__ void
ac(float *A, const int *B, const int *C, const int *op_sel, int n_inputs, const int n_arith, int thresh, int iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
int idx_off= i*n_inputs;
float val_31, val_32, val_33, val_34, val_35, val_36, val_37, val_38, val_39, val_40, val_41, val_42, val_43, val_44, val_45, val_46, val_47, val_48, val_49, val_50, val_51, val_52, val_53, val_54, val_55, val_56, val_57, val_58, val_59, val_60, val_61, val_62, val_63, val_64, val_65, val_66, val_67, val_68, val_69, val_70, val_71, val_72, val_73, val_74, val_75, val_76, val_77, val_78, val_79, val_80, val_81, val_82, val_83, val_84, val_85, val_86, val_87, val_88, val_89, val_90, val_91, val_92, val_93, val_94, val_95, val_96, val_97, val_98, val_99, val_100, val_101, val_102, val_103, val_104;
float *val= &A[idx_off];
for (int k=0; k<iter; k++) {
val_31 = A[idx_off + 5] * val[29];
val_32 = val[6] * val[30];
val_33 = val[3] * val[29];
val_34 = val[4] * val[30];
val_35 = val_31 + val_32;
val_36 = val_33 + val_34;
val_37 = val[0] * val[15];
val_38 = val[1] * val[16];
val_39 = val[3] * val_37;
val_40 = val[4] * val_37;
val_41 = val[0] * val_38;
val_42 = val[1] * val_38;
val_43 = val_39 + val_41;
val_44 = val_40 + val_42;
val_45 = val[12] * val[19];
val_46 = val[13] * val[19];
val_47 = val[11] * val[20];
val_48 = val[14] * val[20];
val_49 = val[10] * val[19];
val_50 = val[11] * val[19];
val_51 = val[9] * val[20];
val_52 = val[12] * val[20];
val_53 = val_45 + val_47;
val_54 = val_46 + val_48;
val_55 = val_49 + val_51;
val_56 = val_50 + val_52;
val_57 = val[27] * val_43;
val_58 = val[28] * val_44;
val_59 = val_57 + val_58;
val_60 = val[2] * val[25];
val_61 = val[2] * val[26];
val_62 = val[7] * val_60;
val_63 = val[9] * val_61;
val_64 = val[8] * val_60;
val_65 = val[10] * val_61;
val_66 = val[11] * val_62;
val_67 = val[0] * val_63;
val_68 = val[12] * val_62;
val_69 = val[1] * val_63;
val_70 = val[11] * val_64;
val_71 = val[0] * val_65;
val_72 = val[12] * val_64;
val_73 = val[1] * val_65;
val_74 = val_66 + val_67;
val_75 = val_68 + val_69;
val_76 = val_70 + val_71;
val_77 = val_72 + val_73;
val_78 = val[21] * val_35;
val_79 = val[22] * val_36;
val_80 = val_53 * val_78;
val_81 = val_54 * val_79;
val_82 = val_55 * val_78;
val_83 = val_56 * val_79;
val_84 = val_59 * val_80;
val_85 = val_57 * val_80;
val_86 = val_58 * val_81;
val_87 = val_59 * val_82;
val_88 = val_57 * val_82;
val_89 = val_58 * val_83;
val_90 = val_85 + val_86;
val_91 = val_88 + val_89;
val_92 = val[17] * val_74;
val_93 = val[17] * val_75;
val_94 = val[18] * val_76;
val_95 = val[18] * val_77;
val_96 = val_84 * val_92;
val_97 = val_90 * val_93;
val_98 = val_87 * val_94;
val_99 = val_91 * val_95;
val_100 = val_96 + val_98;
val_101 = val_97 + val_99;
val_102 = val[23] * val_100;
val_103 = val[24] * val_101;
val_104 = val_102 + val_103;
A[i*n_inputs+5] += val_104;
}
A[i*n_inputs]= val_104;
}
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
const int n_inputs= N_INPUTS;
const int n_arith= N_ARITH;
const int batch_size= 128;
const int iter= 1;
const int thresh= n_arith/3;
size_t size= batch_size * (n_inputs) * sizeof(float);
size_t size_idx= n_arith * sizeof(int);
float *h_A= (float *)malloc(size);
int *h_B= (int *)malloc(size_idx);
int *h_C= (int *)malloc(size_idx);
int *h_op_sel= (int *) malloc(size_idx);
// Initialize the host input vectors
for (int i = 0; i < n_arith; ++i)
{
if (i < thresh) {
h_B[i] = rand() % (n_inputs);
h_C[i] = rand() % (n_inputs);
}
else{
h_B[i] = rand() % (i);
h_C[i] = rand() % (i);
}
h_op_sel[i]= rand() % 2;
}
for (int i= 0; i < n_inputs; ++i) {
for (int b =0; b< batch_size; ++b) {
//h_A[b* n_inputs + i]= float(rand());
h_A[b* n_inputs + i]= 0.5;
}
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_B = NULL;
err = hipMalloc((void **)&d_B, size_idx);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_C = NULL;
err = hipMalloc((void **)&d_C, size_idx);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_op_sel = NULL;
err = hipMalloc((void **)&d_op_sel, size_idx);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size_idx, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_C, h_C, size_idx, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_op_sel, h_op_sel, size_idx, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 32;
int blocksPerGrid= (batch_size + threadsPerBlock -1)/ threadsPerBlock;
struct timeval t1, t2;
// Perform Warmup
ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
// Finish execution of kernel
hipDeviceSynchronize();
gettimeofday(&t1, 0);
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
// Finish execution of kernel
hipDeviceSynchronize();
gettimeofday(&t2, 0);
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Time of kernel: %3.4f ms \n", time);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Throughput: %.3f Gops/sec\n", (((1.0*batch_size*iter*n_arith))/time)/10E6);
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_A, d_A, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//for (int i=0; i<numElements; i++) {
for (int i=0; i<32; i++) {
printf("%d : %f,", i, h_A[i*n_inputs]);
}
err = hipFree(d_A);
err = hipFree(d_B);
err = hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
printf("Done!\n");
return 0;
}
|
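The pair above times the kernel with gettimeofday bracketing a hipDeviceSynchronize, which measures wall-clock time including launch overhead. An alternative sketch using HIP events, which report elapsed device time in milliseconds:

hipEvent_t t_start, t_stop;
hipEventCreate(&t_start);
hipEventCreate(&t_stop);
hipEventRecord(t_start, 0);
ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel,
                                       n_inputs, n_arith, thresh, iter);
hipEventRecord(t_stop, 0);
hipEventSynchronize(t_stop);
float ms = 0.0f;
hipEventElapsedTime(&ms, t_start, t_stop);
printf("Kernel time: %3.4f ms \n", ms);
hipEventDestroy(t_start);
hipEventDestroy(t_stop);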
Convert the following CUDA code to AMD GPU code:
cuda
// includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
__global__ void notDivergent(int n)
//The threads should perform the same work as
//in divergent(), but the threads within a warp
//should not diverge
{
}
__global__ void divergent(int n)
//The threads should perform the same work as
//in notDivergent(), but the threads within
//a warp should be forced to diverge
{
}
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
const int N = 10000, threads = 10000;
cudaEvent_t start, stop;
float time;
int nBlocks, nThreads;
nThreads = 512;
nBlocks = (threads + nThreads - 1)/nThreads;
//Set up the timing variables and begin timing
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//The Divergent Kernel
divergent<<<nBlocks, nThreads>>>(N);
//Stop timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//Compute the Elapsed Time
cudaEventElapsedTime(&time, start, stop);
printf("divergent kernel: %f milliseconds\n", time);
//begin new timing
cudaEventRecord(start, 0);
//The non-Divergent Kernel
notDivergent<<<nBlocks, nThreads>>>(N);
//Stop timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//Compute the Kernel Time
cudaEventElapsedTime(&time, start, stop);
printf("non-divergent kernel: %f milliseconds\n", time);
return 0;
}
|
// includes, system
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
__global__ void notDivergent(int n)
//The threads should perform the same work as
//in divergent(), but the threads within a warp
//should not diverge
{
}
__global__ void divergent(int n)
//The threads should perform the same work as
//in notDivergent(), but the threads within
//a warp should be forced to diverge
{
}
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
const int N = 10000, threads = 10000;
hipEvent_t start, stop;
float time;
int nBlocks, nThreads;
nThreads = 512;
nBlocks = (threads + nThreads - 1)/nThreads;
//Set up the timing variables and begin timing
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//The Divergent Kernel
divergent<<<nBlocks, nThreads>>>(N);
//Stop timing
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//Compute the Elapsed Time
hipEventElapsedTime(&time, start, stop);
printf("divergent kernel: %f milliseconds\n", time);
//begin new timing
hipEventRecord(start, 0);
//The non-Divergent Kernel
notDivergent<<<nBlocks, nThreads>>>(N);
//Stop timing
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//Compute the Kernel Time
hipEventElapsedTime(&time, start, stop);
printf("non-divergent kernel: %f milliseconds\n", time);
return 0;
}
|
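One porting detail matters for divergence experiments like this one: a CUDA warp is 32 threads, while most AMD GPUs execute 64-lane wavefronts, so divergence behavior keyed to a hard-coded width of 32 changes after conversion. The width can be queried instead of assumed:

// Query the SIMD width at runtime; AMD GCN/CDNA devices typically
// report 64 here, NVIDIA devices report 32.
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("warpSize = %d\n", prop.warpSize);

Inside device code the built-in warpSize variable carries the same value.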
Convert the following CUDA code to AMD GPU code:
cuda
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <cstdio>
extern __global__ void myFirstKernel(void);
int main(void){
myFirstKernel<<<1,1>>>();
printf("hi there, from CPU\n");
cudaDeviceSynchronize();
return 0;
}
|
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <hip/hip_runtime.h>
#include <cstdio>
extern __global__ void myFirstKernel(void);
int main(void){
myFirstKernel<<<1,1>>>();
printf("hi there, from CPU\n");
hipDeviceSynchronize();
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
// This example introduces CUDA's heterogeneous model of memory
// by demonstrating the difference between the "host" and "device"
// memory spaces.
// #include stdlib.h for malloc/free
#include <stdlib.h>
// #include stdio.h for printf
#include <stdio.h>
// nvcc automatically #includes headers needed for cudaMalloc, cudaFree, cudaMemcpy, & cudaMemset
int main(void)
{
// create arrays of 16 elements
int num_elements = 16;
// compute the size of the arrays in bytes
int num_bytes = num_elements * sizeof(int);
// pointers to host & device arrays
int *device_array = 0;
int *host_array = 0;
// malloc a host array
host_array = (int*)malloc(num_bytes);
// cudaMalloc a device array
// we pass cudaMalloc a pointer to the device_array pointer
cudaMalloc((void**)&device_array, num_bytes);
// if either memory allocation failed, report an error message
if(host_array == 0 || device_array == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// zero out the device array with cudaMemset
cudaMemset(device_array, 0, num_bytes);
// we can't dereference elements of device_array from the host directly:
// that will likely cause a crash. instead, we must explicitly copy from
// device memory to host memory to access the result
// copy the contents of the device array to the host array to inspect the result
// use cudaMemcpyDeviceToHost to indicate the direction of the copy
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
// print out the result element by element
for(int i=0; i < num_elements; ++i)
{
printf("%d ", host_array[i]);
}
printf("\n");
// use free to free the host array
free(host_array);
// use cudaFree to free the device array
cudaFree(device_array);
}
|
// This example introduces CUDA's heterogeneous model of memory
// by demonstrating the difference between the "host" and "device"
// memory spaces.
// #include stdlib.h for malloc/free
#include <hip/hip_runtime.h>
#include <stdlib.h>
// #include stdio.h for printf
#include <stdio.h>
// nvcc automatically #includes headers needed for cudaMalloc, cudaFree, cudaMemcpy, & cudaMemset
int main(void)
{
// create arrays of 16 elements
int num_elements = 16;
// compute the size of the arrays in bytes
int num_bytes = num_elements * sizeof(int);
// pointers to host & device arrays
int *device_array = 0;
int *host_array = 0;
// malloc a host array
host_array = (int*)malloc(num_bytes);
// cudaMalloc a device array
// we pass cudaMalloc a pointer to the device_array pointer
hipMalloc((void**)&device_array, num_bytes);
// if either memory allocation failed, report an error message
if(host_array == 0 || device_array == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// zero out the device array with cudaMemset
hipMemset(device_array, 0, num_bytes);
// we can't dereference elements of device_array from the host directly:
// that will likely cause a crash. instead, we must explicitly copy from
// device memory to host memory to access the result
// copy the contents of the device array to the host array to inspect the result
// use cudaMemcpyDeviceToHost to indicate the direction of the copy
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
// print out the result element by element
for(int i=0; i < num_elements; ++i)
{
printf("%d ", host_array[i]);
}
printf("\n");
// use free to free the host array
free(host_array);
// use cudaFree to free the device array
hipFree(device_array);
}
|
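Since this example is specifically about the host/device memory split, note that HIP also offers pinned (page-locked) host memory, which copies measurably faster than ordinary malloc'd memory. A short sketch reusing num_bytes from the example above (an illustrative addition, not part of the original):

// Pinned host buffer; hipMemcpy to and from it can reach full
// interconnect bandwidth, unlike pageable malloc'd memory.
int *pinned_array = 0;
hipHostMalloc((void**)&pinned_array, num_bytes, hipHostMallocDefault);
hipMemcpy(pinned_array, device_array, num_bytes, hipMemcpyDeviceToHost);
hipHostFree(pinned_array);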
Convert the following CUDA code to AMD GPU code:
cuda
// Coalesced memory access
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>
int Max=16384;
int width=32;
typedef struct {
double A1;
double A2;
double A3;
double A4;
}stru;
__global__ void multi(stru *A,stru *b,double *C,const int Max){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
double sum=0.0;
if(idx<Max && idy<Max && idx==idy){
for(int i=0;i<Max/4;i++){
sum+=A[idx*Max/4+i].A1*b[i*4].A1+A[idx*Max/4+i].A2*b[i*4+1].A1+A[idx*Max/4+i].A3*b[i*4+2].A1+A[idx*Max/4+i].A4*b[i*4+3].A1;
}
C[idx]=sum;
}
}
int main(){
printf("合并访存:\n");
stru *A =(stru *)malloc(Max * Max/4 * sizeof(stru)); //A
stru *b =(stru *)malloc(Max * sizeof(stru)); //b
double *C =(double *)malloc(Max * sizeof(double)); //C
double *test_c=(double *)malloc(Max * sizeof(double)); //cpu_test
int i,j;
for(i=0;i<Max;i++){
for(j=0;j<Max/4;j++){
A[i*Max/4+j].A1=i-0.1*j*4+1;
A[i*Max/4+j].A2=i-(0.1*j*4+1)+1;
A[i*Max/4+j].A3=i-(0.1*j*4+2)+1;
A[i*Max/4+j].A4=i-(0.1*j*4+3)+1;
}
}
for(i=0;i<Max;i++){
b[i].A1=log(sqrt(i*i-i+2));
b[i].A2=0.0;
b[i].A3=0.0;
b[i].A4=0.0;
C[i]=0.0;
}
stru *A_d,*b_d;
double *C_d;
cudaMalloc((void **)&A_d,Max * Max/4 * sizeof(stru));
cudaMalloc((void **)&b_d,Max *sizeof(stru));
cudaMalloc((void **)&C_d,Max *sizeof(double));
clock_t start,end;
start=clock();
cudaMemcpy(A_d, A,Max*Max/4*sizeof(stru),cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b,Max*sizeof(stru),cudaMemcpyHostToDevice);
cudaMemcpy(C_d, C,Max * sizeof(double), cudaMemcpyHostToDevice);
dim3 block(width,width);
dim3 grid(Max/block.x, Max/block.y);
multi<<<grid,block>>>(A_d,b_d,C_d,Max);
cudaMemcpy(C, C_d, Max * sizeof(double), cudaMemcpyDeviceToHost);
end=clock();
double time=(end-start)*1000/CLOCKS_PER_SEC;
//check result:
//cpu:
clock_t start_c,end_c;
start_c=clock();
for (int i = 0; i < Max; ++i){
for (int j = 0; j < Max/4; ++j)
{
test_c[i]+=A[i*Max/4+j].A1*b[j*4].A1+A[i*Max/4+j].A2*b[j*4+1].A1+A[i*Max/4+j].A3*b[j*4+2].A1+A[i*Max/4+j].A4*b[j*4+3].A1;
}
}
end_c=clock();
bool flag = true;
for (int i = 0; i < Max; ++i){
float a=test_c[i];
float b=C[i];
if (a!=b)
{
printf("cpu:%lf gpu:%lf\n",a,b);
flag = false;
}
}
if (flag == true)
printf("result correct\n");
else{
printf("resul wrong\n");
}
double time_C=(end_c-start_c)*1000/CLOCKS_PER_SEC;
printf("GPU TIME:%lf ms\n",time);
printf("CPU TIME:%lf ms\n",time_C);
cudaFree(A_d);
cudaFree(b_d);
cudaFree(C_d);
free(A);
free(b);
free(C);
}
|
// Coalesced memory access
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>
int Max=16384;
int width=32;
typedef struct {
double A1;
double A2;
double A3;
double A4;
}stru;
__global__ void multi(stru *A,stru *b,double *C,const int Max){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
double sum=0.0;
if(idx<Max && idy<Max && idx==idy){
for(int i=0;i<Max/4;i++){
sum+=A[idx*Max/4+i].A1*b[i*4].A1+A[idx*Max/4+i].A2*b[i*4+1].A1+A[idx*Max/4+i].A3*b[i*4+2].A1+A[idx*Max/4+i].A4*b[i*4+3].A1;
}
C[idx]=sum;
}
}
int main(){
printf("合并访存:\n");
stru *A =(stru *)malloc(Max * Max/4 * sizeof(stru)); //A
stru *b =(stru *)malloc(Max * sizeof(stru)); //b
double *C =(double *)malloc(Max * sizeof(double)); //C
double *test_c=(double *)malloc(Max * sizeof(double)); //cpu_test
int i,j;
for(i=0;i<Max;i++){
for(j=0;j<Max/4;j++){
A[i*Max/4+j].A1=i-0.1*j*4+1;
A[i*Max/4+j].A2=i-(0.1*j*4+1)+1;
A[i*Max/4+j].A3=i-(0.1*j*4+2)+1;
A[i*Max/4+j].A4=i-(0.1*j*4+3)+1;
}
}
for(i=0;i<Max;i++){
b[i].A1=log(sqrt(i*i-i+2));
b[i].A2=0.0;
b[i].A3=0.0;
b[i].A4=0.0;
C[i]=0.0;
}
stru *A_d,*b_d;
double *C_d;
hipMalloc((void **)&A_d,Max * Max/4 * sizeof(stru));
hipMalloc((void **)&b_d,Max *sizeof(stru));
hipMalloc((void **)&C_d,Max *sizeof(double));
clock_t start,end;
start=clock();
hipMemcpy(A_d, A,Max*Max/4*sizeof(stru),hipMemcpyHostToDevice);
hipMemcpy(b_d, b,Max*sizeof(stru),hipMemcpyHostToDevice);
hipMemcpy(C_d, C,Max * sizeof(double), hipMemcpyHostToDevice);
dim3 block(width,width);
dim3 grid(Max/block.x, Max/block.y);
multi<<<grid,block>>>(A_d,b_d,C_d,Max);
hipMemcpy(C, C_d, Max * sizeof(double), hipMemcpyDeviceToHost);
end=clock();
double time=(end-start)*1000/CLOCKS_PER_SEC;
//check result:
//cpu:
clock_t start_c,end_c;
start_c=clock();
for (int i = 0; i < Max; ++i){
for (int j = 0; j < Max/4; ++j)
{
test_c[i]+=A[i*Max/4+j].A1*b[j*4].A1+A[i*Max/4+j].A2*b[j*4+1].A1+A[i*Max/4+j].A3*b[j*4+2].A1+A[i*Max/4+j].A4*b[j*4+3].A1;
}
}
end_c=clock();
bool flag = true;
for (int i = 0; i < Max; ++i){
float a=test_c[i];
float b=C[i];
if (a!=b)
{
printf("cpu:%lf gpu:%lf\n",a,b);
flag = false;
}
}
if (flag == true)
printf("result correct\n");
else{
printf("resul wrong\n");
}
double time_C=(end_c-start_c)*1000/CLOCKS_PER_SEC;
printf("GPU TIME:%lf ms\n",time);
printf("CPU TIME:%lf ms\n",time_C);
hipFree(A_d);
hipFree(b_d);
hipFree(C_d);
free(A);
free(b);
free(C);
}
|
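A side note on the result check in this pair: both versions narrow the doubles to float and compare with an exact a != b, which can flag correct GPU output as wrong once CPU and GPU results differ only in rounding. A tolerance-based comparison is more robust; the thresholds below are arbitrary illustrative choices:

// Relative-plus-absolute tolerance instead of exact equality.
double diff = fabs(test_c[i] - C[i]);
if (diff > 1e-9 * fabs(test_c[i]) + 1e-12)
    flag = false;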
Convert the following CUDA code to AMD GPU code:
cuda
#include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
#include<cstdlib> //for abs(x)
#include<stdio.h>
using namespace std;
__global__ void kernel_multiplication( int* A, int* B, int* C,int N,int M);
int main()
{
int NUMBER_OF_ELEMENTS;
int VECTOR_SIZE;
cout<<"Enter the vector size:";
cin>>VECTOR_SIZE;
NUMBER_OF_ELEMENTS=VECTOR_SIZE;
int SIZE = NUMBER_OF_ELEMENTS*sizeof(int);
cudaEvent_t start,end,start1,end1;
int* hostA = (int*)malloc(VECTOR_SIZE*sizeof(int));
int* hostB = (int*)malloc(SIZE*VECTOR_SIZE*sizeof(int));
int* hostC = (int*)malloc(VECTOR_SIZE*sizeof(int));
int* deviceA,*deviceB,*deviceC;
srand(time(0));
int i,j;
cout<<"\nVector:\n";
for(i=0;i<VECTOR_SIZE;i++)
{
hostA[i] = rand()%VECTOR_SIZE;
cout<<hostA[i]<<"\t";
}
//initialize matrix by random elements
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
for(j=0;j<VECTOR_SIZE;j++)
{
hostB[i*VECTOR_SIZE+j] = rand()%VECTOR_SIZE;
}
}
cout<<"\nMatrix=\n";
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
for(j=0;j<VECTOR_SIZE;j++)
{
cout<<hostB[i*VECTOR_SIZE+j]<<"\t";
}
cout<<"\n";
}
cudaMalloc(&deviceA,VECTOR_SIZE*sizeof(int));
cudaMalloc(&deviceB,NUMBER_OF_ELEMENTS*VECTOR_SIZE*sizeof(int));
cudaMalloc(&deviceC,VECTOR_SIZE*sizeof(int));
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventCreate(&start1);
cudaEventCreate(&end1);
cudaEventRecord(start);
cudaMemcpy(deviceA,hostA,VECTOR_SIZE*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,hostB,SIZE*VECTOR_SIZE,cudaMemcpyHostToDevice);
kernel_multiplication<<<NUMBER_OF_ELEMENTS,1>>>(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS,VECTOR_SIZE);
cudaDeviceSynchronize();
cudaMemcpy(hostC,deviceC,VECTOR_SIZE*sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord(end);
cudaEventSynchronize(end);
float t=0;
cudaEventElapsedTime(&t,start,end);
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
double error = 0;
int* answer = (int*) malloc(VECTOR_SIZE*sizeof(int));
cudaEventRecord(start1);
for(int i=0;i<NUMBER_OF_ELEMENTS;i++)
{
int sum = 0;
for(int j=0;j<VECTOR_SIZE;j++)
{
sum += hostA[j]*hostB[j*VECTOR_SIZE+i];
}
answer[i] = sum;
}
for(int k=0;k<VECTOR_SIZE;k++)
{
cout<<k<<")"<< "Expected value = "<<answer[k]<<" Actual value = "<<hostC[k]<<"\n";
error += double(abs(answer[k]-hostC[k]));
}
error=sqrt(error);
cout<<"error = "<<error<<"\n";
cudaEventRecord(end1);
cudaEventSynchronize(end1);
float t1=0;
cudaEventElapsedTime(&t1,start1,end1);
cout<<"\nSequential time="<<t1;
cout<<"\nParallel time="<<t<<endl;
// hostA/hostB/hostC were allocated with malloc, so release them with free().
free(hostA);
free(hostB);
free(hostC);
return cudaDeviceSynchronize();
}
__global__ void kernel_multiplication( int* A, int* B, int* C, int N,int M)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int sum = 0;
if(index<N)
{
for(int i=0;i<M;i++)
sum+=A[i]*B[(i*M)+index];
C[index] = sum;
}
}
|
#include <hip/hip_runtime.h>
#include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
#include<cstdlib> //for abs(x)
#include<stdio.h>
using namespace std;
__global__ void kernel_multiplication( int* A, int* B, int* C,int N,int M);
int main()
{
int NUMBER_OF_ELEMENTS;
int VECTOR_SIZE;
cout<<"Enter the vector size:";
cin>>VECTOR_SIZE;
NUMBER_OF_ELEMENTS=VECTOR_SIZE;
int SIZE = NUMBER_OF_ELEMENTS*sizeof(int);
hipEvent_t start,end,start1,end1;
int* hostA = (int*)malloc(VECTOR_SIZE*sizeof(int));
int* hostB = (int*)malloc(SIZE*VECTOR_SIZE*sizeof(int));
int* hostC = (int*)malloc(VECTOR_SIZE*sizeof(int));
int* deviceA,*deviceB,*deviceC;
srand(time(0));
int i,j;
cout<<"\nVector:\n";
for(i=0;i<VECTOR_SIZE;i++)
{
hostA[i] = rand()%VECTOR_SIZE;
cout<<hostA[i]<<"\t";
}
//initialize matrix by random elements
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
for(j=0;j<VECTOR_SIZE;j++)
{
hostB[i*VECTOR_SIZE+j] = rand()%VECTOR_SIZE;
}
}
cout<<"\nMatrix=\n";
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
for(j=0;j<VECTOR_SIZE;j++)
{
cout<<hostB[i*VECTOR_SIZE+j]<<"\t";
}
cout<<"\n";
}
hipMalloc(&deviceA,VECTOR_SIZE*sizeof(int));
hipMalloc(&deviceB,NUMBER_OF_ELEMENTS*VECTOR_SIZE*sizeof(int));
hipMalloc(&deviceC,VECTOR_SIZE*sizeof(int));
hipEventCreate(&start);
hipEventCreate(&end);
hipEventCreate(&start1);
hipEventCreate(&end1);
hipEventRecord(start);
hipMemcpy(deviceA,hostA,VECTOR_SIZE*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(deviceB,hostB,SIZE*VECTOR_SIZE,hipMemcpyHostToDevice);
kernel_multiplication<<<NUMBER_OF_ELEMENTS,1>>>(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS,VECTOR_SIZE);
hipDeviceSynchronize();
hipMemcpy(hostC,deviceC,VECTOR_SIZE*sizeof(int),hipMemcpyDeviceToHost);
hipEventRecord(end);
hipEventSynchronize(end);
float t=0;
hipEventElapsedTime(&t,start,end);
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
double error = 0;
int* answer = (int*) malloc(VECTOR_SIZE*sizeof(int));
hipEventRecord(start1);
for(int i=0;i<NUMBER_OF_ELEMENTS;i++)
{
int sum = 0;
for(int j=0;j<VECTOR_SIZE;j++)
{
sum += hostA[j]*hostB[j*VECTOR_SIZE+i];
}
answer[i] = sum;
}
for(int k=0;k<VECTOR_SIZE;k++)
{
cout<<k<<")"<< "Expected value = "<<answer[k]<<" Actual value = "<<hostC[k]<<"\n";
error += double(abs(answer[k]-hostC[k]));
}
error=sqrt(error);
cout<<"error = "<<error<<"\n";
hipEventRecord(end1);
hipEventSynchronize(end1);
float t1=0;
hipEventElapsedTime(&t1,start1,end1);
cout<<"\nSequential time="<<t1;
cout<<"\nParallel time="<<t<<endl;
// hostA/hostB/hostC were allocated with malloc, so release them with free().
free(hostA);
free(hostB);
free(hostC);
return hipDeviceSynchronize();
}
__global__ void kernel_multiplication( int* A, int* B, int* C, int N,int M)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int sum = 0;
if(index<N)
{
for(int i=0;i<M;i++)
sum+=A[i]*B[(i*M)+index];
C[index] = sum;
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#define STOP 0
#define START 1
/* Play with the following two values */
#define NB 1000000L //Size of array (long integer)
#define MANY 200L //Number of transfers
/* (over-)Simple chronometer function */
void chrono (int kind, float *time) {
static clock_t counts;
if (kind == START) {
*time = 0.0;
counts = clock();
return;
}
if (kind == STOP) {
*time = ((float)(clock()-counts))/((float)CLOCKS_PER_SEC);
}
}
int main () {
float *ptr, *gpu_ptr;
cudaError_t err;
float time, number_of_Gbytes;
long i, j;
/* Dynamic allocations below */
// Allocate ptr on host below
ptr = (float *)malloc(NB * sizeof(float));
// Allocate gpu_ptr on device below
err = cudaMalloc(&gpu_ptr,sizeof(float)*NB);
/* Some error handling */
if (ptr == NULL) {
printf ("Not enough memory on host\n");
exit (1);
}
if (err != 0) {
printf ("Pb allocating memory on device. Reason:\n");
printf ("%s\n", cudaGetErrorString (err));
exit (1);
}
/* Are the following two lines necessary ? */
for (i = 0; i < NB; i++)
ptr[i] = 9.0;
/* Transfer loop below */
chrono (START, &time);
for (j = 0; j < MANY; j++) {
// perform memory transfer here
// cudaMemcpy(gpu_ptr,ptr,sizeof(float)*NB,cudaMemcpyHostToDevice);
cudaMemcpy(gpu_ptr,ptr,sizeof(float)*NB,cudaMemcpyDeviceToHost);
}
chrono (STOP, &time);
/* Output results */
number_of_Gbytes = (MANY*(float)sizeof(float)*NB/1024./1024./1024.);
printf ("%f Gbytes transfered in %f seconds\n", number_of_Gbytes, time);
printf ("Bandwidth = %f Gb/s\n", number_of_Gbytes / time);
/* Clean the place */
cudaFree (gpu_ptr);
free (ptr);
return 0;
}
|
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
#define STOP 0
#define START 1
/* Play with the following two values */
#define NB 1000000L //Size of array (long integer)
#define MANY 200L //Number of transfers
/* (over-)Simple chronometer function */
void chrono (int kind, float *time) {
static clock_t counts;
if (kind == START) {
*time = 0.0;
counts = clock();
return;
}
if (kind == STOP) {
*time = ((float)(clock()-counts))/((float)CLOCKS_PER_SEC);
}
}
int main () {
float *ptr, *gpu_ptr;
hipError_t err;
float time, number_of_Gbytes;
long i, j;
/* Dynamic allocations below */
// Allocate ptr on host below
ptr = (float *)malloc(NB * sizeof(float));
// Allocate gpu_ptr on device below
err = hipMalloc(&gpu_ptr,sizeof(float)*NB);
/* Some error handling */
if (ptr == NULL) {
printf ("Not enough memory on host\n");
exit (1);
}
if (err != 0) {
printf ("Pb allocating memory on device. Reason:\n");
printf ("%s\n", hipGetErrorString (err));
exit (1);
}
/* Are the following two lines necessary ? */
for (i = 0; i < NB; i++)
ptr[i] = 9.0;
/* Transfer loop below */
chrono (START, &time);
for (j = 0; j < MANY; j++) {
// perform memory transfer here
// cudaMemcpy(gpu_ptr,ptr,sizeof(float)*NB,cudaMemcpyHostToDevice);
hipMemcpy(gpu_ptr,ptr,sizeof(float)*NB,hipMemcpyDeviceToHost);
}
chrono (STOP, &time);
/* Output results */
number_of_Gbytes = (MANY*(float)sizeof(float)*NB/1024./1024./1024.);
printf ("%f Gbytes transfered in %f seconds\n", number_of_Gbytes, time);
printf ("Bandwidth = %f Gb/s\n", number_of_Gbytes / time);
/* Clean the place */
hipFree (gpu_ptr);
free (ptr);
return 0;
}
|
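Note that the copy inside the transfer loop passes hipMemcpyDeviceToHost even though the destination is the device pointer; the commented-out line above it shows the intended host-to-device direction. Like CUDA, HIP may reject or misinterpret a mismatched kind. One defensive option, sketched here, is hipMemcpyDefault, which infers the direction from the pointer attributes on platforms with unified virtual addressing:

// Let the runtime infer the copy direction from the pointers themselves.
hipError_t cerr = hipMemcpy(gpu_ptr, ptr, sizeof(float) * NB, hipMemcpyDefault);
if (cerr != hipSuccess)
    printf("transfer failed: %s\n", hipGetErrorString(cerr));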
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void _segmentedScanBackKer(float *maxdist, int *maxdistidx, int *label, float *blockmaxdist, int *blocklabel, int *blockmaxdistidx, int numelements)
{
// Shared memory holding the carry-in element from the intermediate result
// arrays, i.e. the last element of the preceding block of the input array.
// It consists of three pieces of information.
__shared__ float shdcurmaxdist[1];
__shared__ int shdcurlabel[1];
__shared__ int shdcurmaxdistindex[1];
// Status flag marking whether the label of the preceding block's last
// element matches the label of this segment's first element.
__shared__ int state[1];
// Compute the index used for the inter-block accumulation (the array index
// outside this block).
int idx = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
// The first thread of each block reads the last element of the preceding
// block from the intermediate result arrays.
if (threadIdx.x == 0) {
shdcurmaxdist[0] = blockmaxdist[blockIdx.x];
shdcurlabel[0] = blocklabel[blockIdx.x];
shdcurmaxdistindex[0] = blockmaxdistidx[blockIdx.x];
// state records whether the label of the preceding block's last element
// equals the label of this segment's first element: 1 if equal, 0 otherwise.
state[0] = (label[idx] == shdcurlabel[0]);
}
// Intra-block synchronization.
__syncthreads();
// If the flag is 0, the preceding block is unrelated to this one (they lie
// in different segments); return immediately.
if (state[0] == 0)
return;
// If the array index exceeds the array length, return immediately.
if (idx >= numelements)
return;
// If the label at this position matches the label of the currently known
// maximum perpendicular distance, and the stored distance here is smaller,
// update this position's maximum-distance record and its index.
if (label[idx] == shdcurlabel[0] && maxdist[idx] < shdcurmaxdist[0]) {
maxdist[idx] = shdcurmaxdist[0];
maxdistidx[idx] = shdcurmaxdistindex[0];
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _segmentedScanBackKer(float *maxdist, int *maxdistidx, int *label, float *blockmaxdist, int *blocklabel, int *blockmaxdistidx, int numelements)
{
// Shared memory holding the carry-in element from the intermediate result
// arrays, i.e. the last element of the preceding block of the input array.
// It consists of three pieces of information.
__shared__ float shdcurmaxdist[1];
__shared__ int shdcurlabel[1];
__shared__ int shdcurmaxdistindex[1];
// Status flag marking whether the label of the preceding block's last
// element matches the label of this segment's first element.
__shared__ int state[1];
// Compute the index used for the inter-block accumulation (the array index
// outside this block).
int idx = (blockIdx.x + 1) * blockDim.x + threadIdx.x;
// The first thread of each block reads the last element of the preceding
// block from the intermediate result arrays.
if (threadIdx.x == 0) {
shdcurmaxdist[0] = blockmaxdist[blockIdx.x];
shdcurlabel[0] = blocklabel[blockIdx.x];
shdcurmaxdistindex[0] = blockmaxdistidx[blockIdx.x];
// state records whether the label of the preceding block's last element
// equals the label of this segment's first element: 1 if equal, 0 otherwise.
state[0] = (label[idx] == shdcurlabel[0]);
}
// Intra-block synchronization.
__syncthreads();
// If the flag is 0, the preceding block is unrelated to this one (they lie
// in different segments); return immediately.
if (state[0] == 0)
return;
// If the array index exceeds the array length, return immediately.
if (idx >= numelements)
return;
// If the label at this position matches the label of the currently known
// maximum perpendicular distance, and the stored distance here is smaller,
// update this position's maximum-distance record and its index.
if (label[idx] == shdcurlabel[0] && maxdist[idx] < shdcurmaxdist[0]) {
maxdist[idx] = shdcurmaxdist[0];
maxdistidx[idx] = shdcurmaxdistindex[0];
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void TemporalConvolutionTBC_bp_bias( float* matrix, float* target, int rows, int stride, float scale) {
int i = blockIdx.x * 32 + threadIdx.x;
float t = 0;
for (int j = blockIdx.y; j < rows; j += gridDim.y)
t += matrix[j * stride + i];
atomicAdd(&target[i], t * scale);
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void TemporalConvolutionTBC_bp_bias( float* matrix, float* target, int rows, int stride, float scale) {
int i = blockIdx.x * 32 + threadIdx.x;
float t = 0;
for (int j = blockIdx.y; j < rows; j += gridDim.y)
t += matrix[j * stride + i];
atomicAdd(&target[i], t * scale);
}
|
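This kernel assumes one thread per output column (i = blockIdx.x * 32 + threadIdx.x) and strides over rows via gridDim.y, with no bounds check on i. The host code is not part of this pair, so the launch below is purely hypothetical; d_matrix and d_target are assumed device pointers, and stride (the column count) must be a multiple of 32 because the kernel never checks i against it:

// Hypothetical launch: x covers the stride columns in 32-wide blocks,
// y gives each block a starting row and the kernel steps by gridDim.y.
dim3 block(32, 1);
dim3 grid(stride / 32, 64);
TemporalConvolutionTBC_bp_bias<<<grid, block>>>(d_matrix, d_target,
                                                rows, stride, scale);

The float atomicAdd used for the final accumulation into target is natively supported in HIP device code on current AMD hardware.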
Convert the following CUDA code to AMD GPU code:
cuda
#include <complex>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuComplex.h>
// Kernel Definitions
/******************************************************************************
* Function: CUDAisInMandelbrotSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* CUDAisInMandelbrotSet() takes in a cuDoubleComplex number, c, and an int pointer
* to return the number of iterations
*
* Parameters: [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInMandelbrotSet(cuDoubleComplex c, int *iterations){
double zr = 0;
double zi = 0;
cuDoubleComplex z = make_cuDoubleComplex(zr, zi);
int i = 0;
for (i = 0; i < 100 && cuCabs(z) < 2.0; ++i){
z = cuCadd(cuCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
* Function: CUDAisInJuliaSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* CUDAisInJuliaSet() takes in two cuDoubleComplex numbers, c & z, and an int
* pointer to return the number of iterations
*
* Parameters: [in] z
* [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInJuliaSet(cuDoubleComplex z, cuDoubleComplex c, int *iterations){
int i = 0;
for (i = 0; i < 100 && cuCabs(z) < 2.0; ++i){
z = cuCadd(cuCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
* Function: mandelCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* mandelCalc() is a kernel which takes in a set of real and imaginary values
* and will calculate in parallel whether or not each point is in or out of the
* Mandelbrot set.
*
* Parameters: [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void mandelCalc(double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
cuDoubleComplex num;
if(i < len){
num = make_cuDoubleComplex(cReals[i], cImags[i]);
CUDAisInMandelbrotSet(num, &iterations[i]);
}
}
/******************************************************************************
* Function: juliaCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* juliaCalc() is a kernel which takes in a set of real and imaginary values as
* well as a real and imaginary number (as doubles), and will calculate in
* parallel whether or not each point in the arrays is in or out of the Julia set
* associated with the given point
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void juliaCalc(double zReal, double zImag, double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
cuDoubleComplex num;
cuDoubleComplex z;
if(i < len){
num = make_cuDoubleComplex(cReals[i], cImags[i]);
z = make_cuDoubleComplex(zReal, zImag);
CUDAisInJuliaSet(num, z, &iterations[i]);
}
}
/******************************************************************************
* Function: cudaCalcMandelbrot
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcMandelbrot() is a function which takes in a set of real and imaginary
* values, and will return whether each point is in or out of the Mandelbrot set using
* the in-out iterations parameter
*
* Parameters: [in] setOfReals
* [in] setOfImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcMandelbrot(double *setOfReals, double *setOfImags, int *iterations, int len) {
// Block management
int n = len;
// Device arrays
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
cudaMalloc(&d_set_reals, len * sizeof(double));
cudaMalloc(&d_set_imags, len * sizeof(double));
cudaMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
cudaMemcpy(d_set_reals, setOfReals, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_set_imags, setOfImags, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_iterations, iterations, len * sizeof(int), cudaMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
mandelCalc<<<nBlocks, nThreads>>>(d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
cudaMemcpy(setOfReals, d_set_reals, len * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(setOfImags, d_set_imags, len * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(iterations, d_iterations, len * sizeof(int), cudaMemcpyDeviceToHost);
// Free the device memory
cudaFree(d_set_reals);
cudaFree(d_set_imags);
cudaFree(d_iterations);
}
/******************************************************************************
* Function: cudaCalcJulia
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcJulia() is a function which takes in a real and imaginary z value and
* a set of real and imaginary values, c, and will reutrn if each point in c is
* in the Julia set associated with the point z
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcJulia(double zReal, double zImag, double *setOfReals, double *setOfImags, int *iterations, int len) {
// Local variables
int n = len;
// Device variables
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
cudaMalloc(&d_set_reals, len * sizeof(double));
cudaMalloc(&d_set_imags, len * sizeof(double));
cudaMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
cudaMemcpy(d_set_reals, setOfReals, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_set_imags, setOfImags, len * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_iterations, iterations, len * sizeof(int), cudaMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
juliaCalc<<<nBlocks, nThreads>>>(zReal, zImag, d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
cudaMemcpy(iterations, d_iterations, len * sizeof(int), cudaMemcpyDeviceToHost);
// Free the device memory
cudaFree(d_set_reals);
cudaFree(d_set_imags);
cudaFree(d_iterations);
}
|
#include <hip/hip_runtime.h>
#include <complex>
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <hip/hip_complex.h>
// Kernel Definitions
/******************************************************************************
* Function: CUDAisInMandelbrotSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* CUDAisInMandelbrotSet() takes in a cuDoubleComplex number, c, and an int pointer
* to return the number of iterations
*
* Parameters: [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInMandelbrotSet(hipDoubleComplex c, int *iterations){
double zr = 0;
double zi = 0;
hipDoubleComplex z = make_hipDoubleComplex(zr, zi);
int i = 0;
for (i = 0; i < 100 && hipCabs(z) < 2.0; ++i){
z = hipCadd(hipCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
* Function: CUDAisInJuliaSet
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* CUDAisInJuliaSet() takes in two cuDoubleComplex numbers, c & z, and an int
* pointer to return the number of iterations
*
* Parameters: [in] z
* [in] c
* [in-out] iterations
******************************************************************************/
__device__ void CUDAisInJuliaSet(hipDoubleComplex z, hipDoubleComplex c, int *iterations){
int i = 0;
for (i = 0; i < 100 && hipCabs(z) < 2.0; ++i){
z = hipCadd(hipCmul(z, z), c); // z = z*z + c;
}
*iterations = i;
}
/******************************************************************************
* Function: mandelCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* mandelCalc() is a kernel which takes in a set of real and imaginary values
* and will calculate in parallel whether or not each point is in or out of the
* mandelbrot set.
*
* Parameters: [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void mandelCalc(double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
hipDoubleComplex num;
if(i < len){
num = make_hipDoubleComplex(cReals[i], cImags[i]);
CUDAisInMandelbrotSet(num, &iterations[i]);
}
}
/******************************************************************************
* Function: juliaCalc
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* juliaCalc() is a kernel which takes in a set of real and imaginary values as
* well as a real and imaginary number (as doubles), and will calculate in
* parallel whether or not each point in the arrays is in or out of the Julia set
* associated with the given point
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__global__ void juliaCalc(double zReal, double zImag, double *cReals, double *cImags, int *iterations, int len) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
hipDoubleComplex num;
hipDoubleComplex z;
if(i < len){
num = make_hipDoubleComplex(cReals[i], cImags[i]);
z = make_hipDoubleComplex(zReal, zImag);
CUDAisInJuliaSet(num, z, &iterations[i]);
}
}
/******************************************************************************
* Function: cudaCalcMandelbrot
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcMandelbrot() is a function which takes in a set of real and imaginary
* values, and will return whether each point is in or out of the mandelbrot set using
* the in-out iterations parameter
*
* Parameters: [in] setOfReals
* [in] setOfImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcMandelbrot(double *setOfReals, double *setOfImags, int *iterations, int len) {
// Block management
int n = len;
// Device arrays
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
hipMalloc(&d_set_reals, len * sizeof(double));
hipMalloc(&d_set_imags, len * sizeof(double));
hipMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
hipMemcpy(d_set_reals, setOfReals, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_set_imags, setOfImags, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_iterations, iterations, len * sizeof(int), hipMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
mandelCalc<<<nBlocks, nThreads>>>(d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
hipMemcpy(setOfReals, d_set_reals, len * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(setOfImags, d_set_imags, len * sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(iterations, d_iterations, len * sizeof(int), hipMemcpyDeviceToHost);
// Free the device memory
hipFree(d_set_reals);
hipFree(d_set_imags);
hipFree(d_iterations);
}
/******************************************************************************
* Function: cudaCalcJulia
*
* Authors: Elliott Rarden & Katie Macmillan
*
* Description:
* cudaCalcJulia() is a function which takes in a real and imaginary z value and
* a set of real and imaginary values, c, and will return whether each point in c is
* in the Julia set associated with the point z
*
* Parameters: [in] zReal
* [in] zImag
* [in] cReals
* [in] cImags
* [in-out] iterations
* [in] len
******************************************************************************/
__host__ void cudaCalcJulia(double zReal, double zImag, double *setOfReals, double *setOfImags, int *iterations, int len) {
// Local variables
int n = len;
// Device variables
double *d_set_reals;
double *d_set_imags;
int *d_iterations;
// Allocate our memory on the device
hipMalloc(&d_set_reals, len * sizeof(double));
hipMalloc(&d_set_imags, len * sizeof(double));
hipMalloc(&d_iterations, len * sizeof(int));
// Copy our stuff to the device
hipMemcpy(d_set_reals, setOfReals, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_set_imags, setOfImags, len * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_iterations, iterations, len * sizeof(int), hipMemcpyHostToDevice);
// Run the code on the GPU
int nThreads = 128; // should be multiple of 32 (up to 1024)
int nBlocks = ( n + nThreads - 1 ) / nThreads;
juliaCalc<<<nBlocks, nThreads>>>(zReal, zImag, d_set_reals, d_set_imags, d_iterations, len);
// Copy stuff from the GPU to our host
hipMemcpy(iterations, d_iterations, len * sizeof(int), hipMemcpyDeviceToHost);
// Free the device memory
hipFree(d_set_reals);
hipFree(d_set_imags);
hipFree(d_iterations);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 32
#define GROUP_OF_PIXELS 1
__global__ void mandelKernel(float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = (blockIdx.x * blockDim.x + threadIdx.x) * GROUP_OF_PIXELS;
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
for (int i = 0; i < GROUP_OF_PIXELS && col < resX; i++, col++) {
float x = lowerX + col * stepX;
float y = lowerY + row * stepY;
float z_re = x, z_im = y;
int val = 0;
for (; val < maxIterations; ++val) {
if (z_re * z_re + z_im * z_im > 4.f) break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = x + new_re;
z_im = y + new_im;
}
img[row * resX + col] = val;
}
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) {
int *temp_img;
int *output;
size_t pitch;
cudaHostAlloc((void **)&output, resX * resY * sizeof(int), cudaHostAllocDefault);
cudaMallocPitch((void **) &temp_img, &pitch, resX * sizeof (int), resY);
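// note: the kernel and the cudaMemcpy below index temp_img linearly, ignoring any row padding added by the pitched allocation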
int group_block_size = BLOCK_SIZE * GROUP_OF_PIXELS;
static int x_blocks = resX % group_block_size == 0 ? resX / group_block_size : resX / group_block_size + 1;
static int y_blocks = resY % BLOCK_SIZE == 0 ? resY / BLOCK_SIZE : resY / BLOCK_SIZE + 1;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
dim3 num_block(x_blocks, y_blocks);
mandelKernel<<<num_block, block_size>>>(upperX, upperY, lowerX, lowerY, temp_img, resX, resY, maxIterations);
cudaMemcpy(output, temp_img, resX * resY * sizeof(int), cudaMemcpyDeviceToHost);
memcpy(img, output, resX * resY * sizeof(int));
cudaFreeHost(output);
cudaFree(temp_img);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 32
#define GROUP_OF_PIXELS 1
__global__ void mandelKernel(float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = (blockIdx.x * blockDim.x + threadIdx.x) * GROUP_OF_PIXELS;
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
for (int i = 0; i < GROUP_OF_PIXELS && col < resX; i++, col++) {
float x = lowerX + col * stepX;
float y = lowerY + row * stepY;
float z_re = x, z_im = y;
int val = 0;
for (; val < maxIterations; ++val) {
if (z_re * z_re + z_im * z_im > 4.f) break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = x + new_re;
z_im = y + new_im;
}
img[row * resX + col] = val;
}
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations) {
int *temp_img;
int *output;
size_t pitch;
hipHostAlloc((void **)&output, resX * resY * sizeof(int), hipHostMallocDefault);
hipMallocPitch((void **) &temp_img, &pitch, resX * sizeof (int), resY);
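// note: the kernel and the hipMemcpy below index temp_img linearly, ignoring any row padding added by the pitched allocation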
int group_block_size = BLOCK_SIZE * GROUP_OF_PIXELS;
static int x_blocks = resX % group_block_size == 0 ? resX / group_block_size : resX / group_block_size + 1;
static int y_blocks = resY % BLOCK_SIZE == 0 ? resY / BLOCK_SIZE : resY / BLOCK_SIZE + 1;
dim3 block_size(BLOCK_SIZE, BLOCK_SIZE);
dim3 num_block(x_blocks, y_blocks);
mandelKernel<<<num_block, block_size>>>(upperX, upperY, lowerX, lowerY, temp_img, resX, resY, maxIterations);
hipMemcpy(output, temp_img, resX * resY * sizeof(int), hipMemcpyDeviceToHost);
memcpy(img, output, resX * resY * sizeof(int));
hipHostFree(output);
hipFree(temp_img);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "plane.cuh"
#include <stdio.h>
__host__ __device__ Plane3::Plane3() {
a = Vec3();
b = Vec3();
c = Vec3();
}
__host__ __device__ Plane3::Plane3(Vec3 a, Vec3 b, Vec3 c) {
this->a = a;
this->b = b;
this->c = c;
}
__host__ __device__ float determinant(Vec3 a, Vec3 b, Vec3 c) {
return (a.x * b.y * c.z) + (b.x * c.y * a.z) + (c.x * a.y * b.z) - (c.x * b.y * a.z) - (b.x * a.y * c.z) - (a.x * c.y * b.z);
}
__host__ __device__ float Plane3::outside(Vec3 x) {
Vec3 b_prime = b - a;
Vec3 c_prime = c - a;
Vec3 x_prime = x - a;
return determinant(b_prime, c_prime, x_prime);
}
__host__ __device__ Vec3 Plane3::intersection_point(Vec3 line_origin, Vec3 line_direction) {
Vec3 plane_normal = ((c - a).cross(b - a)).normalize();
float denom = plane_normal.dot(line_direction.normalize());
float t = 99999999;
if (denom > 0.0001) {
Vec3 line_origin_to_point = a - line_origin;
t = line_origin_to_point.dot(plane_normal) / denom;
}
return line_origin + (line_direction.normalize() * t);
// float t = (plane_normal.dot(a) - plane_normal.dot(line_origin)) / plane_normal.dot(line_direction.normalize());
// if (t < 0) // Falling of screen
// t = 99999999.0;
// return line_origin + ((line_direction).normalize() * t);
}
|
#include <hip/hip_runtime.h>
#include "plane.cuh"
#include <stdio.h>
__host__ __device__ Plane3::Plane3() {
a = Vec3();
b = Vec3();
c = Vec3();
}
__host__ __device__ Plane3::Plane3(Vec3 a, Vec3 b, Vec3 c) {
this->a = a;
this->b = b;
this->c = c;
}
__host__ __device__ float determinant(Vec3 a, Vec3 b, Vec3 c) {
return (a.x * b.y * c.z) + (b.x * c.y * a.z) + (c.x * a.y * b.z) - (c.x * b.y * a.z) - (b.x * a.y * c.z) - (a.x * c.y * b.z);
}
__host__ __device__ float Plane3::outside(Vec3 x) {
Vec3 b_prime = b - a;
Vec3 c_prime = c - a;
Vec3 x_prime = x - a;
return determinant(b_prime, c_prime, x_prime);
}
__host__ __device__ Vec3 Plane3::intersection_point(Vec3 line_origin, Vec3 line_direction) {
Vec3 plane_normal = ((c - a).cross(b - a)).normalize();
float denom = plane_normal.dot(line_direction.normalize());
float t = 99999999;
if (denom > 0.0001) {
Vec3 line_origin_to_point = a - line_origin;
t = line_origin_to_point.dot(plane_normal) / denom;
}
return line_origin + (line_direction.normalize() * t);
// float t = (plane_normal.dot(a) - plane_normal.dot(line_origin)) / plane_normal.dot(line_direction.normalize());
// if (t < 0) // Falling of screen
// t = 99999999.0;
// return line_origin + ((line_direction).normalize() * t);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "cuda_runtime.h"
#include <stdio.h>
#include <time.h>
//#define SIZE 1000
using namespace std;
__global__ void Convolution1(int *a,int *filter,int *result,int size_a,int size_filter,int size_result)
{
int i=blockIdx.x;
int j=blockIdx.y;
if(i<size_result&&j<size_result)
{
for(int k=0;k<size_filter;k++)
for(int l=0;l<size_filter;l++)
result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l];
}
}
__global__ void Convolution3(int *a,int *filter,int *result,int size_a,int size_filter,int size_result)
{
int i=blockIdx.x;
int j=blockIdx.y;
int k=threadIdx.x;
int l=threadIdx.y;
if(i<size_result&&j<size_result&&k<size_filter&&l<size_filter)
{
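// one thread per filter tap: every thread of the block accumulates into the same output element, so the add must be atomic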
atomicAdd(&result[i*size_result+j],filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]);
}
}
void Convolution2(int *a,int *filter,int *result,int size_a,int size_filter,int size_result)
{
for(int i=0;i<size_result;i++)
{
for(int j=0;j<size_result;j++)
for(int k=0;k<size_filter;k++)
for(int l=0;l<size_filter;l++)
result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l];
}
}
int main()
{
int *a,*filter,*result,*result_serial,*result_optimal;
int size_a,size_filter,size_result;
clock_t t;
double time_taken;
x: printf("\nEnter size of array:");
scanf("%d",&size_a);
printf("\nEnter size of filter:");
scanf("%d",&size_filter);
if(size_a%2==0||size_filter%2==0)
{
printf("\nEnter odd numbers for sizes.");
goto x;
}
if((size_a-size_filter)<0)
{
printf("\nEnter larger matrix size or smaller filter size.");
goto x;
}
size_result=(size_a-size_filter)/2 +1;
printf("Size of Matrix after Convolution with stride = (2) will be: %d \n",size_result);
cudaMallocManaged(&a,size_a*size_a*sizeof(int));
cudaMallocManaged(&filter,size_filter*size_filter*sizeof(int));
cudaMallocManaged(&result,size_result*size_result*sizeof(int));
cudaMallocManaged(&result_optimal,size_result*size_result*sizeof(int));
cudaMallocManaged(&result_serial,size_result*size_result*sizeof(int));
srand(0);
for(int i=0;i<size_a*size_a;i++)
{
a[i]=rand()%100;
//printf("Enter a[%d]",i);
//scanf("%d",&a[i]);
}
for(int i=0;i<size_filter*size_filter;i++)
{
filter[i]=rand()%100;
//printf("Enter filter[%d]",i);
//scanf("%d",&filter[i]);
}
for(int i=0;i<size_result*size_result;i++)
{
result[i]=0;
result_serial[i]=0;
result_optimal[i]=0;
}
dim3 res(size_result,size_result);
dim3 fil(size_filter,size_filter);
t=clock();
Convolution1<<<res,1>>>(a,filter,result,size_a,size_filter,size_result);
cudaDeviceSynchronize();
t=clock()-t;
time_taken=((double)t)/CLOCKS_PER_SEC;
printf("Time for Convolution with %d threads: %f \n",size_result*size_result,time_taken);
t=clock();
Convolution3<<<res,fil>>>(a,filter,result_optimal,size_a,size_filter,size_result);
cudaDeviceSynchronize();
t=clock()-t;
time_taken=((double)t)/CLOCKS_PER_SEC;
printf("Time for Convolution with %d x %d threads: %f \n",size_result*size_result,size_filter*size_filter,time_taken);
t=clock();
Convolution2(a,filter,result_serial,size_a,size_filter,size_result);
t=clock()-t;
time_taken=((double)t)/CLOCKS_PER_SEC;
printf("Time for Convolution using serial:%f \n",time_taken);
printf("\nSanity Check:");
if(size_filter*size_filter>11)
for(int i=0;i<10;i++)
{
printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]);
}
else
for(int i=0;i<size_filter*size_filter;i++)
{
printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]);
}
cudaFree(a);
cudaFree(filter);
cudaFree(result);
cudaFree(result_serial);
cudaFree(result_optimal);
return 0;
}
/***********************OUTPUT*************************
[user10@linux-teslagpu ~]$ ./a.out
Enter size of array:10001
Enter size of filter:3
Size of Matrix after Convolution with stride = (2) will be: 5000
Time for Convolution with 25000000 threads: 0.000000
Time for Convolution using serial:1.990000
On Gtx 1050:
E:\!KUNAL\MIT\BE\HPC\MiniProject>a.exe
Enter size of array:10001
Enter size of filter:3
Size of Matrix after Convolution with stride = (2) will be: 5000
Time for Convolution with 25000000 threads: 2.210000
Time for Convolution with 25000000 x 9 threads: 0.134000
Time for Convolution using serial:3.210000
Sanity Check:
result[0]=12792
result_serial[0]=12792
result_optimal[0]=12792
result[1]=14060
result_serial[1]=14060
result_optimal[1]=14060
result[2]=20138
result_serial[2]=20138
result_optimal[2]=20138
result[3]=19328
result_serial[3]=19328
result_optimal[3]=19328
result[4]=20288
result_serial[4]=20288
result_optimal[4]=20288
result[5]=14252
result_serial[5]=14252
result_optimal[5]=14252
result[6]=16804
result_serial[6]=16804
result_optimal[6]=16804
result[7]=20854
result_serial[7]=20854
result_optimal[7]=20854
result[8]=24886
result_serial[8]=24886
result_optimal[8]=24886
******************************************************/
|
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
//#define SIZE 1000
using namespace std;
__global__ void Convolution1(int *a,int *filter,int *result,int size_a,int size_filter,int size_result)
{
int i=blockIdx.x;
int j=blockIdx.y;
if(i<size_result&&j<size_result)
{
for(int k=0;k<size_filter;k++)
for(int l=0;l<size_filter;l++)
result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l];
}
}
__global__ void Convolution3(int *a,int *filter,int *result,int size_a,int size_filter,int size_result)
{
int i=blockIdx.x;
int j=blockIdx.y;
int k=threadIdx.x;
int l=threadIdx.y;
if(i<size_result&&j<size_result&&k<size_filter&&l<size_filter)
{
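// one thread per filter tap: every thread of the block accumulates into the same output element, so the add must be atomic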
atomicAdd(&result[i*size_result+j],filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l]);
}
}
void Convolution2(int *a,int *filter,int *result,int size_a,int size_filter,int size_result)
{
for(int i=0;i<size_result;i++)
{
for(int j=0;j<size_result;j++)
for(int k=0;k<size_filter;k++)
for(int l=0;l<size_filter;l++)
result[i*size_result+j] += filter[k*size_filter+l]*a[(2*i+k)*size_a+2*j+l];
}
}
int main()
{
int *a,*filter,*result,*result_serial,*result_optimal;
int size_a,size_filter,size_result;
clock_t t;
double time_taken;
x: printf("\nEnter size of array:");
scanf("%d",&size_a);
printf("\nEnter size of filter:");
scanf("%d",&size_filter);
if(size_a%2==0||size_filter%2==0)
{
printf("\nEnter odd numbers for sizes.");
goto x;
}
if((size_a-size_filter)<0)
{
printf("\nEnter larger matrix size or smaller filter size.");
goto x;
}
size_result=(size_a-size_filter)/2 +1;
printf("Size of Matrix after Convolution with stride = (2) will be: %d \n",size_result);
hipMallocManaged(&a,size_a*size_a*sizeof(int));
hipMallocManaged(&filter,size_filter*size_filter*sizeof(int));
hipMallocManaged(&result,size_result*size_result*sizeof(int));
hipMallocManaged(&result_optimal,size_result*size_result*sizeof(int));
hipMallocManaged(&result_serial,size_result*size_result*sizeof(int));
srand(0);
for(int i=0;i<size_a*size_a;i++)
{
a[i]=rand()%100;
//printf("Enter a[%d]",i);
//scanf("%d",&a[i]);
}
for(int i=0;i<size_filter*size_filter;i++)
{
filter[i]=rand()%100;
//printf("Enter filter[%d]",i);
//scanf("%d",&filter[i]);
}
for(int i=0;i<size_result*size_result;i++)
{
result[i]=0;
result_serial[i]=0;
result_optimal[i]=0;
}
dim3 res(size_result,size_result);
dim3 fil(size_filter,size_filter);
t=clock();
Convolution1<<<res,1>>>(a,filter,result,size_a,size_filter,size_result);
hipDeviceSynchronize();
t=clock()-t;
time_taken=((double)t)/CLOCKS_PER_SEC;
printf("Time for Convolution with %d threads: %f \n",size_result*size_result,time_taken);
t=clock();
Convolution3<<<res,fil>>>(a,filter,result_optimal,size_a,size_filter,size_result);
hipDeviceSynchronize();
t=clock()-t;
time_taken=((double)t)/CLOCKS_PER_SEC;
printf("Time for Convolution with %d x %d threads: %f \n",size_result*size_result,size_filter*size_filter,time_taken);
t=clock();
Convolution2(a,filter,result_serial,size_a,size_filter,size_result);
t=clock()-t;
time_taken=((double)t)/CLOCKS_PER_SEC;
printf("Time for Convolution using serial:%f \n",time_taken);
printf("\nSanity Check:");
if(size_filter*size_filter>11)
for(int i=0;i<10;i++)
{
printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]);
}
else
for(int i=0;i<size_filter*size_filter;i++)
{
printf("\nresult[%d]=%d \nresult_serial[%d]=%d \nresult_optimal[%d]=%d\n",i,result[i],i,result_serial[i],i,result_optimal[i]);
}
hipFree(a);
hipFree(filter);
hipFree(result);
hipFree(result_serial);
hipFree(result_optimal);
return 0;
}
/***********************OUTPUT*************************
[user10@linux-teslagpu ~]$ ./a.out
Enter size of array:10001
Enter size of filter:3
Size of Matrix after Convolution with stride = (2) will be: 5000
Time for Convolution with 25000000 threads: 0.000000
Time for Convolution using serial:1.990000
On Gtx 1050:
E:\!KUNAL\MIT\BE\HPC\MiniProject>a.exe
Enter size of array:10001
Enter size of filter:3
Size of Matrix after Convolution with stride = (2) will be: 5000
Time for Convolution with 25000000 threads: 2.210000
Time for Convolution with 25000000 x 9 threads: 0.134000
Time for Convolution using serial:3.210000
Sanity Check:
result[0]=12792
result_serial[0]=12792
result_optimal[0]=12792
result[1]=14060
result_serial[1]=14060
result_optimal[1]=14060
result[2]=20138
result_serial[2]=20138
result_optimal[2]=20138
result[3]=19328
result_serial[3]=19328
result_optimal[3]=19328
result[4]=20288
result_serial[4]=20288
result_optimal[4]=20288
result[5]=14252
result_serial[5]=14252
result_optimal[5]=14252
result[6]=16804
result_serial[6]=16804
result_optimal[6]=16804
result[7]=20854
result_serial[7]=20854
result_optimal[7]=20854
result[8]=24886
result_serial[8]=24886
result_optimal[8]=24886
******************************************************/
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
template <unsigned int blockSize>
__global__ void reduce6(int *g_idata, int *g_odata, int n) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
int main(void)
{
long int size = 1 << 26;
long int s;
int sizeByte = size*sizeof(int);
int* h_data = (int*) malloc(sizeByte);
for(int i = 0; i < size; i++) {
// h_data[i] = rand() & 0xFF;
h_data[i] = i % 10;
}
long long int sum = 0;
for(int i = 0; i < size; i++) sum += h_data[i];
printf("CPU results = %lld \n", sum);
int* d_idata = NULL;
int* d_odata = NULL;
cudaMalloc(&d_idata, sizeByte);
cudaMalloc(&d_odata, sizeByte);
cudaMemcpy(d_idata, h_data, sizeByte, cudaMemcpyHostToDevice);
s = size >> 6;
int blocks = (s+512-1)/512;
reduce6<512><<<blocks/2, 512, 512*sizeof(int)>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
printf("The size of array is %ld and it is processed on # of Blocks: %d \n", size, blocks/2);
s = blocks/2;
blocks = (s+512-1)/512;
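// second pass: reduce the per-block partial sums from the first launch down to a single value in d_idata[0]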
reduce6<512><<<blocks/2, 512, 512*sizeof(int)>>>(d_odata, d_idata, s);
cudaDeviceSynchronize();
cudaMemcpy(h_data, d_idata, sizeof(int), cudaMemcpyDeviceToHost);
printf("GPU result = %d\n", h_data[0]);
cudaFree(d_idata);
cudaFree(d_odata);
free(h_data);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
template <unsigned int blockSize>
__device__ void warpReduce(volatile int* sdata, int tid) {
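// warp-synchronous step: relies on the first 32 threads running in lockstep (one warp on NVIDIA; lanes of a single 64-wide wavefront on most AMD GPUs)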
if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}
template <unsigned int blockSize>
__global__ void reduce6(int *g_idata, int *g_odata, int n) {
extern __shared__ int sdata[];
// perform first level of reduction, reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
sdata[tid] = 0;
while (i < n) {
sdata[tid] += g_idata[i] + g_idata[i+blockSize];
i += gridSize;
}
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) {
if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads();
}
if (tid < 32) warpReduce<blockSize>(sdata, tid);
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
int main(void)
{
long int size = 1 << 26;
long int s;
int sizeByte = size*sizeof(int);
int* h_data = (int*) malloc(sizeByte);
for(int i = 0; i < size; i++) {
// h_data[i] = rand() & 0xFF;
h_data[i] = i % 10;
}
long long int sum = 0;
for(int i = 0; i < size; i++) sum += h_data[i];
printf("CPU results = %lld \n", sum);
int* d_idata = NULL;
int* d_odata = NULL;
hipMalloc(&d_idata, sizeByte);
hipMalloc(&d_odata, sizeByte);
hipMemcpy(d_idata, h_data, sizeByte, hipMemcpyHostToDevice);
s = size >> 6;
int blocks = (s+512-1)/512;
reduce6<512><<<blocks/2, 512, 512*sizeof(int)>>>(d_idata, d_odata, size);
hipDeviceSynchronize();
printf("The size of array is %ld and it is processed on # of Blocks: %d \n", size, blocks/2);
s = blocks/2;
blocks = (s+512-1)/512;
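// second pass: reduce the per-block partial sums from the first launch down to a single value in d_idata[0]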
reduce6<512><<<blocks/2, 512, 512*sizeof(int)>>>(d_odata, d_idata, s);
hipDeviceSynchronize();
hipMemcpy(h_data, d_idata, sizeof(int), hipMemcpyDeviceToHost);
printf("GPU result = %d\n", h_data[0]);
hipFree(d_idata);
hipFree(d_odata);
free(h_data);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/****
File: findRedsDriver.cu
By: Ilya Nemtsov
Compile: nvcc findRedsDriver.cu -o frgpu
Run: ./frgpu
****/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <cuda.h>
#define NUMPARTICLES 32768
#define NEIGHBORHOOD .05
#define THREADSPERBLOCK 128
void initPos(float *);
float findDistance(float *, int, int);
__device__ float findDistanceGPU(float *, int, int);
void dumpResults(int index[]);
__global__ void findRedsGPU(float *p, int *numI);
int main() {
cudaEvent_t start, stop;
float time;
float *pos, //host pos
*dpos; //device pos
int *numReds, //host numReds
*dnumReds;//device numreds
float psize = NUMPARTICLES * 4 * sizeof(float); //size of pos
int nsize = NUMPARTICLES * sizeof(int); //size of numReds
pos = (float *) malloc(psize); //allocate space for pos
numReds = (int *) malloc(nsize); // allocate space for numreds
initPos(pos); //initialize pos
// your code to allocate device arrays for pos and numReds go here
cudaMalloc((void** )&dpos, psize);
cudaMalloc((void** )&dnumReds, nsize);
// copy host pos to device pos
cudaMemcpy(dpos,pos,psize, cudaMemcpyHostToDevice);
// create timer events
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/* invoke kernel findRedsGPU here */
findRedsGPU<<<NUMPARTICLES/THREADSPERBLOCK,THREADSPERBLOCK>>>(dpos, dnumReds);
cudaThreadSynchronize();
// your code to copy results to numReds[] go here
cudaMemcpy(numReds,dnumReds,nsize,cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time = %f\n", time);
dumpResults(numReds);
// clean up
free(pos);
cudaFree(dpos);
free(numReds);
cudaFree(dnumReds);
}
void initPos(float *p) {
// your code for initializing pos goes here
int i;
int roll;
for (i=0; i<NUMPARTICLES; i++) {
p[i*4] = rand() / (float) RAND_MAX;
p[i*4+1] = rand() / (float) RAND_MAX;
p[i*4+2] = rand() / (float) RAND_MAX;
roll = rand() % 3;
if (roll == 0)
p[i*4+3] = 0xff0000;
else if (roll == 1)
p[i*4+3] = 0x00ff00;
else
p[i*4+3] = 0x0000ff;
}
}
__device__ float findDistanceGPU(float *p, int i, int j) {
// your code for calculating distance for particle i and j
float dx, dy, dz;
dx = p[i*4] - p[j*4];
dy = p[i*4+1] - p[j*4+1];
dz = p[i*4+2] - p[j*4+2];
return(sqrt(dx*dx + dy*dy + dz*dz));
}
__global__ void findRedsGPU(float *p, int *numI) {
// your code for counting red particles goes here
int j;
float distance;
int i = blockDim.x * blockIdx.x + threadIdx.x;
numI[i]=0;
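// brute-force O(N^2) neighbor search: each thread compares its particle against every other particle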
for(j=0; j< NUMPARTICLES; j++){
if(i!=j){
distance = findDistanceGPU(p, i, j);
if(distance < NEIGHBORHOOD && p[j*4+3] == 0xff0000){
numI[i]++;
}
}
}
}
void dumpResults(int index[]) {
int i;
FILE *fp;
fp = fopen("./dump.out", "w");
for (i=0; i<NUMPARTICLES; i++) {
fprintf(fp, "%d %d\n", i, index[i]);
}
fclose(fp);
}
|
/****
File: findRedsDriver.cu
By: Ilya Nemtsov
Compile: nvcc findRedsDriver.cu -o frgpu
Run: ./frgpu
****/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define NUMPARTICLES 32768
#define NEIGHBORHOOD .05
#define THREADSPERBLOCK 128
void initPos(float *);
float findDistance(float *, int, int);
__device__ float findDistanceGPU(float *, int, int);
void dumpResults(int index[]);
__global__ void findRedsGPU(float *p, int *numI);
int main() {
hipEvent_t start, stop;
float time;
float *pos, //host pos
*dpos; //device pos
int *numReds, //host numReds
*dnumReds;//device numreds
float psize = NUMPARTICLES * 4 * sizeof(float); //size of pos
int nsize = NUMPARTICLES * sizeof(int); //size of numReds
pos = (float *) malloc(psize); //allocate space for pos
numReds = (int *) malloc(nsize); // allocate space for numreds
initPos(pos); //initialize pos
// your code to allocate device arrays for pos and numReds go here
hipMalloc((void** )&dpos, psize);
hipMalloc((void** )&dnumReds, nsize);
// copy host pos to device pos
hipMemcpy(dpos,pos,psize, hipMemcpyHostToDevice);
// create timer events
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
/* invoke kernel findRedsGPU here */
findRedsGPU<<<NUMPARTICLES/THREADSPERBLOCK,THREADSPERBLOCK>>>(dpos, dnumReds);
hipDeviceSynchronize();
// your code to copy results to numReds[] go here
hipMemcpy(numReds,dnumReds,nsize,hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("Elapsed time = %f\n", time);
dumpResults(numReds);
// clean up
free(pos);
hipFree(dpos);
free(numReds);
hipFree(dnumReds);
}
void initPos(float *p) {
// your code for initializing pos goes here
int i;
int roll;
for (i=0; i<NUMPARTICLES; i++) {
p[i*4] = rand() / (float) RAND_MAX;
p[i*4+1] = rand() / (float) RAND_MAX;
p[i*4+2] = rand() / (float) RAND_MAX;
roll = rand() % 3;
if (roll == 0)
p[i*4+3] = 0xff0000;
else if (roll == 1)
p[i*4+3] = 0x00ff00;
else
p[i*4+3] = 0x0000ff;
}
}
__device__ float findDistanceGPU(float *p, int i, int j) {
// your code for calculating distance for particle i and j
float dx, dy, dz;
dx = p[i*4] - p[j*4];
dy = p[i*4+1] - p[j*4+1];
dz = p[i*4+2] - p[j*4+2];
return(sqrt(dx*dx + dy*dy + dz*dz));
}
__global__ void findRedsGPU(float *p, int *numI) {
// your code for counting red particles goes here
int j;
float distance;
int i = blockDim.x * blockIdx.x + threadIdx.x;
numI[i]=0;
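// brute-force O(N^2) neighbor search: each thread compares its particle against every other particle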
for(j=0; j< NUMPARTICLES; j++){
if(i!=j){
distance = findDistanceGPU(p, i, j);
if(distance < NEIGHBORHOOD && p[j*4+3] == 0xff0000){
numI[i]++;
}
}
}
}
void dumpResults(int index[]) {
int i;
FILE *fp;
fp = fopen("./dump.out", "w");
for (i=0; i<NUMPARTICLES; i++) {
fprintf(fp, "%d %d\n", i, index[i]);
}
fclose(fp);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <cuda.h>
#include <stdio.h>
__global__ void K1() {
unsigned num = 0;
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned ii = 0; ii < id; ++ii)
num += ii;
printf("K1: %d\n", threadIdx.x);
}
__global__ void K2() {
unsigned num = 0;
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned ii = 0; ii < id; ++ii)
num += ii;
__syncthreads();
printf("K2: %d\n", threadIdx.x);
}
__global__ void K3() {
printf("\tK3\n");
}
int main() {
int *ptr;
cudaStream_t s1, s2, s3;
cudaStreamCreate(&s1);
cudaStreamCreate(&s2);
cudaStreamCreate(&s3);
K1<<<32, 32, 0, s1>>>();
cudaHostAlloc(&ptr, sizeof(int), 0);
K2<<<1, 1024, 0, s2>>>();
K3<<<1, 32, 0, s3>>>();
cudaDeviceSynchronize();
return 0;
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void K1() {
unsigned num = 0;
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned ii = 0; ii < id; ++ii)
num += ii;
printf("K1: %d\n", threadIdx.x);
}
__global__ void K2() {
unsigned num = 0;
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned ii = 0; ii < id; ++ii)
num += ii;
__syncthreads();
printf("K2: %d\n", threadIdx.x);
}
__global__ void K3() {
printf("\tK3\n");
}
int main() {
int *ptr;
hipStream_t s1, s2, s3;
hipStreamCreate(&s1);
hipStreamCreate(&s2);
hipStreamCreate(&s3);
K1<<<32, 32, 0, s1>>>();
hipHostMalloc(&ptr, sizeof(int), 0);
K2<<<1, 1024, 0, s2>>>();
K3<<<1, 32, 0, s3>>>();
hipDeviceSynchronize();
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
//#include <cutil_inline.h>
extern "C"
void runCudaPart(float a[], float b[], float c[], int n);
__global__ void myKernel(float *a, float *b, float *c, int n)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//return;
if (idx < n)
{
c[idx] = a[idx] + b[idx];
//c[idx] = 10.0;
}
//printf("Test\n");
return;
}
// Main cuda function
void runCudaPart(float *a, float *b, float *c, int n) {
float *a_d, *b_d, *c_d;
size_t size = n * sizeof(float);
cudaMalloc((void **) &a_d, size);
cudaMalloc((void **) &b_d, size);
cudaMalloc((void **) &c_d, size);
cudaMemcpy(a_d, a, size,cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, size,cudaMemcpyHostToDevice);
cudaMemcpy(c_d, c, size,cudaMemcpyHostToDevice);
//cudaMemset(c_d, 0, n);
printf("Executing CUDA kernel\n");
myKernel <<<1,100>>> (a_d, b_d, c_d, n);
cudaMemcpy(c, c_d, size, cudaMemcpyDeviceToHost);
printf("Kernel ended.\n");
}
|
#include <stdio.h>
#include <hip/hip_runtime.h>
//#include <cutil_inline.h>
extern "C"
void runCudaPart(float a[], float b[], float c[], int n);
__global__ void myKernel(float *a, float *b, float *c, int n)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//return;
if (idx < n)
{
c[idx] = a[idx] + b[idx];
//c[idx] = 10.0;
}
//printf("Test\n");
return;
}
// Main cuda function
void runCudaPart(float *a, float *b, float *c, int n) {
float *a_d, *b_d, *c_d;
size_t size = n * sizeof(float);
hipMalloc((void **) &a_d, size);
hipMalloc((void **) &b_d, size);
hipMalloc((void **) &c_d, size);
hipMemcpy(a_d, a, size,hipMemcpyHostToDevice);
hipMemcpy(b_d, b, size,hipMemcpyHostToDevice);
hipMemcpy(c_d, c, size,hipMemcpyHostToDevice);
//cudaMemset(c_d, 0, n);
printf("Executing CUDA kernel\n");
myKernel <<<1,100>>> (a_d, b_d, c_d, n);
hipMemcpy(c, c_d, size, hipMemcpyDeviceToHost);
printf("Kernel ended.\n");
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
float * g_outputs_d, *g_sweepers_d_2;
__global__ void update_positions(float max_speed, float * outputs_d, float * sweepers_d)
{
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
sweepers_d[my_index] += (2 * outputs_d[my_index] * max_speed) - max_speed;
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
float * g_outputs_d, *g_sweepers_d_2;
__global__ void update_positions(float max_speed, float * outputs_d, float * sweepers_d)
{
int my_index = blockIdx.x * blockDim.x + threadIdx.x;
sweepers_d[my_index] += (2 * outputs_d[my_index] * max_speed) - max_speed;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
//=============================================================================================
// Name : thread3dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 26-02-2014
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include <iostream>
#include <stdio.h>
#include <vector>
using namespace std;
#define BLOCK_SIZE 2
__global__ void scanTheadInformationGPU(float *threadXIdsGPU,int N,int M,int L) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int idz=blockIdx.z*blockDim.z+threadIdx.z;
if ( idx < N ) {
if ( idy < M ) {
if ( idz < L ) {
threadXIdsGPU[idx+idy*N+idz*N*M]=-(idx+idy*N+idz*N*M);
//threadXIdsGPU[idz+idy*L+idx*M*L]=-(idz+idy*L+idx*M*L);
}
}
}
}
int main() {
// pointers to host memory matrices
std::vector< float > vector1d;
std::vector< std::vector< float* > > vector3d;
float *vector1d_gpu;
// pointers to device memory matrices
//float *vectorGPU;
// N,M and L are the sizes on each dimension
int N=2,M=3,L=4,totalSize;
unsigned int ui,uj,uk;
totalSize=N*M*L;
// Allocate arrays threadIds and blockIds on host
vector1d.resize(totalSize);
vector3d.resize(N, std::vector< float* > (M));
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
vector3d[ui][uj]=&(vector1d[uj*L+ui*M*L]);
}
}
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
//vector1d[ui+uj*N+uk*N*M]=ui+uj*N+uk*N*M;
vector1d[uk+uj*L+ui*M*L]=uk+uj*L+ui*M*L;
}
}
}
// Allocate arrays threadIdsGPU and blockIdsGPU on device
cudaMalloc ((void **) &vector1d_gpu, sizeof(float)*N*M*L);
// Copy data from host memory to device memory
cudaMemcpy(vector1d_gpu, &(vector1d[0]), sizeof(float)*N*M*L, cudaMemcpyHostToDevice);
int block_size=BLOCK_SIZE;
dim3 dimBlock(block_size,block_size,block_size);
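// ceiling division: one extra block per dimension whenever the size is not a multiple of the block size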
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1),(L/dimBlock.z) + (!(L%dimBlock.z)?0:1));
// Call the kernel
scanTheadInformationGPU <<<dimGrid,dimBlock>>> (vector1d_gpu,N,M,L);
// Copy data from device memory to host memory
cudaMemcpy(&(vector1d[0]), vector1d_gpu, sizeof(float)*N*M*L, cudaMemcpyDeviceToHost);
// Print all the data about the threads
cout << "vector1d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector1d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector1d[uk+uj*L+ui*M*L];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
cout << "vector3d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector3d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector3d[ui][uj][uk];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
/*
cout << "vector1d_host:" <<endl;
for (ui=0; ui<numRows; ui++) {
for (uj=0; uj<numColumns; uj++) {
cout << input2d_host[ui][uj];
if (uj==numColumns-1) { cout << endl;} else { cout << "\t"; }
}
}
cout << endl;
*/
}
|
//=============================================================================================
// Name : thread3dStl.cu
// Author : Jose Refojo
// Version :
// Creation date : 26-02-2014
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will initialize a number of arrays stored in stl vectors,
// then it will grab data from each thread (such as thread position inside the block and block),
// save it, send it back into the main memory, and print it
//=============================================================================================
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <vector>
using namespace std;
#define BLOCK_SIZE 2
__global__ void scanTheadInformationGPU(float *threadXIdsGPU,int N,int M,int L) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
int idy=blockIdx.y*blockDim.y+threadIdx.y;
int idz=blockIdx.z*blockDim.z+threadIdx.z;
if ( idx < N ) {
if ( idy < M ) {
if ( idz < L ) {
threadXIdsGPU[idx+idy*N+idz*N*M]=-(idx+idy*N+idz*N*M);
//threadXIdsGPU[idz+idy*L+idx*M*L]=-(idz+idy*L+idx*M*L);
}
}
}
}
int main() {
// pointers to host memory matrices
std::vector< float > vector1d;
std::vector< std::vector< float* > > vector3d;
float *vector1d_gpu;
// pointers to device memory matrices
//float *vectorGPU;
// N,M and L are the sizes on each dimension
int N=2,M=3,L=4,totalSize;
unsigned int ui,uj,uk;
totalSize=N*M*L;
// Allocate arrays threadIds and blockIds on host
vector1d.resize(totalSize);
vector3d.resize(N, std::vector< float* > (M));
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
vector3d[ui][uj]=&(vector1d[uj*L+ui*M*L]);
}
}
for (ui=0;ui<N;ui++) {
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
//vector1d[ui+uj*N+uk*N*M]=ui+uj*N+uk*N*M;
vector1d[uk+uj*L+ui*M*L]=uk+uj*L+ui*M*L;
}
}
}
// Allocate arrays threadIdsGPU and blockIdsGPU on device
hipMalloc ((void **) &vector1d_gpu, sizeof(float)*N*M*L);
// Copy data from host memory to device memory
hipMemcpy(vector1d_gpu, &(vector1d[0]), sizeof(float)*N*M*L, hipMemcpyHostToDevice);
int block_size=BLOCK_SIZE;
dim3 dimBlock(block_size,block_size,block_size);
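// ceiling division: one extra block per dimension whenever the size is not a multiple of the block size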
dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1),(M/dimBlock.y) + (!(M%dimBlock.y)?0:1),(L/dimBlock.z) + (!(L%dimBlock.z)?0:1));
// Call the kernel
scanTheadInformationGPU <<<dimGrid,dimBlock>>> (vector1d_gpu,N,M,L);
// Copy data from device memory to host memory
hipMemcpy(&(vector1d[0]), vector1d_gpu, sizeof(float)*N*M*L, hipMemcpyDeviceToHost);
// Print all the data about the threads
cout << "vector1d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector1d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector1d[uk+uj*L+ui*M*L];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
cout << "vector3d_host: (" << N << "," << M << "," << L << ")" << endl;
for (ui=0;ui<N;ui++) {
cout << "vector3d_host, slice in Z " << ui << ":" << endl;
for (uj=0;uj<M;uj++) {
for (uk=0;uk<L;uk++) {
cout << vector3d[ui][uj][uk];
if (uk==L-1) { cout << endl;} else { cout << "\t"; }
}
}
}
/*
cout << "vector1d_host:" <<endl;
for (ui=0; ui<numRows; ui++) {
for (uj=0; uj<numColumns; uj++) {
cout << input2d_host[ui][uj];
if (uj==numColumns-1) { cout << endl;} else { cout << "\t"; }
}
}
cout << endl;
*/
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <cuda.h>
int main() {
// get the range of stream priorities for this device
int priority_high, priority_low;
cudaDeviceGetStreamPriorityRange(&priority_low, &priority_high);
// create streams with highest and lowest available priorities
cudaStream_t st_high, st_low;
cudaStreamCreateWithPriority(&st_high, cudaStreamNonBlocking, priority_high);
cudaStreamCreateWithPriority(&st_low, cudaStreamNonBlocking, priority_low);
printf("%d -- %d\n", priority_low, priority_high);
}
|
#include <stdio.h>
#include <hip/hip_runtime.h>
int main() {
// get the range of stream priorities for this device
int priority_high, priority_low;
hipDeviceGetStreamPriorityRange(&priority_low, &priority_high);
// create streams with highest and lowest available priorities
hipStream_t st_high, st_low;
hipStreamCreateWithPriority(&st_high, hipStreamNonBlocking, priority_high);
hipStreamCreateWithPriority(&st_low, hipStreamNonBlocking, priority_low);
printf("%d -- %d\n", priority_low, priority_high);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
extern __device__ __constant__ int constNumber[4];
|
#include <hip/hip_runtime.h>
extern __device__ __constant__ int constNumber[4];
|
Convert the following CUDA code to AMD GPU code:
cuda
#define TILE_DIM 128
//template<typename T>
//__device__ void sumColumns(const T* matrix, T* result,
// const int rows, const int cols) {
//
// __shared__ T tile[TILE_DIM][TILE_DIM];
//
// int by = blockIdx.y;
// int ty = threadIdx.y;
// int row = by * blockDim.y + ty;
// T sum = 0;
//
// #pragma unroll
// for (int t = 0; t < (cols - 1) / TILE_DIM + 1; t++) {
// #pragma unroll
// for (int i = 0; i < TILE_DIM; i++) {
// int r = by * TILE_DIM + i;
// int c = t * TILE_DIM + ty;
// if (r < rows && c < cols) {
// tile[i][ty] = matrix[r * cols + c];
// } else {
// tile[i][ty] = 0;
// }
// }
// __syncthreads();
//
// #pragma unroll
// for (int j = 0; j < TILE_DIM; j++) {
// sum += tile[ty][j];
// }
// __syncthreads();
// }
//
// if (row < rows) {
// result[row] = sum;
// }
//}
template<typename T>
__device__ void sumColumns(const T* matrix, T* result,
const int rows, const int cols) {
__shared__ T tile[TILE_DIM];
int row = blockIdx.x;
if (row < rows) {
int index = threadIdx.x;
int partLength = (cols + TILE_DIM - 1) / TILE_DIM;
T sum = 0;
#pragma unroll
for (int i = 0; i < partLength; i++) {
int col = i * TILE_DIM + index;
if (col < cols) {
T value = matrix[row * cols + col];
sum += value;
}
}
tile[index] = sum;
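// interleaved tree reduction in shared memory: partial sums are combined at stride d, with d doubling each step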
#pragma unroll
for (int d = 1; d < TILE_DIM && d < cols; d <<= 1) {
__syncthreads();
if (index % (d << 1) == 0) {
int valueIndex = index + d;
if (valueIndex < TILE_DIM) {
T value = tile[valueIndex];
sum += value;
tile[index] = sum;
}
}
}
if (index == 0) {
result[row] = sum;
}
}
}
|
#include <hip/hip_runtime.h>
#define TILE_DIM 128
//template<typename T>
//__device__ void sumColumns(const T* matrix, T* result,
// const int rows, const int cols) {
//
// __shared__ T tile[TILE_DIM][TILE_DIM];
//
// int by = blockIdx.y;
// int ty = threadIdx.y;
// int row = by * blockDim.y + ty;
// T sum = 0;
//
// #pragma unroll
// for (int t = 0; t < (cols - 1) / TILE_DIM + 1; t++) {
// #pragma unroll
// for (int i = 0; i < TILE_DIM; i++) {
// int r = by * TILE_DIM + i;
// int c = t * TILE_DIM + ty;
// if (r < rows && c < cols) {
// tile[i][ty] = matrix[r * cols + c];
// } else {
// tile[i][ty] = 0;
// }
// }
// __syncthreads();
//
// #pragma unroll
// for (int j = 0; j < TILE_DIM; j++) {
// sum += tile[ty][j];
// }
// __syncthreads();
// }
//
// if (row < rows) {
// result[row] = sum;
// }
//}
template<typename T>
__device__ void sumColumns(const T* matrix, T* result,
const int rows, const int cols) {
__shared__ T tile[TILE_DIM];
int row = blockIdx.x;
if (row < rows) {
int index = threadIdx.x;
int partLength = (cols + TILE_DIM - 1) / TILE_DIM;
T sum = 0;
#pragma unroll
for (int i = 0; i < partLength; i++) {
int col = i * TILE_DIM + index;
if (col < cols) {
T value = matrix[row * cols + col];
sum += value;
}
}
tile[index] = sum;
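// interleaved tree reduction in shared memory: partial sums are combined at stride d, with d doubling each step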
#pragma unroll
for (int d = 1; d < TILE_DIM && d < cols; d <<= 1) {
__syncthreads();
if (index % (d << 1) == 0) {
int valueIndex = index + d;
if (valueIndex < TILE_DIM) {
T value = tile[valueIndex];
sum += value;
tile[index] = sum;
}
}
}
if (index == 0) {
result[row] = sum;
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,int var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float* var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp >= +1.9749E-43f + expf(-0.0f)) {
for (int i=0; i < var_1; ++i) {
if (comp == (var_3 - -0.0f)) {
for (int i=0; i < var_2; ++i) {
comp += (-0.0f * var_6 + var_7 + var_8 * -0.0f);
float tmp_1 = +1.2736E-36f;
comp = tmp_1 / acosf((-1.3056E36f + (var_9 / (+1.8507E6f / (-0.0f + (var_10 / var_11))))));
comp += (-0.0f + (var_12 * (var_13 - powf((var_14 - powf((+0.0f + -1.9912E-37f / (var_15 + (var_16 + -1.1420E-41f))), +0.0f)), (var_17 / -0.0f / logf(var_18 - (+1.4255E-41f - (var_19 / (+0.0f * -1.1114E-44f / +1.8196E-35f)))))))));
for (int i=0; i < var_4; ++i) {
var_20[i] = (var_21 * (var_22 - var_23));
comp += var_20[i] * +0.0f - (-1.2856E34f - (-1.6144E-43f / ldexpf(logf(floorf((var_24 + (var_25 * (-1.0566E22f * (-0.0f - var_26)))))), 2)));
comp = (var_27 * var_28);
}
for (int i=0; i < var_5; ++i) {
comp = -1.2844E-43f * (+1.0536E34f - (var_29 * (+0.0f - var_30)));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
int tmp_5 = atoi(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float* tmp_21 = initPointer( atof(argv[21]) );
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31);
cudaDeviceSynchronize();
return 0;
}
|
/* This is an automatically generated test. Do not modify */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,int var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float* var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp >= +1.9749E-43f + expf(-0.0f)) {
for (int i=0; i < var_1; ++i) {
if (comp == (var_3 - -0.0f)) {
for (int i=0; i < var_2; ++i) {
comp += (-0.0f * var_6 + var_7 + var_8 * -0.0f);
float tmp_1 = +1.2736E-36f;
comp = tmp_1 / acosf((-1.3056E36f + (var_9 / (+1.8507E6f / (-0.0f + (var_10 / var_11))))));
comp += (-0.0f + (var_12 * (var_13 - powf((var_14 - powf((+0.0f + -1.9912E-37f / (var_15 + (var_16 + -1.1420E-41f))), +0.0f)), (var_17 / -0.0f / logf(var_18 - (+1.4255E-41f - (var_19 / (+0.0f * -1.1114E-44f / +1.8196E-35f)))))))));
for (int i=0; i < var_4; ++i) {
var_20[i] = (var_21 * (var_22 - var_23));
comp += var_20[i] * +0.0f - (-1.2856E34f - (-1.6144E-43f / ldexpf(logf(floorf((var_24 + (var_25 * (-1.0566E22f * (-0.0f - var_26)))))), 2)));
comp = (var_27 * var_28);
}
for (int i=0; i < var_5; ++i) {
comp = -1.2844E-43f * (+1.0536E34f - (var_29 * (+0.0f - var_30)));
}
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
int tmp_5 = atoi(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float* tmp_21 = initPointer( atof(argv[21]) );
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31);
hipDeviceSynchronize();
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
}
#endif
#define S_LOWER_LIMIT 10.0f
#define S_UPPER_LIMIT 100.0f
#define K_LOWER_LIMIT 10.0f
#define K_UPPER_LIMIT 100.0f
#define T_LOWER_LIMIT 1.0f
#define T_UPPER_LIMIT 10.0f
#define R_LOWER_LIMIT 0.01f
#define R_UPPER_LIMIT 0.05f
#define SIGMA_LOWER_LIMIT 0.01f
#define SIGMA_UPPER_LIMIT 0.10f
__global__ void bs(float *drand, float *dput, float *dcall, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n) {
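// coefficients of the Abramowitz-Stegun polynomial approximation of the standard normal CDF, used below to evaluate phi(d1) and phi(d2)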
float c1 = 0.319381530f;
float c2 = -0.356563782f;
float c3 = 1.781477937f;
float c4 = -1.821255978f;
float c5 = 1.330274429f;
float zero = 0.0f;
float one = 1.0f;
float two = 2.0f;
float temp4 = 0.2316419f;
float oneBySqrt2pi = 0.398942280f;
float d1, d2;
float phiD1, phiD2;
float sigmaSqrtT;
float KexpMinusRT;
float inRand;
inRand = drand[id];
float S = S_LOWER_LIMIT * inRand + S_UPPER_LIMIT * (1.0f - inRand);
float K = K_LOWER_LIMIT * inRand + K_UPPER_LIMIT * (1.0f - inRand);
float T = T_LOWER_LIMIT * inRand + T_UPPER_LIMIT * (1.0f - inRand);
float R = R_LOWER_LIMIT * inRand + R_UPPER_LIMIT * (1.0f - inRand);
float sigmaVal = SIGMA_LOWER_LIMIT * inRand + SIGMA_UPPER_LIMIT * (1.0f - inRand);
sigmaSqrtT = sigmaVal * (float)sqrt(T);
d1 = ((float)log(S / K) + (R + sigmaVal * sigmaVal / two) * T) / sigmaSqrtT;
d2 = d1 - sigmaSqrtT;
KexpMinusRT = K * (float)exp(-R * T);
// phiD1 = phi(d1)
float X = d1;
float absX = (float)abs(X);
float t = one / (one + temp4 * absX);
float y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD1 = (X < zero) ? (one - y) : y;
// phiD2 = phi(d2)
X = d2;
absX = abs(X);
t = one / (one + temp4 * absX);
y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD2 = (X < zero) ? (one - y) : y;
dcall[id] = S * phiD1 - KexpMinusRT * phiD2;
// phiD1 = phi(-d1);
X = -d1;
absX = abs(X);
t = one / (one + temp4 * absX);
y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD1 = (X < zero) ? (one - y) : y;
// phiD2 = phi(-d2);
X = -d2;
absX = abs(X);
t = one / (one + temp4 * absX);
y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD2 = (X < zero) ? (one - y) : y;
dput[id] = KexpMinusRT * phiD2 - S * phiD1;
}
}
extern "C" {
void bsCUDA(float* rand, float *put, float *call, int start, int end, int GPUN) {
float *drand, *dput, *dcall;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In vcCUDA\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
CudaSafeCall(cudaMalloc(&drand, sizeof(float) * GPUN));
CudaSafeCall(cudaMalloc(&dput, sizeof(float) * GPUN));
CudaSafeCall(cudaMalloc(&dcall, sizeof(float) * GPUN));
CudaSafeCall(cudaMemcpy(drand, rand + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice));
bs<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(drand, dput, dcall, GPUN);
CudaCheckError();
CudaSafeCall(cudaDeviceSynchronize());
CudaSafeCall(cudaMemcpy(put + start, dput, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy(call + start, dcall, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
CudaSafeCall(cudaFree(drand));
CudaSafeCall(cudaFree(dput));
CudaSafeCall(cudaFree(dcall));
}
}
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
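// the CUDA-named wrapper macros are kept; only the error type and runtime calls below change to their HIP equivalents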
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
}
#endif
#define S_LOWER_LIMIT 10.0f
#define S_UPPER_LIMIT 100.0f
#define K_LOWER_LIMIT 10.0f
#define K_UPPER_LIMIT 100.0f
#define T_LOWER_LIMIT 1.0f
#define T_UPPER_LIMIT 10.0f
#define R_LOWER_LIMIT 0.01f
#define R_UPPER_LIMIT 0.05f
#define SIGMA_LOWER_LIMIT 0.01f
#define SIGMA_UPPER_LIMIT 0.10f
__global__ void bs(float *drand, float *dput, float *dcall, int n) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n) {
float c1 = 0.319381530f;
float c2 = -0.356563782f;
float c3 = 1.781477937f;
float c4 = -1.821255978f;
float c5 = 1.330274429f;
float zero = 0.0f;
float one = 1.0f;
float two = 2.0f;
float temp4 = 0.2316419f;
float oneBySqrt2pi = 0.398942280f;
float d1, d2;
float phiD1, phiD2;
float sigmaSqrtT;
float KexpMinusRT;
float inRand;
inRand = drand[id];
float S = S_LOWER_LIMIT * inRand + S_UPPER_LIMIT * (1.0f - inRand);
float K = K_LOWER_LIMIT * inRand + K_UPPER_LIMIT * (1.0f - inRand);
float T = T_LOWER_LIMIT * inRand + T_UPPER_LIMIT * (1.0f - inRand);
float R = R_LOWER_LIMIT * inRand + R_UPPER_LIMIT * (1.0f - inRand);
float sigmaVal = SIGMA_LOWER_LIMIT * inRand + SIGMA_UPPER_LIMIT * (1.0f - inRand);
sigmaSqrtT = sigmaVal * (float)sqrt(T);
d1 = ((float)log(S / K) + (R + sigmaVal * sigmaVal / two) * T) / sigmaSqrtT;
d2 = d1 - sigmaSqrtT;
KexpMinusRT = K * (float)exp(-R * T);
// phiD1 = phi(d1)
float X = d1;
float absX = (float)abs(X);
float t = one / (one + temp4 * absX);
float y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD1 = (X < zero) ? (one - y) : y;
// phiD2 = phi(d2)
X = d2;
absX = abs(X);
t = one / (one + temp4 * absX);
y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD2 = (X < zero) ? (one - y) : y;
dcall[id] = S * phiD1 - KexpMinusRT * phiD2;
// phiD1 = phi(-d1);
X = -d1;
absX = abs(X);
t = one / (one + temp4 * absX);
y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD1 = (X < zero) ? (one - y) : y;
// phiD2 = phi(-d2);
X = -d2;
absX = abs(X);
t = one / (one + temp4 * absX);
y = one - oneBySqrt2pi * (float)exp(-X * X / two) * t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))));
phiD2 = (X < zero) ? (one - y) : y;
dput[id] = KexpMinusRT * phiD2 - S * phiD1;
}
}
extern "C" {
void bsCUDA(float* rand, float *put, float *call, int start, int end, int GPUN) {
float *drand, *dput, *dcall;
if (GPUN > 0) {
assert(end - start + 1 == GPUN);
#ifdef VERBOSE
printf("In vcCUDA\n");
printf("\t GPUN: %d\n", GPUN);
printf("\t range: %d..%d\n", start, end);
#endif
CudaSafeCall(hipMalloc(&drand, sizeof(float) * GPUN));
CudaSafeCall(hipMalloc(&dput, sizeof(float) * GPUN));
CudaSafeCall(hipMalloc(&dcall, sizeof(float) * GPUN));
CudaSafeCall(hipMemcpy(drand, rand + start, sizeof(float) * GPUN, hipMemcpyHostToDevice));
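// hipcc accepts the CUDA-style <<<grid, block>>> launch syntax unchanged; hipLaunchKernelGGL is the explicit alternative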
bs<<<ceil(((float)GPUN)/THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(drand, dput, dcall, GPUN);
CudaCheckError();
CudaSafeCall(hipDeviceSynchronize());
CudaSafeCall(hipMemcpy(put + start, dput, sizeof(float) * GPUN, hipMemcpyDeviceToHost));
CudaSafeCall(hipMemcpy(call + start, dcall, sizeof(float) * GPUN, hipMemcpyDeviceToHost));
CudaSafeCall(hipFree(drand));
CudaSafeCall(hipFree(dput));
CudaSafeCall(hipFree(dcall));
}
}
}
|
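A note on the numerics, which the HIP port leaves untouched: the nested polynomial in bs() is the Abramowitz–Stegun approximation (formula 26.2.17) of the standard normal CDF needed for Black–Scholes pricing,

$$N(x) \approx 1 - \frac{e^{-x^2/2}}{\sqrt{2\pi}}\left(c_1 t + c_2 t^2 + c_3 t^3 + c_4 t^4 + c_5 t^5\right), \qquad t = \frac{1}{1 + 0.2316419\,x}, \quad x \ge 0,$$

with $N(x) = 1 - N(-x)$ for negative $x$. The constants $c_1 \ldots c_5$ match those hard-coded in the kernel, and oneBySqrt2pi is $1/\sqrt{2\pi}$.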
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d*type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void copy_kernel_frombuf(char *dest, char *src, int rx_s, int rx_e, int ry_s, int ry_e, int rz_s, int rz_e, int x_step, int y_step, int z_step, int size_x, int size_y, int size_z, int buf_strides_x, int buf_strides_y, int buf_strides_z, int type_size, int dim, int OPS_soa) {
int idx_z = rz_s + z_step * (blockDim.z * blockIdx.z + threadIdx.z);
int idx_y = ry_s + y_step * (blockDim.y * blockIdx.y + threadIdx.y);
int idx_x = rx_s + x_step * (blockDim.x * blockIdx.x + threadIdx.x);
if ((x_step == 1 ? idx_x < rx_e : idx_x > rx_e) &&
(y_step == 1 ? idx_y < ry_e : idx_y > ry_e) &&
(z_step == 1 ? idx_z < rz_e : idx_z > rz_e)) {
if (OPS_soa) dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size;
else dest += (idx_z * size_x * size_y + idx_y * size_x + idx_x) * type_size * dim;
src += ((idx_z - rz_s) * z_step * buf_strides_z +
(idx_y - ry_s) * y_step * buf_strides_y +
(idx_x - rx_s) * x_step * buf_strides_x) *
type_size * dim;
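// device-side memcpy of small byte counts compiles unchanged under HIP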
for (int d = 0; d < dim; d++) {
memcpy(dest, src + d*type_size, type_size);
if (OPS_soa) dest += size_x * size_y * size_z * type_size;
else dest += type_size;
}
}
}
|
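The host-side launch of copy_kernel_frombuf is not part of this snippet. As an illustration only — the block shape, ceiling division, and variable names below are assumptions, following the usual pattern of one thread per copied point:
dim3 block(16, 8, 8); // hypothetical block shape, not from the original source
dim3 grid((abs(rx_e - rx_s) + block.x - 1) / block.x,
          (abs(ry_e - ry_s) + block.y - 1) / block.y,
          (abs(rz_e - rz_s) + block.z - 1) / block.z);
copy_kernel_frombuf<<<grid, block>>>(dest, src, rx_s, rx_e, ry_s, ry_e, rz_s, rz_e,
                                     x_step, y_step, z_step, size_x, size_y, size_z,
                                     buf_strides_x, buf_strides_y, buf_strides_z,
                                     type_size, dim, OPS_soa);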
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
cudaMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
cudaMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,block.x*sizeof(float),0>>>(d_a); // third launch argument sizes the dynamic extern __shared__ sdata array
cudaThreadSynchronize();
// Copy it back over
cudaMemcpy(h_a,d_a,num_bytes,cudaMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
cudaFree(d_a);
return 0;
}
|
#include <hip/hip_runtime.h>
#include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
hipMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
hipMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,block.x*sizeof(float),0>>>(d_a); // third launch argument sizes the dynamic extern __shared__ sdata array
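// cudaThreadSynchronize() is deprecated in CUDA; HIP maps it to hipDeviceSynchronize()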
hipDeviceSynchronize();
// Copy it back over
hipMemcpy(h_a,d_a,num_bytes,hipMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
hipFree(d_a);
return 0;
}
|
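These host-API renames are mechanical 1:1 substitutions (cudaMalloc→hipMalloc, cudaMemset→hipMemset, cudaMemcpy→hipMemcpy with the matching hipMemcpyDeviceToHost kind, cudaFree→hipFree) plus the added #include <hip/hip_runtime.h>; the kernel body, dim3 types, and execution-configuration syntax carry over verbatim.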
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <zlib.h>
int hNumberOfReads = 0; // holds number of reads that are processed
int hReadsWritten = 0; // number of reads written to output file
char input_filename[400]="/home/linux/cuda-workspace/PreprocessReads/test.txt"; // input file name
char output_filename[400]="/mnt/sdc/Sequences/out.txt"; // output file name
char GZIP_PIPE[410] = "gzip -> "; // option -gz will fall back to uncompressed output if the pipe cannot be opened
char PIGZ_PIPE[410] = "pigz -> "; // option -pigz for multithreaded compression will fall back to gz and then to uncompressed
gzFile inputFile;
gzFile templateFile; // holds a second pointer in input read file for templating output with read name sequence and quality
FILE *outputFile;
bool overwrite_outputfile = false; // if set true the output file will be overwritten (cmd line option -f)
bool write_gz_output = false; // specify .gz file compressed output (cmd line option -gz)
bool write_pigz_output = false; // specify pigz multithreaded compression for output (-pigz option)
bool AdaptorTrimming = true; // algorithm selectors
bool QualityTrimming = true;
bool gRNATrimming = false;
int gRNA_start; // holds gRNA position relative to adaptor
int gRNA_length;
bool verbose = false; // if true print details of processing to screen
bool FastQ_WARNING = false; // set to true if a FastQ file warning has been issued
short MAX_READ_LENGTH = 203; // maximal length of reads in input file
short MIN_OUT_READ_LEN = 26; // minimum length of reads after trimming to write to output file
short MIN_BASE_QUAL = 20; // parameter for minimum quality for good base
short MIN_QUAL_AFTER_BAD = 25; // parameter for average base quality after bad base
short PHRED = 33; // subtract from qualities for numerical value of solexa
bool auto_phred_detect = false; // if true try to detect PHRED format in FastQ input file
int MAX_READS_ON_GPU = 10000000; // maximum number of reads that fit simultaneously on GPU
#define MAX_ADAPTOR_LENGTH 100 // maximum length of adaptor sequence for trimming, can contain N for any base
#define MAX_FASTQ_BUF_LINE 500 // structure of buffer for read from FastQ file
char Adaptor[MAX_ADAPTOR_LENGTH] = "ATCTCGTATGCCGTCTTCTGCTT"; // adaptor on CPU side from parsing command line
short mismatches = 2; // holds maximum number of mismatches on cpu side
int numGPUs = 0; // holds number of CUDA devices in the system [1 TITAN to max 2 TITAN Z]
#define MAX_GPU_NUM 4
int device[MAX_GPU_NUM]; // device numbers to use
cudaStream_t upload_stream[MAX_GPU_NUM]; // cuda streams per device for overlapped asynchronous operation
cudaStream_t compute_stream[MAX_GPU_NUM];
cudaStream_t download_stream[MAX_GPU_NUM];
char *dSequenceArray[MAX_GPU_NUM]; // device pointers to data arrays, will hold hReadSequence
char *dQualArray[MAX_GPU_NUM]; // device pointers to data arrays, will hold hReadQualstr
short *dIntArray[MAX_GPU_NUM][2]; // two device pointers to integer array, will hold hReadLen
cudaEvent_t QUAL_COPY_DONE[MAX_GPU_NUM]; // events for device host synchronization
cudaEvent_t SEQ_COPY_DONE[MAX_GPU_NUM];
cudaEvent_t TrimmQ_DONE[MAX_GPU_NUM];
cudaEvent_t TrimmA_DONE[MAX_GPU_NUM];
cudaEvent_t DTH_COPY_DONE[MAX_GPU_NUM];
short *hReadLen[MAX_GPU_NUM][2]; // two buffers per device to hold read lengths on host
char *hReadSequence[MAX_GPU_NUM][2]; // two buffers per device holding read sequences on host
char *hReadQualstr[MAX_GPU_NUM]; // one buffer per device holding read qualities on host
struct read
{
char name[MAX_FASTQ_BUF_LINE];
char sequence[MAX_FASTQ_BUF_LINE];
char descr[MAX_FASTQ_BUF_LINE];
char qualstr[MAX_FASTQ_BUF_LINE];
} Read; // buffers one read from the FastQ file in 4 lines
// macro checks return value of the CUDA runtime call and exits the application if the call failed.
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// For trimming adaptor sequences store adaptor and length as well as quality cutoffs in device constant memory
__constant__ __device__ char ADAPTOR[MAX_ADAPTOR_LENGTH]; // adaptor sequence can contain N for any match
__constant__ __device__ short ADAPTOR_LEN; // length of adaptor sequence in bases
__constant__ __device__ short MAX_MM=2; // maximum mismatches allowed not including Ns
__constant__ __device__ short QUAL_PADDING=33; // hold PHRED base for qualities
__constant__ __device__ short MIN_QUAL=20; // hold minimal quality that is considered good base
__constant__ __device__ short MIN_AVERAGE_QUAL_AFTER_BAD=25; // minimum quality average of remaining read after bad base
void SetAdaptorSequence(const char *AdaptorSequence, const short mismatches, const short PHRED, const short MIN_BASE_QUAL, const short MIN_QUAL_AFTER_BAD)
{ // define the adaptor, mismatches and quality parameters before trimming
short AdLen = strnlen(AdaptorSequence, MAX_ADAPTOR_LENGTH); // get length of adaptor sequence
for(int dev=0; dev < numGPUs; dev++)
{
cudaSetDevice(device[dev]);
if(verbose) printf("[%d] Setting Adaptor sequence %s, length %d on GPU \n", dev, AdaptorSequence, AdLen);
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(ADAPTOR, AdaptorSequence, AdLen)); // copy to device
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(ADAPTOR_LEN, &AdLen, sizeof(short))); // constant memory
if(verbose) printf("[%d] Setting mismatches %d, PHRED+%d on GPU \n", dev, mismatches, PHRED);
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(MAX_MM, &mismatches, sizeof(short)));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(QUAL_PADDING, &PHRED, sizeof(short)));
if(verbose) printf("[%d] Setting minimum base quality %d, average read quality after bad %d on GPU \n", dev, MIN_BASE_QUAL, MIN_QUAL_AFTER_BAD);
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(MIN_QUAL, &MIN_BASE_QUAL, sizeof(short)));
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(MIN_AVERAGE_QUAL_AFTER_BAD, &MIN_QUAL_AFTER_BAD, sizeof(short)));
}
}
__global__ void Find_gRNA(char *dDataArray, short *read_length, int reads, short MAX_READ_LENGTH)
{
for(long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < reads; idx += blockDim.x * gridDim.x)
{
if (idx < reads)
{
char *sequence = &dDataArray[idx*MAX_READ_LENGTH];
short rl=read_length[idx];
short n;
short p=0;
short mm=0;
read_length[idx]=0;
while (p <= rl-ADAPTOR_LEN)
{
mm=0;
for (n=0; n<ADAPTOR_LEN; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm > MAX_MM) break;
}
}
if (mm <= MAX_MM)
{
read_length[idx]=p;
break;
}
p++;
}
if (mm <= MAX_MM ) continue;
p=rl-ADAPTOR_LEN+1;
if (rl >= ADAPTOR_LEN-1)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-1; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm >= MAX_MM) break;
}
}
if (mm < MAX_MM)
{
read_length[idx]=p;
continue;
}
}
p++;
if (rl >= ADAPTOR_LEN-2)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-2; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
break;
}
}
if (MAX_MM > (mm+1))
{
read_length[idx]=p;
}
}
}
}
}
__global__ void TrimmAdaptors(char *dDataArray, short *read_length, int reads, short MAX_READ_LENGTH)
{
for(long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < reads; idx += blockDim.x * gridDim.x)
{
if (idx < reads)
{
char *sequence = &dDataArray[idx*MAX_READ_LENGTH];
short rl=read_length[idx];
short n;
short p=0;
short mm=0;
while (p <= rl-ADAPTOR_LEN)
{
mm=0;
for (n=0; n<ADAPTOR_LEN; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm > MAX_MM) break;
}
}
if (mm <= MAX_MM)
{
read_length[idx]=p;
break;
}
p++;
}
if (mm <= MAX_MM ) continue;
p=rl-ADAPTOR_LEN+1;
if (rl >= ADAPTOR_LEN-1)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-1; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm >= MAX_MM) break;
}
}
if (mm < MAX_MM)
{
read_length[idx]=p;
continue;
}
}
p++;
if (rl >= ADAPTOR_LEN-2)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-2; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
break;
}
}
if (MAX_MM > (mm+1))
{
read_length[idx]=p;
}
}
}
}
}
__global__ void TrimmQuals(char *quals, short *read_length, int reads, short MAX_READ_LENGTH)
{
for(long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < reads; idx += blockDim.x * gridDim.x)
{
if (idx < reads)
{
char *q = &quals[idx*MAX_READ_LENGTH];
short rl=read_length[idx];
short i;
for(i=0; i<rl; i++)
{
if ((q[i]-QUAL_PADDING)<=MIN_QUAL)
{
short s=0;
short r=i+1;
while (r<rl)
{
s=s+q[r]-QUAL_PADDING;
r++;
}
if(s<(MIN_AVERAGE_QUAL_AFTER_BAD*(rl-i+1)))
{
read_length[idx]=i;
break;
}
}
}
}
}
}
void InitGPUs(void)
{
if(verbose) printf("\n");
for (int dev=0; dev<numGPUs; dev++) // initialize devices and allocate cuda resources
{
if(verbose) printf("[%d] Initializing GPU\n", dev);
CUDA_CHECK_RETURN(cudaSetDevice(device[dev]));
CUDA_CHECK_RETURN(cudaMalloc((void **)&dSequenceArray[dev], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char))); // one data array
CUDA_CHECK_RETURN(cudaMalloc((void **)&dQualArray[dev], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char))); // one data array
CUDA_CHECK_RETURN(cudaMalloc((void **)&dIntArray[dev][0], MAX_READS_ON_GPU * sizeof(short))); // two dIntArrays
CUDA_CHECK_RETURN(cudaMalloc((void **)&dIntArray[dev][1], MAX_READS_ON_GPU * sizeof(short)));
CUDA_CHECK_RETURN(cudaStreamCreate(&upload_stream[dev])); // create streams on devices
CUDA_CHECK_RETURN(cudaStreamCreate(&compute_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamCreate(&download_stream[dev]));
CUDA_CHECK_RETURN(cudaEventCreateWithFlags(&QUAL_COPY_DONE[dev], cudaEventDisableTiming || cudaEventBlockingSync)); // events for device host synchronization
CUDA_CHECK_RETURN(cudaEventCreateWithFlags(&SEQ_COPY_DONE[dev], cudaEventDisableTiming));
CUDA_CHECK_RETURN(cudaEventCreateWithFlags(&TrimmQ_DONE[dev], cudaEventDisableTiming));
CUDA_CHECK_RETURN(cudaEventCreateWithFlags(&TrimmA_DONE[dev], cudaEventDisableTiming));
CUDA_CHECK_RETURN(cudaEventCreateWithFlags(&DTH_COPY_DONE[dev], cudaEventDisableTiming || cudaEventBlockingSync));
if(verbose) printf("[%d] Allocating pinned CPU memory for reads\n", dev);
CUDA_CHECK_RETURN(cudaMallocHost((void **) &hReadLen[dev][0], MAX_READS_ON_GPU * sizeof(short))); // allocate two buffers for reads length
CUDA_CHECK_RETURN(cudaMallocHost((void **) &hReadLen[dev][1], MAX_READS_ON_GPU * sizeof(short)));
CUDA_CHECK_RETURN(cudaMallocHost((void **) &hReadSequence[dev][0], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char))); // two buffers for reads sequence
CUDA_CHECK_RETURN(cudaMallocHost((void **) &hReadSequence[dev][1], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char)));
CUDA_CHECK_RETURN(cudaMallocHost((void **) &hReadQualstr[dev], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char))); // one buffer for qualities
}
}
void FreeGPUs(void)
{
for (int dev=0; dev<numGPUs; dev++) // release cuda resources from all devices
{
CUDA_CHECK_RETURN(cudaSetDevice(device[dev]));
CUDA_CHECK_RETURN(cudaEventDestroy(QUAL_COPY_DONE[dev]));
CUDA_CHECK_RETURN(cudaEventDestroy(SEQ_COPY_DONE[dev]));
CUDA_CHECK_RETURN(cudaEventDestroy(TrimmQ_DONE[dev]));
CUDA_CHECK_RETURN(cudaEventDestroy(TrimmA_DONE[dev]));
CUDA_CHECK_RETURN(cudaEventDestroy(DTH_COPY_DONE[dev]));
CUDA_CHECK_RETURN(cudaStreamDestroy(upload_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamDestroy(compute_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamDestroy(download_stream[dev]));
CUDA_CHECK_RETURN(cudaFree(dIntArray[dev][0]));
CUDA_CHECK_RETURN(cudaFree(dIntArray[dev][1]));
CUDA_CHECK_RETURN(cudaFree(dSequenceArray[dev]));
CUDA_CHECK_RETURN(cudaFree(dQualArray[dev]));
cudaFreeHost(hReadLen[dev][0]); // free all page locked host memory
cudaFreeHost(hReadLen[dev][1]);
cudaFreeHost(hReadSequence[dev][0]);
cudaFreeHost(hReadSequence[dev][1]);
cudaFreeHost(hReadQualstr[dev]);
}
}
int FindGPUs(void)
{
int deviceCount;
numGPUs = 0;
CUDA_CHECK_RETURN(cudaGetDeviceCount(&deviceCount)); // search cuda devices and identify compute 3.5 with 6 GB
if (deviceCount == 0) return(0);
for (int dev=0; dev<deviceCount; dev++)
{
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
int driverVersion, runtimeVersion;
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
if(((deviceProp.major == 3 && deviceProp.minor >= 5) || (deviceProp.major > 3)) && deviceProp.totalGlobalMem > 6144000000)
{
if(verbose)
{
printf("\nDevice %d: %s\n", dev, deviceProp.name);
printf(" CUDA Driver / Runtime Version: %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
printf(" CUDA Compute Capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf(" PCI bus ID: %d:%d\n", deviceProp.pciBusID, deviceProp.pciDeviceID);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf(" Number of Multiprocessors: %d\n", deviceProp.multiProcessorCount);
}
device[numGPUs] = dev;
numGPUs++;
if (numGPUs >= MAX_GPU_NUM) break;
}
}
return(numGPUs);
}
void OpenFiles()
{
// handle compression using zlib gzFile functions for input and pipe gzip for output
if(!overwrite_outputfile) // test if output file exists if option -f is not specified
{
outputFile = fopen(output_filename, "r");
if(outputFile!=NULL)
{
printf("ERROR output file exists already. To overwrite run with option -f\n");
exit(255);
} // do not attempt close file as we would have already exited if opening was successful !
}
inputFile = gzopen(input_filename, "r");
if(inputFile == NULL)
{
printf("ERROR opening input file: %s\n", input_filename);
exit(255);
}
templateFile = gzopen(input_filename, "r"); // holds a second access to input read file for rereading positions for output
if(templateFile == NULL)
{
printf("ERROR opening second handle to input file.\n");
exit(255);
}
if(write_pigz_output)
{
outputFile = popen(strcat(PIGZ_PIPE, output_filename), "w");
if(outputFile == NULL)
{
printf("Option -pigz: could not open pipe ... falling back on libz.\n\tMake sure pigz is installed with: sudo apt-get install pigz\n");
write_gz_output = true;
write_pigz_output = false;
}
}
if(write_gz_output && !write_pigz_output)
{
outputFile = popen(strcat(GZIP_PIPE, output_filename), "w");
if(outputFile == NULL)
{
printf("Option -gz: could not open pipe ... falling back to uncompressed output.\n");
write_gz_output = false;
}
}
if(!write_gz_output && !write_pigz_output) outputFile = fopen(output_filename, "w");
if(outputFile == NULL)
{
printf("ERROR opening output file: %s\n", output_filename);
exit(255);
}
}
int ReadBlockOfReads(int max_num_to_read, short *hReadLen, char* hReadSequence, char *hReadQualstr)
{
int n = 0; // returns the number of reads read
short length;
while(gzgets(inputFile, Read.name, MAX_FASTQ_BUF_LINE) != NULL)
{
if(Read.name[0] == 13) break; // end if CR <13> or LF <10> are encountered
if(Read.name[0] == 10) break;
if(gzgets(inputFile, Read.sequence, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.descr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.qualstr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(!FastQ_WARNING) // write warning if FastQ file contains non-standard lines
{
if(Read.name[0]!='@' || Read.descr[0]!='+')
{
printf("Warning: Encountered unusual line identifiers in FastQ file format.\n");
FastQ_WARNING = true;
}
}
length = strlen(Read.sequence)-1;
if(length > MAX_READ_LENGTH) length = MAX_READ_LENGTH;
// printf("%d, ", length);
memcpy(&hReadSequence[n*MAX_READ_LENGTH], Read.sequence, length);
memcpy(&hReadQualstr[n*MAX_READ_LENGTH], Read.qualstr, length);
hReadLen[n] = length;
n++;
if(n == max_num_to_read) break;
}
hNumberOfReads = hNumberOfReads + n; // update global variable to total number of reads
return(n);
}
int WriteBlockOfReads(int num_of_reads_to_write, const short *hReadLen, short min_out_read_length)
{
int n=0;
int w=0;
short length;
char str_buffer[MAX_FASTQ_BUF_LINE];
if(num_of_reads_to_write == 0) return(0);
if(verbose) printf("I/O > num_of_reads_to_write = %d\n", num_of_reads_to_write);
while(gzgets(templateFile, Read.name, MAX_FASTQ_BUF_LINE) != NULL)
{
if(gzgets(templateFile, Read.sequence, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input file for writing trimmed output!\n");
exit(255);
}
if(gzgets(templateFile, Read.descr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input file for writing trimmed output!\n");
exit(255);
}
if(gzgets(templateFile, Read.qualstr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input file for writing trimmed output!\n");
exit(255);
}
length = hReadLen[n];
if(gRNATrimming) // isolate the gRNA from adaptor match
{
if(length>0)
{
strncpy(str_buffer, &Read.sequence[length+gRNA_start], gRNA_length);
strncpy(&Read.sequence[0], str_buffer, gRNA_length);
Read.sequence[gRNA_length]=10;
Read.sequence[gRNA_length+1]=0;
strncpy(str_buffer, &Read.qualstr[length+gRNA_start], gRNA_length);
strncpy(&Read.qualstr[0], str_buffer, gRNA_length);
Read.qualstr[gRNA_length]=10;
Read.qualstr[gRNA_length+1]=0;
fprintf(outputFile,"%s", Read.name);
fprintf(outputFile,"%s", Read.sequence);
fprintf(outputFile,"%s", Read.descr);
fprintf(outputFile,"%s", Read.qualstr);
w++;
}
}
else // normal quality and adaptor trimming
{
if(length >= min_out_read_length)
{
Read.sequence[length]=10; // write only reads that are longer than minimum to output file
Read.sequence[length+1]=0;
Read.qualstr[length]=10;
Read.qualstr[length+1]=0;
fprintf(outputFile,"%s", Read.name);
fprintf(outputFile,"%s", Read.sequence);
fprintf(outputFile,"%s", Read.descr);
fprintf(outputFile,"%s", Read.qualstr);
w++;
}
}
n++;
if(n == num_of_reads_to_write) break;
}
hReadsWritten = hReadsWritten + w;
return(n);
}
void CloseFiles(void)
{
gzclose(inputFile);
gzclose(templateFile);
if(write_gz_output || write_pigz_output) pclose(outputFile); else fclose(outputFile);
}
bool Detect_FastQ_Format_PHRED(void)
{
int n = 0; // returns the number of reads read
short length;
uint min_qual = 65;
uint max_qual = 65;
bool deciphered = false;
inputFile = gzopen(input_filename, "r");
if(inputFile == NULL)
{
printf("ERROR opening input file %s\n", input_filename);
exit(255);
}
while(gzgets(inputFile, Read.name, MAX_FASTQ_BUF_LINE) != NULL)
{
if(Read.name[0] == 13) break; // end if CR <13> or LF <10> are encountered
if(Read.name[0] == 10) break;
if(gzgets(inputFile, Read.sequence, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.descr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.qualstr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(!FastQ_WARNING) // write warning if FastQ file contains non-standard lines
{
if(Read.name[0]!='@' || Read.descr[0]!='+')
{
printf("Warning: Encountered unusual line identifiers in FastQ file format.");
FastQ_WARNING = true;
}
}
length = strlen(Read.sequence)-1;
for(n=0; n<length;n++)
{
if(min_qual > (uint)Read.qualstr[n])
{
min_qual = (uint) Read.qualstr[n];
}
else
{
if(max_qual < (uint)Read.qualstr[n]) max_qual = (uint) Read.qualstr[n];
}
}
if(min_qual < 59)
{
PHRED = 33; // FastQ Sanger and Illumina 1.8+ format
deciphered = true;
break;
}
if(max_qual > 80)
{
PHRED = 64; // FastQ Illumina 1.3+ format
deciphered = true;
break;
}
}
gzclose(inputFile);
return(deciphered);
}
int PreprocessReadsOnGPU(void)
{
bool DONE = false;
bool ALL_DONE = false;
int ReadsProcessed = 0;
int ReadsToProcess[MAX_GPU_NUM];
int ReadsSentToGPU[MAX_GPU_NUM];
int dev_side = 0; // left or right side of device memories [0] or [1]
int dev_next_side = 1;
int side_flip;
if(verbose) printf("\nPreprocessing reads on GPU\n");
for(int dev=0; dev<numGPUs; dev++) // copy data and run kernels initial round to fill CUDA queue and not waste time !
{
ReadsToProcess[dev] = 0;
ReadsSentToGPU[dev] = 0;
if(!DONE)
{
ReadsToProcess[dev] = ReadBlockOfReads(MAX_READS_ON_GPU, hReadLen[dev][dev_side], hReadSequence[dev][dev_side], hReadQualstr[dev]);
if(ReadsToProcess[dev] < MAX_READS_ON_GPU)
{
DONE = true;
}
if(ReadsToProcess[dev] == 0) continue;
CUDA_CHECK_RETURN(cudaSetDevice(device[dev]));
if(QualityTrimming) CUDA_CHECK_RETURN(cudaMemcpyAsync(dQualArray[dev], hReadQualstr[dev], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), cudaMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(cudaMemcpyAsync(dIntArray[dev][dev_side], hReadLen[dev][dev_side], ReadsToProcess[dev] * sizeof(short), cudaMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(cudaEventRecord(QUAL_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(compute_stream[dev], QUAL_COPY_DONE[dev], 0));
if(QualityTrimming)
{
if(verbose) printf("[%d] TrimmQuals\n", device[dev]);
TrimmQuals<<<4096, 256, 0, compute_stream[dev]>>>(dQualArray[dev], dIntArray[dev][dev_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(cudaGetLastError());
}
CUDA_CHECK_RETURN(cudaEventRecord(TrimmQ_DONE[dev], compute_stream[dev]));
if(AdaptorTrimming || gRNATrimming) CUDA_CHECK_RETURN(cudaMemcpyAsync(dSequenceArray[dev], hReadSequence[dev][dev_side], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), cudaMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(cudaEventRecord(SEQ_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(compute_stream[dev], SEQ_COPY_DONE[dev], 0));
if(gRNATrimming)
{
if(verbose) printf("[%d] Find_gRNA\n", device[dev]);
Find_gRNA<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(cudaGetLastError());
}
else
{
if(AdaptorTrimming)
{
if(verbose) printf("[%d] TrimmAdaptors\n", device[dev]);
TrimmAdaptors<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(cudaGetLastError());
}
}
CUDA_CHECK_RETURN(cudaEventRecord(TrimmA_DONE[dev], compute_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(download_stream[dev], TrimmA_DONE[dev], 0));
CUDA_CHECK_RETURN(cudaMemcpyAsync(hReadLen[dev][dev_side], dIntArray[dev][dev_side], ReadsToProcess[dev] * sizeof(short), cudaMemcpyDeviceToHost, download_stream[dev]));
CUDA_CHECK_RETURN(cudaEventRecord(DTH_COPY_DONE[dev], download_stream[dev]));
if(verbose) printf("[%d] Memcpy DtoH %d reads\n", device[dev], ReadsToProcess[dev]);
ReadsSentToGPU[dev] = ReadsToProcess[dev];
ReadsToProcess[dev] = 0;
}
}
while(!ALL_DONE)
{
ALL_DONE = true; // setup for test if should stop if there is no more data on the GPU
if(!DONE)
{
for(int dev=0; dev<numGPUs; dev++) // read a block of data, copy QUALS to GPU and run QUAL kernel
{
ReadsToProcess[dev] = 0;
CUDA_CHECK_RETURN(cudaSetDevice(device[dev]));
if(ReadsSentToGPU[dev]>0)
{
CUDA_CHECK_RETURN(cudaEventSynchronize(TrimmQ_DONE[dev]));
}
ReadsToProcess[dev] = ReadBlockOfReads(MAX_READS_ON_GPU, hReadLen[dev][dev_next_side], hReadSequence[dev][dev_next_side], hReadQualstr[dev]);
if(ReadsToProcess[dev] < MAX_READS_ON_GPU)
{
DONE = true;
}
if(ReadsToProcess[dev] == 0) break;
ALL_DONE = false; // there is more work on the GPUs that requires further processing
if(QualityTrimming) CUDA_CHECK_RETURN(cudaMemcpyAsync(dQualArray[dev], hReadQualstr[dev], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), cudaMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(cudaMemcpyAsync(dIntArray[dev][dev_next_side], hReadLen[dev][dev_next_side], ReadsToProcess[dev] * sizeof(short), cudaMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(cudaEventRecord(QUAL_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(compute_stream[dev], QUAL_COPY_DONE[dev], 0));
if(QualityTrimming)
{
if(verbose) printf("[%d] TrimmQuals\n", device[dev]);
TrimmQuals<<<4096, 256, 0, compute_stream[dev]>>>(dQualArray[dev], dIntArray[dev][dev_next_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(cudaGetLastError());
}
}
}
for(int dev=0; dev<numGPUs; dev++) // copy sequence and run Adaptor trim kernel
{
if(ReadsToProcess[dev]>0)
{
CUDA_CHECK_RETURN(cudaSetDevice(device[dev]));
if(ReadsSentToGPU[dev]>0)
{
CUDA_CHECK_RETURN(cudaEventSynchronize(TrimmA_DONE[dev]));
}
if(AdaptorTrimming || gRNATrimming) CUDA_CHECK_RETURN(cudaMemcpyAsync(dSequenceArray[dev], hReadSequence[dev][dev_next_side], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), cudaMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(cudaEventRecord(SEQ_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(cudaStreamWaitEvent(compute_stream[dev], SEQ_COPY_DONE[dev], 0));
if(gRNATrimming)
{
if(verbose) printf("[%d] Find_gRNA\n", device[dev]);
Find_gRNA<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_next_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(cudaGetLastError());
}
else
{
if(AdaptorTrimming)
{
if(verbose) printf("[%d] TrimmAdaptors\n", device[dev]);
TrimmAdaptors<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_next_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(cudaGetLastError());
}
}
CUDA_CHECK_RETURN(cudaEventRecord(TrimmA_DONE[dev], compute_stream[dev]));
}
}
for(int dev=0; dev<numGPUs; dev++) // copy data from devices and write output file
{
if(verbose) printf("[%d] > ReadsSentToGPU = %d\n", dev, ReadsSentToGPU[dev]);
if(ReadsSentToGPU[dev]>0)
{
CUDA_CHECK_RETURN(cudaSetDevice(device[dev]));
CUDA_CHECK_RETURN(cudaEventSynchronize(DTH_COPY_DONE[dev]));
if(verbose) printf("[%d] Writing hReadLen[%d][%d]\n", device[dev], dev, dev_side);
WriteBlockOfReads(ReadsSentToGPU[dev], hReadLen[dev][dev_side], MIN_OUT_READ_LEN); // write output block
ReadsProcessed += ReadsSentToGPU[dev]; // count how many reads were considered for writing - actual output has minimal length requirement
}
if(ReadsToProcess[dev]>0)
{
CUDA_CHECK_RETURN(cudaStreamWaitEvent(download_stream[dev], TrimmA_DONE[dev], 0));
CUDA_CHECK_RETURN(cudaMemcpyAsync(hReadLen[dev][dev_next_side], dIntArray[dev][dev_next_side], ReadsToProcess[dev] * sizeof(short), cudaMemcpyDeviceToHost, download_stream[dev]));
CUDA_CHECK_RETURN(cudaEventRecord(DTH_COPY_DONE[dev], download_stream[dev]));
if(verbose) printf("[%d] Memcpy DtoH %d reads\n", device[dev], ReadsToProcess[dev]);
}
ReadsSentToGPU[dev] = ReadsToProcess[dev];
ReadsToProcess[dev] = 0;
}
side_flip = dev_side; // dev_side alternate between 0 and 1 to use memory efficiently
dev_side = dev_next_side;
dev_next_side = side_flip;
}
return(ReadsProcessed);
}
void usage_help(void)
{
printf("\nPreprocessReads multi-GPU version\n");
printf("\tsupports up to 4 NVIDIA GPUs with compute capability greater than 3.5 and at least 6 GB memory.\n");
printf("\tcompiled for minimum CUDA API 6.5 (build 20210107).\n");
printf("\nProgram Options:\n\n");
printf("\t-i, -I ....... specify the path for the input file [required]\n");
printf("\t-o, -O ....... specify the path for the output file [required]\n");
printf("\t-len, -Len ... specify the minimum length for reads written to the output file\n");
printf("\t-f ........... over write output file if it exists\n");
printf("\t-gz .......... write output to file in compressed format (.gz)\n");
printf("\t-pigz ........ write output to file in compressed format (.gz) using multithreaded compression\n");
printf("\t-v, -V ....... verbose, prints details on processing to screen\n");
printf("\t-a, -A ....... adaptor sequence, can contain Ns that will match any base in the read\n");
printf("\t-mm, -MM ..... number of mismatches allowed in adaptor search (not including Ns)\n");
printf("\t-Phred ....... sets the base of the qualities PHRED values [default 33], use AUTO for autodetection\n");
printf("\t-Qmin ........ sets the minimum acceptable quality score for a good base [default 20]\n");
printf("\t-Qualab ...... sets the minimum average base quality score for remainder of the read after a bad base [default 25]\n");
printf("\t-dev, -Dev ... specify CUDA device to use (can be used repeatedly for selecting multiple GPUs,\n");
printf("\t if not specified an automatic search for GPUs with compute capability 3.5 and 6 GB memory is performed)\n");
printf("\nAlgorithm selection:\n\n");
printf("\t/TA .......... select adaptor trimming only (can be also used before option /gRNA to avoid quality trimming)\n");
printf("\t/TQ .......... select quality trimming only\n");
printf("\t/TAQ ......... select quality and adaptor trimming [default]\n");
printf("\t/gRNA ........ select gRNA recovery, followed by start relative to start of adaptor sequence and length of gRNA to be retrieved\n");
printf("\t ( /gRNA 12 20 will retrieve 20 nucleotide read sequence starting 12 positions after the 5' end match of adaptor)\n");
printf("\nProgrammer options variables:\n");
printf("\t-G ........... set MAX_READS_ON_GPU variable\n");
printf("\t-R ........... set MAX_READ_LENGTH variable\n");
printf("\nexample usage:\n");
printf("\tPreprocessReads -dev 0 -dev 1 -i infile.fastq -o outfilename.fastq -len 26 -a ATTAGATCGATGCTA -mm 2\n\n");
exit(3);
}
void invalid_command_line_option(const char *errorstr)
{
printf("\nERROR %s\n", errorstr);
printf("\nUsage ----------------------------------------------\n");
usage_help();
}
void parseCommandLineArguments(int argc, char *argv[])
{
bool inputfile_set = false;
bool outputfile_set = false;
bool adaptor_set = false;
bool mismatch_set = false;
bool outlen_set = false;
for(int n=1; n<argc; n++)
{
if(argv[n][0] == '-')
{
char a = argv[n][1];
switch (a)
{
case 'I':
case 'i':
if(inputfile_set) invalid_command_line_option("option -i: input file name already specified.");
if(++n<argc)
{
if(strlen(argv[n])>400) invalid_command_line_option("option -i: input file path length exceeds limit.");
strcpy(input_filename, argv[n]);
inputfile_set=true;
break;
}
else invalid_command_line_option("option -i: INPUT file name missing.");
break;
case 'O':
case 'o':
if(outputfile_set) invalid_command_line_option("option -o: output file name already specified.");
if(++n<argc)
{
if(strlen(argv[n])>400) invalid_command_line_option("option -o: output file path length exceeds limit.");
strcpy(output_filename, argv[n]);
outputfile_set=true;
break;
}
else invalid_command_line_option("option -o: OUTPUT file name missing.");
break;
case 'l':
case 'L':
if(argv[n][2]!='e' || argv[n][3]!='n') invalid_command_line_option("invalid command line option encountered.");
if(outlen_set) invalid_command_line_option("option -len: minimum read length to include in output file is already specified.");
if(++n<argc)
{
int ol=atoi(argv[n]);
if(ol<0 || ol>MAX_READ_LENGTH) invalid_command_line_option("option -len: encountered invalid minimum read length to include in output file.");
MIN_OUT_READ_LEN = (short) ol;
outlen_set=true;
break;
}
else invalid_command_line_option("option -len: expected minimum output read length not found.");
break;
case 'A':
case 'a':
if(adaptor_set) invalid_command_line_option("option -a: adaptor sequence already specified.");
if(++n<argc)
{
if(strlen(argv[n])>MAX_ADAPTOR_LENGTH) invalid_command_line_option("option -a: adaptor sequence length exceeds limit.");
if(strlen(argv[n])<3) invalid_command_line_option("option -a: adaptor sequence length too short.");
strcpy(Adaptor, argv[n]);
adaptor_set=true;
break;
}
else invalid_command_line_option("option -a: expected adaptor sequence.");
break;
case 'M':
case 'm':
if(argv[n][2]!='m' && argv[n][2]!='M') invalid_command_line_option("invalid command line option encountered.");
if(mismatch_set) invalid_command_line_option("option -mm: number of mismatches is already specified.");
if(++n<argc)
{
int mm=atoi(argv[n]);
if(mm>20) invalid_command_line_option("option -mm: number of mismatches exceeds limit.");
mismatches = (short) mm;
mismatch_set=true;
break;
}
else invalid_command_line_option("option -mm: expected number of mismatches not found.");
break;
case 'd':
case 'D':
if(argv[n][2]!='e' || argv[n][3]!='v') invalid_command_line_option("invalid command line option encountered.");
if(numGPUs>=MAX_GPU_NUM) invalid_command_line_option("option -dev: number of CUDA devices exceeds maximum.");
if(++n<argc)
{
int d=atoi(argv[n]);
if(d<0 || d>20) invalid_command_line_option("option -dev: invalid device number.");
device[numGPUs] = d;
numGPUs++;
break;
}
else invalid_command_line_option("option -dev: expected CUDA device number not found.");
break;
case 'f':
overwrite_outputfile = true; // -f
break;
case 'g': // -gz
if((argv[n][2]=='z') && (strlen(argv[n])==3))
{
write_gz_output = true;
break;
}
else invalid_command_line_option("invalid command line option encountered.");
break;
case 'p':
case 'P':
if((strcmp(argv[n], "-Pigz")==0) || (strcmp(argv[n], "-pigz")==0)) // -pigz
{
write_pigz_output = true;
break;
}
else // -phred
{
if(argv[n][2]!='h' || argv[n][3]!='r' || argv[n][4]!='e' || argv[n][5]!='d') invalid_command_line_option("invalid command line option encountered.");
if(++n<argc)
{
if((strcmp(argv[n], "AUTO")==0) || (strcmp(argv[n], "auto")==0))
{
auto_phred_detect = true;
break;
}
else
{
int d=atoi(argv[n]);
if(d<0 || d>100) invalid_command_line_option("option -Phred: invalid phred base.");
PHRED = d;
break;
}
}
else invalid_command_line_option("option -phred: expected number or AUTO not found.");
}
break;
case 'Q':
case 'q':
if((strcmp(argv[n], "-Qmin")==0) || (strcmp(argv[n], "-qmin")==0))
{
if(++n<argc)
{
int qm=atoi(argv[n]);
if(qm<10 || qm>64) invalid_command_line_option("option -qmin: outside 10 to 64 limit.");
MIN_BASE_QUAL = (short) qm;
break;
}
else invalid_command_line_option("option -qmin: expected number not found.");
}
else
{
if((strcmp(argv[n], "-Qualab")==0) || (strcmp(argv[n], "-qualab")==0))
{
if(++n<argc)
{
int qm=atoi(argv[n]);
if(qm<10 || qm>64) invalid_command_line_option("option -qualab: outside 10 to 64 limit.");
MIN_QUAL_AFTER_BAD = (short) qm;
break;
}
else invalid_command_line_option("option -qmin: expected number not found.");
}
}
break;
case 'v':
case 'V':
verbose = true;
break;
case 'H':
case 'h':
case '?':
printf("\nHELP ----------------------------------------------\n");
usage_help();
break;
case 'G': // programmer option to set MAX_NUM_READS_ON_GPU parameter
if(++n<argc)
{
int mrg=atoi(argv[n]);
if(mrg<=0) invalid_command_line_option("invalid command line option encountered.");
MAX_READS_ON_GPU = mrg;
break;
}
else invalid_command_line_option("invalid command line option encountered.");
break;
case 'R': // programmer option to set MAX_READ_LENGTH parameter
if(++n<argc)
{
int mrl=atoi(argv[n]);
if(mrl<=0 || mrl>10000) invalid_command_line_option("invalid command line option encountered.");
MAX_READ_LENGTH = (short) mrl;
break;
}
else invalid_command_line_option("invalid command line option encountered.");
break;
default:
invalid_command_line_option("invalid command line option encountered.");
}
}
else
{
if(argv[n][0] == '/')
{
char a = argv[n][1];
switch (a)
{
case 'T':
AdaptorTrimming = false;
QualityTrimming = false;
gRNATrimming = false;
if(strlen(argv[n]) == 3)
{
if(argv[n][2]=='A')
{
AdaptorTrimming = true;
break;
}
else
{
if(argv[n][2]=='Q')
{
QualityTrimming = true;
break;
}
else invalid_command_line_option("option /T: algorithm selection is invalid.");
}
}
else
{
if(strlen(argv[n])==3 && argv[n][2]=='A' && argv[n][3]=='Q')
{
AdaptorTrimming = true;
QualityTrimming = true;
break;
}
else invalid_command_line_option("option /T: algorithm selection is invalid.");
}
break;
case 'g':
if(strlen(argv[n]) == 5 && argv[n][2]=='R' && argv[n][3]=='N' && argv[n][4]=='A')
{
gRNATrimming = true;
if(++n < argc)
{
gRNA_start = atoi(argv[n]);
}
else invalid_command_line_option("option /gRNA: expected start position relative to adaptor 5' not found.");
if(++n < argc)
{
gRNA_length = atoi(argv[n]);
}
else invalid_command_line_option("option /gRNA: expected gRNA length not found.");
break;
}
else invalid_command_line_option("option /g: algorithm selection is invalid.");
break;
}
}
else invalid_command_line_option("parsing command line options.");
}
}
if(!inputfile_set || !outputfile_set || ((AdaptorTrimming || gRNATrimming) && !adaptor_set)) invalid_command_line_option("missing required command line options.");
if(strcmp(input_filename, output_filename)==0)
{
printf("ERROR: encountered identical input and output file paths!\n"); // input file cannot be identical to output file
exit(253);
}
printf("Setting parameters:\n");
printf("\tInput file name : %s\n", input_filename);
printf("\tOutput file name : %s\n", output_filename);
if(AdaptorTrimming || gRNATrimming) printf("\tAdaptor sequence : %s\n", Adaptor);
if(verbose)
{
if(AdaptorTrimming || gRNATrimming) printf("\tMismatches = %d\n", mismatches);
printf("\tTrimming: ");
if(QualityTrimming) printf("Quality ");
if(AdaptorTrimming) printf("Adaptors ");
if(gRNATrimming) printf("Find gRNA");
printf("\n");
if(QualityTrimming)
{
printf("\tMinimum accepted base quality = %d; average quality of read after bad base = %d\n", MIN_BASE_QUAL, MIN_QUAL_AFTER_BAD);
}
printf("\tFastQ Quality Encoding: ");
if(!auto_phred_detect) printf("PHRED+%d\n", PHRED); else printf("<AUTO DETECTION>\n");
printf("\tMAX_READ_LENGTH : %d\n", MAX_READ_LENGTH);
printf("\tMinimum read length for output = %d\n", MIN_OUT_READ_LEN);
if(overwrite_outputfile) printf("\tForce overwrite output file if it exists.\n");
}
if(numGPUs>0)
{
printf("\nUsing CUDA devices: ");
for(int n=0; n<numGPUs; n++) printf("[%d]", device[n]);
printf("\n");
}
}
int main(int argc, char *argv[])
{
time_t start_t = time(NULL); // for timing the process
time_t end_t;
int reads_processed = 0;
parseCommandLineArguments(argc, argv); // get parameters from the command line
if(numGPUs == 0)
{
if(FindGPUs() == 0) // check for GPUs
{
printf("\nERROR: No suitable CUDA devices found ... exiting\n");
exit(1);
}
else
{
if(numGPUs == 1)
printf("\nOne CUDA device with at least compute capability 3.5 and 6 GB memory found.\n");
else
printf("\n%d CUDA devices with at least compute capability 3.5 and 6 GB memory found.\n", numGPUs);
}
}
if(auto_phred_detect)
{
if(Detect_FastQ_Format_PHRED())
{
if(verbose) printf("<AUTO DETECTION> FastQ Quality Encoding: PHRED+%d\n", PHRED);
}
else
{
printf("\nERROR: Unable to detect FastQ format version.\n");
exit(32);
}
}
OpenFiles(); // access files
InitGPUs();
SetAdaptorSequence(Adaptor, mismatches, PHRED, MIN_BASE_QUAL, MIN_QUAL_AFTER_BAD);
reads_processed = PreprocessReadsOnGPU();
printf("\nReads processed: %d\n", reads_processed);
printf("Reads in input file %s : %d\n",input_filename, hNumberOfReads);
printf("Reads written to output file %s : %d\n", output_filename, hReadsWritten);
if(FastQ_WARNING) printf("Encountered potential issues with the FastQ file format in %s\n", input_filename);
CloseFiles();
FreeGPUs();
end_t = time(NULL);
printf("\nTotal time elapsed: %.0lf seconds\n", difftime(end_t, start_t));
// printf("\n<Press RTN key to exit>\n");
// getchar();
return(0);
}
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <zlib.h>
int hNumberOfReads = 0; // holds number of reads that are processed
int hReadsWritten = 0; // number of reads written to output file
char input_filename[400]="/home/linux/cuda-workspace/PreprocessReads/test.txt"; // input file name
char output_filename[400]="/mnt/sdc/Sequences/out.txt"; // output file name
char GZIP_PIPE[410] = "gzip -> "; // option -gz will fall back to uncompressed output if the pipe cannot be opened
char PIGZ_PIPE[410] = "pigz -> "; // option -pigz for multithreaded compression will fall back to gz and then to uncompressed
gzFile inputFile;
gzFile templateFile; // holds a second pointer in input read file for templating output with read name sequence and quality
FILE *outputFile;
bool overwrite_outputfile = false; // if set true the output file will be overwritten (cmd line option -f)
bool write_gz_output = false; // specify .gz file compressed output (cmd line option -gz)
bool write_pigz_output = false; // specify pigz multithreaded compression for output (-pigz option)
bool AdaptorTrimming = true; // algorithm selectors
bool QualityTrimming = true;
bool gRNATrimming = false;
int gRNA_start; // holds gRNA position relative to adaptor
int gRNA_length;
bool verbose = false; // if true print details of processing to screen
bool FastQ_WARNING = false; // set to true if a FastQ file warning has been issued
short MAX_READ_LENGTH = 203; // maximal length of reads in input file
short MIN_OUT_READ_LEN = 26; // minimum length of reads after trimming to write to output file
short MIN_BASE_QUAL = 20; // parameter for minimum quality for good base
short MIN_QUAL_AFTER_BAD = 25; // parameter for average base quality after bad base
short PHRED = 33; // subtract from qualities for numerical value of solexa
bool auto_phred_detect = false; // if true try to detect PHRED format in FastQ input file
int MAX_READS_ON_GPU = 10000000; // maximum number of reads that fit simultaneously on GPU
#define MAX_ADAPTOR_LENGTH 100 // maximum length of adaptor sequence for trimming, can contain N for any base
#define MAX_FASTQ_BUF_LINE 500 // structure of buffer for read from FastQ file
char Adaptor[MAX_ADAPTOR_LENGTH] = "ATCTCGTATGCCGTCTTCTGCTT"; // adaptor on CPU side from parsing command line
short mismatches = 2; // holds maximum number of mismatches on cpu side
int numGPUs = 0; // holds number of CUDA devices in the system [1 TITAN to max 2 TITAN Z]
#define MAX_GPU_NUM 4
int device[MAX_GPU_NUM]; // device numbers to use
hipStream_t upload_stream[MAX_GPU_NUM]; // cuda streams per device for overlapped asynchronous operation
hipStream_t compute_stream[MAX_GPU_NUM];
hipStream_t download_stream[MAX_GPU_NUM];
char *dSequenceArray[MAX_GPU_NUM]; // device pointers to data arrays, will hold hReadSequence
char *dQualArray[MAX_GPU_NUM]; // device pointers to data arrays, will hold hReadQualstr
short *dIntArray[MAX_GPU_NUM][2]; // two device pointers to integer array, will hold hReadLen
hipEvent_t QUAL_COPY_DONE[MAX_GPU_NUM]; // events for device host synchronization
hipEvent_t SEQ_COPY_DONE[MAX_GPU_NUM];
hipEvent_t TrimmQ_DONE[MAX_GPU_NUM];
hipEvent_t TrimmA_DONE[MAX_GPU_NUM];
hipEvent_t DTH_COPY_DONE[MAX_GPU_NUM];
short *hReadLen[MAX_GPU_NUM][2]; // two buffers per device to hold read lengths on host
char *hReadSequence[MAX_GPU_NUM][2]; // two buffers per device holding read sequences on host
char *hReadQualstr[MAX_GPU_NUM]; // one buffer per device holding read qualities on host
struct read
{
char name[MAX_FASTQ_BUF_LINE];
char sequence[MAX_FASTQ_BUF_LINE];
char descr[MAX_FASTQ_BUF_LINE];
char qualstr[MAX_FASTQ_BUF_LINE];
} Read; // buffers one read from the FastQ file in 4 lines
// macro checks return value of the CUDA runtime call and exits the application if the call failed.
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// For trimming adaptor sequences store adaptor and length as well as quality cutoffs in device constant memory
__constant__ __device__ char ADAPTOR[MAX_ADAPTOR_LENGTH]; // adaptor sequence can contain N for any match
__constant__ __device__ short ADAPTOR_LEN; // length of adaptor sequence in bases
__constant__ __device__ short MAX_MM=2; // maximum mismatches allowed not including Ns
__constant__ __device__ short QUAL_PADDING=33; // hold PHRED base for qualities
__constant__ __device__ short MIN_QUAL=20; // hold minimal quality that is considered good base
__constant__ __device__ short MIN_AVERAGE_QUAL_AFTER_BAD=25; // minimum quality average of remaining read after bad base
void SetAdaptorSequence(const char *AdaptorSequence, const short mismatches, const short PHRED, const short MIN_BASE_QUAL, const short MIN_QUAL_AFTER_BAD)
{ // define the adaptor, mismatches and quality parameters before trimming
short AdLen = strnlen(AdaptorSequence, MAX_ADAPTOR_LENGTH); // get length of adaptor sequence
for(int dev=0; dev < numGPUs; dev++)
{
hipSetDevice(device[dev]);
if(verbose) printf("[%d] Setting Adaptor sequence %s, length %d on GPU \n", dev, AdaptorSequence, AdLen);
CUDA_CHECK_RETURN(hipMemcpyToSymbol(HIP_SYMBOL(ADAPTOR), AdaptorSequence, AdLen)); // copy to device
CUDA_CHECK_RETURN(hipMemcpyToSymbol(HIP_SYMBOL(ADAPTOR_LEN), &AdLen, sizeof(short))); // constant memory
if(verbose) printf("[%d] Setting mismatches %d, PHRED+%d on GPU \n", dev, mismatches, PHRED);
CUDA_CHECK_RETURN(hipMemcpyToSymbol(HIP_SYMBOL(MAX_MM), &mismatches, sizeof(short)));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(HIP_SYMBOL(QUAL_PADDING), &PHRED, sizeof(short)));
if(verbose) printf("[%d] Setting minimum base quality %d, average read quality after bad %d on GPU \n", dev, MIN_BASE_QUAL, MIN_QUAL_AFTER_BAD);
CUDA_CHECK_RETURN(hipMemcpyToSymbol(HIP_SYMBOL(MIN_QUAL), &MIN_BASE_QUAL, sizeof(short)));
CUDA_CHECK_RETURN(hipMemcpyToSymbol(HIP_SYMBOL(MIN_AVERAGE_QUAL_AFTER_BAD), &MIN_QUAL_AFTER_BAD, sizeof(short)));
}
}
__global__ void Find_gRNA(char *dDataArray, short *read_length, int reads, short MAX_READ_LENGTH)
{
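// Grid-stride loop: each thread processes reads idx, idx+stride, idx+2*stride, ...
// (stride = blockDim.x * gridDim.x), so a fixed launch geometry covers any read count.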
for(long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < reads; idx += blockDim.x * gridDim.x)
{
if (idx < reads)
{
char *sequence = &dDataArray[idx*MAX_READ_LENGTH];
short rl=read_length[idx];
short n;
short p=0;
short mm=0;
read_length[idx]=0;
while (p <= rl-ADAPTOR_LEN)
{
mm=0;
for (n=0; n<ADAPTOR_LEN; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm > MAX_MM) break;
}
}
if (mm <= MAX_MM)
{
read_length[idx]=p;
break;
}
p++;
}
if (mm <= MAX_MM ) continue;
p=rl-ADAPTOR_LEN+1;
if (rl >= ADAPTOR_LEN-1)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-1; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm >= MAX_MM) break;
}
}
if (mm < MAX_MM)
{
read_length[idx]=p;
continue;
}
}
p++;
if (rl >= ADAPTOR_LEN-2)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-2; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
break;
}
}
if (MAX_MM > (mm+1))
{
read_length[idx]=p;
}
}
}
}
}
__global__ void TrimmAdaptors(char *dDataArray, short *read_length, int reads, short MAX_READ_LENGTH)
{
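// Same adaptor scan as Find_gRNA, but read_length[idx] is only shortened when an
// adaptor match is found; reads without a match keep their full length.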
for(long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < reads; idx += blockDim.x * gridDim.x)
{
if (idx < reads)
{
char *sequence = &dDataArray[idx*MAX_READ_LENGTH];
short rl=read_length[idx];
short n;
short p=0;
short mm=0;
while (p <= rl-ADAPTOR_LEN)
{
mm=0;
for (n=0; n<ADAPTOR_LEN; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm > MAX_MM) break;
}
}
if (mm <= MAX_MM)
{
read_length[idx]=p;
break;
}
p++;
}
if (mm <= MAX_MM ) continue;
p=rl-ADAPTOR_LEN+1;
if (rl >= ADAPTOR_LEN-1)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-1; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
if (mm >= MAX_MM) break;
}
}
if (mm < MAX_MM)
{
read_length[idx]=p;
continue;
}
}
p++;
if (rl >= ADAPTOR_LEN-2)
{
mm=0;
for (n=0; n<ADAPTOR_LEN-2; n++)
{
if (ADAPTOR[n]!='N' && sequence[p+n]!=ADAPTOR[n])
{
mm++;
break;
}
}
if (MAX_MM > (mm+1))
{
read_length[idx]=p;
}
}
}
}
}
__global__ void TrimmQuals(char *quals, short *read_length, int reads, short MAX_READ_LENGTH)
{
for(long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < reads; idx += blockDim.x * gridDim.x)
{
if (idx < reads)
{
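// Quality trimming: find the first base at or below MIN_QUAL; if the summed quality
// of the remaining bases falls below MIN_AVERAGE_QUAL_AFTER_BAD times the remaining
// span, truncate the read there by shortening read_length[idx].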
char *q = &quals[idx*MAX_READ_LENGTH];
short rl=read_length[idx];
short i;
for(i=0; i<rl; i++)
{
if ((q[i]-QUAL_PADDING)<=MIN_QUAL)
{
short s=0;
short r=i+1;
while (r<rl)
{
s=s+q[r]-QUAL_PADDING;
r++;
}
if(s<(MIN_AVERAGE_QUAL_AFTER_BAD*(rl-i+1)))
{
read_length[idx]=i;
break;
}
}
}
}
}
}
void InitGPUs(void)
{
if(verbose) printf("\n");
for (int dev=0; dev<numGPUs; dev++) // initialize devices and allocate cuda resources
{
if(verbose) printf("[%d] Initializing GPU\n", dev);
CUDA_CHECK_RETURN(hipSetDevice(device[dev]));
CUDA_CHECK_RETURN(hipMalloc((void **)&dSequenceArray[dev], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char))); // one data array
CUDA_CHECK_RETURN(hipMalloc((void **)&dQualArray[dev], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char))); // one data array
CUDA_CHECK_RETURN(hipMalloc((void **)&dIntArray[dev][0], MAX_READS_ON_GPU * sizeof(short))); // two dIntArrays
CUDA_CHECK_RETURN(hipMalloc((void **)&dIntArray[dev][1], MAX_READS_ON_GPU * sizeof(short)));
CUDA_CHECK_RETURN(hipStreamCreate(&upload_stream[dev])); // create streams on devices
CUDA_CHECK_RETURN(hipStreamCreate(&compute_stream[dev]));
CUDA_CHECK_RETURN(hipStreamCreate(&download_stream[dev]));
CUDA_CHECK_RETURN(hipEventCreateWithFlags(&QUAL_COPY_DONE[dev], hipEventDisableTiming | hipEventBlockingSync)); // events for device host synchronization (flags combined with bitwise |, not logical ||)
CUDA_CHECK_RETURN(hipEventCreateWithFlags(&SEQ_COPY_DONE[dev], hipEventDisableTiming));
CUDA_CHECK_RETURN(hipEventCreateWithFlags(&TrimmQ_DONE[dev], hipEventDisableTiming));
CUDA_CHECK_RETURN(hipEventCreateWithFlags(&TrimmA_DONE[dev], hipEventDisableTiming));
CUDA_CHECK_RETURN(hipEventCreateWithFlags(&DTH_COPY_DONE[dev], hipEventDisableTiming | hipEventBlockingSync));
if(verbose) printf("[%d] Allocating pinned CPU memory for reads\n", dev);
CUDA_CHECK_RETURN(hipHostMalloc((void **) &hReadLen[dev][0], MAX_READS_ON_GPU * sizeof(short), hipHostMallocDefault)); // allocate two buffers for reads length
CUDA_CHECK_RETURN(hipHostMalloc((void **) &hReadLen[dev][1], MAX_READS_ON_GPU * sizeof(short), hipHostMallocDefault));
CUDA_CHECK_RETURN(hipHostMalloc((void **) &hReadSequence[dev][0], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char), hipHostMallocDefault)); // two buffers for reads sequence
CUDA_CHECK_RETURN(hipHostMalloc((void **) &hReadSequence[dev][1], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char), hipHostMallocDefault));
CUDA_CHECK_RETURN(hipHostMalloc((void **) &hReadQualstr[dev], MAX_READS_ON_GPU * MAX_READ_LENGTH * sizeof(char), hipHostMallocDefault)); // one buffer for qualities
}
}
void FreeGPUs(void)
{
for (int dev=0; dev<numGPUs; dev++) // release cuda resources from all devices
{
CUDA_CHECK_RETURN(hipSetDevice(device[dev]));
CUDA_CHECK_RETURN(hipEventDestroy(QUAL_COPY_DONE[dev]));
CUDA_CHECK_RETURN(hipEventDestroy(SEQ_COPY_DONE[dev]));
CUDA_CHECK_RETURN(hipEventDestroy(TrimmQ_DONE[dev]));
CUDA_CHECK_RETURN(hipEventDestroy(TrimmA_DONE[dev]));
CUDA_CHECK_RETURN(hipEventDestroy(DTH_COPY_DONE[dev]));
CUDA_CHECK_RETURN(hipStreamDestroy(upload_stream[dev]));
CUDA_CHECK_RETURN(hipStreamDestroy(compute_stream[dev]));
CUDA_CHECK_RETURN(hipStreamDestroy(download_stream[dev]));
CUDA_CHECK_RETURN(hipFree(dIntArray[dev][0]));
CUDA_CHECK_RETURN(hipFree(dIntArray[dev][1]));
CUDA_CHECK_RETURN(hipFree(dSequenceArray[dev]));
CUDA_CHECK_RETURN(hipFree(dQualArray[dev]));
hipHostFree(hReadLen[dev][0]); // free all page locked host memory
hipHostFree(hReadLen[dev][1]);
hipHostFree(hReadSequence[dev][0]);
hipHostFree(hReadSequence[dev][1]);
hipHostFree(hReadQualstr[dev]);
}
}
int FindGPUs(void)
{
int deviceCount;
numGPUs = 0;
CUDA_CHECK_RETURN(hipGetDeviceCount(&deviceCount)); // search cuda devices and identify compute 3.5 with 6 GB
if (deviceCount == 0) return(0);
for (int dev=0; dev<deviceCount; dev++)
{
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
int driverVersion, runtimeVersion;
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
if(((deviceProp.major == 3 && deviceProp.minor >= 5) || (deviceProp.major > 3)) && deviceProp.totalGlobalMem > 6144000000)
{
if(verbose)
{
printf("\nDevice %d: %s\n", dev, deviceProp.name);
printf(" CUDA Driver / Runtime Version: %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);
printf(" CUDA Compute Capability: %d.%d\n", deviceProp.major, deviceProp.minor);
printf(" PCI bus ID: %d:%d\n", deviceProp.pciBusID, deviceProp.pciDeviceID);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf(" Number of Multiprocessors: %d\n", deviceProp.multiProcessorCount);
}
device[numGPUs] = dev;
numGPUs++;
if (numGPUs >= MAX_GPU_NUM) break;
}
}
return(numGPUs);
}
void OpenFiles()
{
// handle compression using zlib gzFile functions for input and pipe gzip for output
if(!overwrite_outputfile) // test if output file exists if option -f is not specified
{
outputFile = fopen(output_filename, "r");
if(outputFile!=NULL)
{
printf("ERROR output file exists already. To overwrite run with option -f\n");
exit(255);
} // no fclose needed here: if the open succeeded we have already exited
}
inputFile = gzopen(input_filename, "r");
if(inputFile == NULL)
{
printf("ERROR opening input file: %s\n", input_filename);
exit(255);
}
templateFile = gzopen(input_filename, "r"); // holds a second access to input read file for rereading positions for output
if(templateFile == NULL)
{
printf("ERROR opening second handle to input file.\n");
exit(255);
}
if(write_pigz_output)
{
outputFile = popen(strcat(PIGZ_PIPE, output_filename), "w");
if(outputFile == NULL)
{
printf("Option -pigz: could not open pipe ... falling back on libz.\n\tMake sure pigz is installed with: sudo apt-get install pigz\n");
write_gz_output = true;
write_pigz_output = false;
}
}
if(write_gz_output && !write_pigz_output)
{
outputFile = popen(strcat(GZIP_PIPE, output_filename), "w");
if(outputFile == NULL)
{
printf("Option -gz: could not open pipe ... falling back to uncompressed output.\n");
write_gz_output = false;
}
}
if(!write_gz_output && !write_pigz_output) outputFile = fopen(output_filename, "w");
if(outputFile == NULL)
{
printf("ERROR opening output file: %s\n", output_filename);
exit(255);
}
}
int ReadBlockOfReads(int max_num_to_read, short *hReadLen, char* hReadSequence, char *hReadQualstr)
{
int n = 0; // returns the number of reads read
short length;
while(gzgets(inputFile, Read.name, MAX_FASTQ_BUF_LINE) != NULL)
{
if(Read.name[0] == 13) break; // end if CR <13> or LF <10> are encountered
if(Read.name[0] == 10) break;
if(gzgets(inputFile, Read.sequence, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.descr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.qualstr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(!FastQ_WARNING) // write warning if FastQ file contains non-standard lines
{
if(Read.name[0]!='@' || Read.descr[0]!='+')
{
printf("Warning: Encountered unusual line identifiers in FastQ file format.\n");
FastQ_WARNING = true;
}
}
length = strlen(Read.sequence)-1;
if(length > MAX_READ_LENGTH) length = MAX_READ_LENGTH;
// printf("%d, ", length);
memcpy(&hReadSequence[n*MAX_READ_LENGTH], Read.sequence, length);
memcpy(&hReadQualstr[n*MAX_READ_LENGTH], Read.qualstr, length);
hReadLen[n] = length;
n++;
if(n == max_num_to_read) break;
}
hNumberOfReads = hNumberOfReads + n; // update global variable to total number of reads
return(n);
}
int WriteBlockOfReads(int num_of_reads_to_write, const short *hReadLen, short min_out_read_length)
{
int n=0;
int w=0;
short length;
char str_buffer[MAX_FASTQ_BUF_LINE];
if(num_of_reads_to_write == 0) return(0);
if(verbose) printf("I/O > num_of_reads_to_write = %d\n", num_of_reads_to_write);
while(gzgets(templateFile, Read.name, MAX_FASTQ_BUF_LINE) != NULL)
{
if(gzgets(templateFile, Read.sequence, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input file for writing trimmed output!\n");
exit(255);
}
if(gzgets(templateFile, Read.descr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input file for writing trimmed output!\n");
exit(255);
}
if(gzgets(templateFile, Read.qualstr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input file for writing trimmed output!\n");
exit(255);
}
length = hReadLen[n];
if(gRNATrimming) // isolate the gRNA from adaptor match
{
if(length>0)
{
strncpy(str_buffer, &Read.sequence[length+gRNA_start], gRNA_length);
strncpy(&Read.sequence[0], str_buffer, gRNA_length);
Read.sequence[gRNA_length]=10;
Read.sequence[gRNA_length+1]=0;
strncpy(str_buffer, &Read.qualstr[length+gRNA_start], gRNA_length);
strncpy(&Read.qualstr[0], str_buffer, gRNA_length);
Read.qualstr[gRNA_length]=10;
Read.qualstr[gRNA_length+1]=0;
fprintf(outputFile,"%s", Read.name);
fprintf(outputFile,"%s", Read.sequence);
fprintf(outputFile,"%s", Read.descr);
fprintf(outputFile,"%s", Read.qualstr);
w++;
}
}
else // normal quality and adaptor trimming
{
if(length >= min_out_read_length)
{
Read.sequence[length]=10; // write only reads that are longer than minimum to output file
Read.sequence[length+1]=0;
Read.qualstr[length]=10;
Read.qualstr[length+1]=0;
fprintf(outputFile,"%s", Read.name);
fprintf(outputFile,"%s", Read.sequence);
fprintf(outputFile,"%s", Read.descr);
fprintf(outputFile,"%s", Read.qualstr);
w++;
}
}
n++;
if(n == num_of_reads_to_write) break;
}
hReadsWritten = hReadsWritten + w;
return(n);
}
void CloseFiles(void)
{
gzclose(inputFile);
gzclose(templateFile);
if(write_gz_output || write_pigz_output) pclose(outputFile); else fclose(outputFile);
}
bool Detect_FastQ_Format_PHRED(void)
{
int n = 0; // returns the number of reads read
short length;
uint min_qual = 65;
uint max_qual = 65;
bool deciphered = false;
inputFile = gzopen(input_filename, "r");
if(inputFile == NULL)
{
printf("ERROR opening input file %s\n", input_filename);
exit(255);
}
while(gzgets(inputFile, Read.name, MAX_FASTQ_BUF_LINE) != NULL)
{
if(Read.name[0] == 13) break; // end if CR <13> or LF <10> are encountered
if(Read.name[0] == 10) break;
if(gzgets(inputFile, Read.sequence, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.descr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(gzgets(inputFile, Read.qualstr, MAX_FASTQ_BUF_LINE)==NULL)
{
printf("ERROR parsing input!\n");
exit(255);
}
if(!FastQ_WARNING) // write warning if FastQ file contains non-standard lines
{
if(Read.name[0]!='@' || Read.descr[0]!='+')
{
printf("Warning: Encountered unusual line identifiers in FastQ file format.");
FastQ_WARNING = true;
}
}
length = strlen(Read.sequence)-1;
for(n=0; n<length;n++)
{
if(min_qual > (uint)Read.qualstr[n])
{
min_qual = (uint) Read.qualstr[n];
}
else
{
if(max_qual < (uint)Read.qualstr[n]) max_qual = (uint) Read.qualstr[n];
}
}
if(min_qual < 59)
{
PHRED = 33; // FastQ Sanger and Illumina 1.8+ format
deciphered = true;
break;
}
if(max_qual > 80)
{
PHRED = 64; // FastQ Illumina 1.3+ format
deciphered = true;
break;
}
}
gzclose(inputFile);
return(deciphered);
}
int PreprocessReadsOnGPU(void)
{
bool DONE = false;
bool ALL_DONE = false;
int ReadsProcessed = 0;
int ReadsToProcess[MAX_GPU_NUM];
int ReadsSentToGPU[MAX_GPU_NUM];
int dev_side = 0; // left or right side of device memories [0] or [1]
int dev_next_side = 1;
int side_flip;
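// Double-buffered pipeline: while results for side dev_side are downloaded and
// written to disk, the next block is uploaded and processed on side dev_next_side;
// the two sides swap at the end of every round.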
if(verbose) printf("\nPreprocessing reads on GPU\n");
for(int dev=0; dev<numGPUs; dev++) // copy data and run kernels initial round to fill CUDA queue and not waste time !
{
ReadsToProcess[dev] = 0;
ReadsSentToGPU[dev] = 0;
if(!DONE)
{
ReadsToProcess[dev] = ReadBlockOfReads(MAX_READS_ON_GPU, hReadLen[dev][dev_side], hReadSequence[dev][dev_side], hReadQualstr[dev]);
if(ReadsToProcess[dev] < MAX_READS_ON_GPU)
{
DONE = true;
}
if(ReadsToProcess[dev] == 0) continue;
CUDA_CHECK_RETURN(hipSetDevice(device[dev]));
if(QualityTrimming) CUDA_CHECK_RETURN(hipMemcpyAsync(dQualArray[dev], hReadQualstr[dev], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), hipMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(hipMemcpyAsync(dIntArray[dev][dev_side], hReadLen[dev][dev_side], ReadsToProcess[dev] * sizeof(short), hipMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(hipEventRecord(QUAL_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(hipStreamWaitEvent(compute_stream[dev], QUAL_COPY_DONE[dev], 0));
if(QualityTrimming)
{
if(verbose) printf("[%d] TrimmQuals\n", device[dev]);
TrimmQuals<<<4096, 256, 0, compute_stream[dev]>>>(dQualArray[dev], dIntArray[dev][dev_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(hipGetLastError());
}
CUDA_CHECK_RETURN(hipEventRecord(TrimmQ_DONE[dev], compute_stream[dev]));
if(AdaptorTrimming || gRNATrimming) CUDA_CHECK_RETURN(hipMemcpyAsync(dSequenceArray[dev], hReadSequence[dev][dev_side], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), hipMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(hipEventRecord(SEQ_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(hipStreamWaitEvent(compute_stream[dev], SEQ_COPY_DONE[dev], 0));
if(gRNATrimming)
{
if(verbose) printf("[%d] Find_gRNA\n", device[dev]);
Find_gRNA<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(hipGetLastError());
}
else
{
if(AdaptorTrimming)
{
if(verbose) printf("[%d] TrimmAdaptors\n", device[dev]);
TrimmAdaptors<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(hipGetLastError());
}
}
CUDA_CHECK_RETURN(hipEventRecord(TrimmA_DONE[dev], compute_stream[dev]));
CUDA_CHECK_RETURN(hipStreamWaitEvent(download_stream[dev], TrimmA_DONE[dev], 0));
CUDA_CHECK_RETURN(hipMemcpyAsync(hReadLen[dev][dev_side], dIntArray[dev][dev_side], ReadsToProcess[dev] * sizeof(short), hipMemcpyDeviceToHost, download_stream[dev]));
CUDA_CHECK_RETURN(hipEventRecord(DTH_COPY_DONE[dev], download_stream[dev]));
if(verbose) printf("[%d] Memcpy DtoH %d reads\n", device[dev], ReadsToProcess[dev]);
ReadsSentToGPU[dev] = ReadsToProcess[dev];
ReadsToProcess[dev] = 0;
}
}
while(!ALL_DONE)
{
ALL_DONE = true; // setup for test if should stop if there is no more data on the GPU
if(!DONE)
{
for(int dev=0; dev<numGPUs; dev++) // read a block of data, copy QUALS to GPU and run QUAL kernel
{
ReadsToProcess[dev] = 0;
CUDA_CHECK_RETURN(hipSetDevice(device[dev]));
if(ReadsSentToGPU[dev]>0)
{
CUDA_CHECK_RETURN(hipEventSynchronize(TrimmQ_DONE[dev]));
}
ReadsToProcess[dev] = ReadBlockOfReads(MAX_READS_ON_GPU, hReadLen[dev][dev_next_side], hReadSequence[dev][dev_next_side], hReadQualstr[dev]);
if(ReadsToProcess[dev] < MAX_READS_ON_GPU)
{
DONE = true;
}
if(ReadsToProcess[dev] == 0) break;
ALL_DONE = false; // there is more work on the GPUs that requires further processing
if(QualityTrimming) CUDA_CHECK_RETURN(hipMemcpyAsync(dQualArray[dev], hReadQualstr[dev], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), hipMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(hipMemcpyAsync(dIntArray[dev][dev_next_side], hReadLen[dev][dev_next_side], ReadsToProcess[dev] * sizeof(short), hipMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(hipEventRecord(QUAL_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(hipStreamWaitEvent(compute_stream[dev], QUAL_COPY_DONE[dev], 0));
if(QualityTrimming)
{
if(verbose) printf("[%d] TrimmQuals\n", device[dev]);
TrimmQuals<<<4096, 256, 0, compute_stream[dev]>>>(dQualArray[dev], dIntArray[dev][dev_next_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(hipGetLastError());
}
}
}
for(int dev=0; dev<numGPUs; dev++) // copy sequence and run Adaptor trim kernel
{
if(ReadsToProcess[dev]>0)
{
CUDA_CHECK_RETURN(hipSetDevice(device[dev]));
if(ReadsSentToGPU[dev]>0)
{
CUDA_CHECK_RETURN(hipEventSynchronize(TrimmA_DONE[dev]));
}
if(AdaptorTrimming || gRNATrimming) CUDA_CHECK_RETURN(hipMemcpyAsync(dSequenceArray[dev], hReadSequence[dev][dev_next_side], ReadsToProcess[dev] * MAX_READ_LENGTH * sizeof(char), hipMemcpyHostToDevice, upload_stream[dev]));
CUDA_CHECK_RETURN(hipEventRecord(SEQ_COPY_DONE[dev], upload_stream[dev]));
CUDA_CHECK_RETURN(hipStreamWaitEvent(compute_stream[dev], SEQ_COPY_DONE[dev], 0));
if(gRNATrimming)
{
if(verbose) printf("[%d] Find_gRNA\n", device[dev]);
Find_gRNA<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_next_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(hipGetLastError());
}
else
{
if(AdaptorTrimming)
{
if(verbose) printf("[%d] TrimmAdaptors\n", device[dev]);
TrimmAdaptors<<<4096, 256, 0, compute_stream[dev]>>>(dSequenceArray[dev], dIntArray[dev][dev_next_side], ReadsToProcess[dev], MAX_READ_LENGTH);
CUDA_CHECK_RETURN(hipGetLastError());
}
}
CUDA_CHECK_RETURN(hipEventRecord(TrimmA_DONE[dev], compute_stream[dev]));
}
}
for(int dev=0; dev<numGPUs; dev++) // copy data from devices and write output file
{
if(verbose) printf("[%d] > ReadsSentToGPU = %d\n", dev, ReadsSentToGPU[dev]);
if(ReadsSentToGPU[dev]>0)
{
CUDA_CHECK_RETURN(hipSetDevice(device[dev]));
CUDA_CHECK_RETURN(hipEventSynchronize(DTH_COPY_DONE[dev]));
if(verbose) printf("[%d] Writing hReadLen[%d][%d]\n", device[dev], dev, dev_side);
WriteBlockOfReads(ReadsSentToGPU[dev], hReadLen[dev][dev_side], MIN_OUT_READ_LEN); // write output block
ReadsProcessed += ReadsSentToGPU[dev]; // count how many reads were considered for writing - actual output has minimal length requirement
}
if(ReadsToProcess[dev]>0)
{
CUDA_CHECK_RETURN(hipStreamWaitEvent(download_stream[dev], TrimmA_DONE[dev], 0));
CUDA_CHECK_RETURN(hipMemcpyAsync(hReadLen[dev][dev_next_side], dIntArray[dev][dev_next_side], ReadsToProcess[dev] * sizeof(short), hipMemcpyDeviceToHost, download_stream[dev]));
CUDA_CHECK_RETURN(hipEventRecord(DTH_COPY_DONE[dev], download_stream[dev]));
if(verbose) printf("[%d] Memcpy DtoH %d reads\n", device[dev], ReadsToProcess[dev]);
}
ReadsSentToGPU[dev] = ReadsToProcess[dev];
ReadsToProcess[dev] = 0;
}
side_flip = dev_side; // dev_side alternate between 0 and 1 to use memory efficiently
dev_side = dev_next_side;
dev_next_side = side_flip;
}
return(ReadsProcessed);
}
void usage_help(void)
{
printf("\nPreprocessReads multi-GPU version\n");
printf("\tsupports up to 4 NVIDIA GPUs with compute capability greater than 3.5 and at least 6 GB memory.\n");
printf("\tcompiled for minimum CUDA API 6.5 (build 20210107).\n");
printf("\nProgram Options:\n\n");
printf("\t-i, -I ....... specify the path for the input file [required]\n");
printf("\t-o, -O ....... specify the path for the output file [required]\n");
printf("\t-len, -Len ... specify the minimum length for reads written to the output file\n");
printf("\t-f ........... over write output file if it exists\n");
printf("\t-gz .......... write output to file in compressed format (.gz)\n");
printf("\t-pigz ........ write output to file in compressed format (.gz) using multithreaded compression\n");
printf("\t-v, -V ....... verbose, prints details on processing to screen\n");
printf("\t-a, -A ....... adaptor sequence, can contain Ns that will match any base in the read\n");
printf("\t-mm, -MM ..... number of mismatches allowed in adaptor search (not including Ns)\n");
printf("\t-Phred ....... sets the base of the qualities PHRED values [default 33], use AUTO for autodetection\n");
printf("\t-Qmin ........ sets the minimum acceptable quality score for a good base [default 20]\n");
printf("\t-Qualab ...... sets the minimum average base quality score for remainder of the read after a bad base [default 25]\n");
printf("\t-dev, -Dev ... specify CUDA device to use (can be used repeatedly for selecting multiple GPUs,\n");
printf("\t if not specified an automatic search for GPUs with compute capability 3.5 and 6 GB memory is performed)\n");
printf("\nAlgorithm selection:\n\n");
printf("\t/TA .......... select adaptor trimming only (can be also used before option /gRNA to avoid quality trimming)\n");
printf("\t/TQ .......... select quality trimming only\n");
printf("\t/TAQ ......... select quality and adaptor trimming [default]\n");
printf("\t/gRNA ........ select gRNA recovery, followed by start relative to start of adaptor sequence and length of gRNA to be retrieved\n");
printf("\t ( /gRNA 12 20 will retrieve 20 nucleotide read sequence starting 12 positions after the 5' end match of adaptor)\n");
printf("\nProgrammer options variables:\n");
printf("\t-G ........... set MAX_READS_ON_GPU variable\n");
printf("\t-R ........... set MAX_READ_LENGTH variable\n");
printf("\nexample usage:\n");
printf("\tPreprocessReads -dev 0 -dev 1 -i infile.fastq -o outfilename.fastq -len 26 -a ATTAGATCGATGCTA -mm 2\n\n");
exit(3);
}
void invalid_command_line_option(const char *errorstr)
{
printf("\nERROR %s\n", errorstr);
printf("\nUsage ----------------------------------------------\n");
usage_help();
}
void parseCommandLineArguments(int argc, char *argv[])
{
bool inputfile_set = false;
bool outputfile_set = false;
bool adaptor_set = false;
bool mismatch_set = false;
bool outlen_set = false;
for(int n=1; n<argc; n++)
{
if(argv[n][0] == '-')
{
char a = argv[n][1];
switch (a)
{
case 'I':
case 'i':
if(inputfile_set) invalid_command_line_option("option -i: input file name already specified.");
if(++n<argc)
{
if(strlen(argv[n])>400) invalid_command_line_option("option -i: input file path length exceeds limit.");
strcpy(input_filename, argv[n]);
inputfile_set=true;
break;
}
else invalid_command_line_option("option -i: INPUT file name missing.");
break;
case 'O':
case 'o':
if(outputfile_set) invalid_command_line_option("option -o: output file name already specified.");
if(++n<argc)
{
if(strlen(argv[n])>400) invalid_command_line_option("option -o: output file path length exceeds limit.");
strcpy(output_filename, argv[n]);
outputfile_set=true;
break;
}
else invalid_command_line_option("option -o: OUTPUT file name missing.");
break;
case 'l':
case 'L':
if(argv[n][2]!='e' || argv[n][3]!='n') invalid_command_line_option("invalid command line option encountered.");
if(outlen_set) invalid_command_line_option("option -len: minimum read length to include in output file is already specified.");
if(++n<argc)
{
int ol=atoi(argv[n]);
if(ol<0 || ol>MAX_READ_LENGTH) invalid_command_line_option("option -len: encountered invalid minimum read length to include in output file.");
MIN_OUT_READ_LEN = (short) ol;
outlen_set=true;
break;
}
else invalid_command_line_option("option -len: expected minimum output read length not found.");
break;
case 'A':
case 'a':
if(adaptor_set) invalid_command_line_option("option -a: adaptor sequence already specified.");
if(++n<argc)
{
if(strlen(argv[n])>MAX_ADAPTOR_LENGTH) invalid_command_line_option("option -a: adaptor sequence length exceeds limit.");
if(strlen(argv[n])<3) invalid_command_line_option("option -a: adaptor sequence length too short.");
strcpy(Adaptor, argv[n]);
adaptor_set=true;
break;
}
else invalid_command_line_option("option -a: expected adaptor sequence.");
break;
case 'M':
case 'm':
if(argv[n][2]!='m' && argv[n][2]!='M') invalid_command_line_option("invalid command line option encountered.");
if(mismatch_set) invalid_command_line_option("option -mm: number of mismatches is already specified.");
if(++n<argc)
{
int mm=atoi(argv[n]);
if(mm>20) invalid_command_line_option("option -mm: number of mismatches exceeds limit.");
mismatches = (short) mm;
mismatch_set=true;
break;
}
else invalid_command_line_option("option -mm: expected number of mismatches not found.");
break;
case 'd':
case 'D':
if(argv[n][2]!='e' || argv[n][3]!='v') invalid_command_line_option("invalid command line option encountered.");
if(numGPUs>=MAX_GPU_NUM) invalid_command_line_option("option -dev: number of CUDA devices exceeds maximum.");
if(++n<argc)
{
int d=atoi(argv[n]);
if(d<0 || d>20) invalid_command_line_option("option -dev: invalid device number.");
device[numGPUs] = d;
numGPUs++;
break;
}
else invalid_command_line_option("option -dev: expected CUDA device number not found.");
break;
case 'f':
overwrite_outputfile = true; // -f
break;
case 'g': // -gz
if((argv[n][2]=='z') && (strlen(argv[n])==3))
{
write_gz_output = true;
break;
}
else invalid_command_line_option("invalid command line option encountered.");
break;
case 'p':
case 'P':
if((strcmp(argv[n], "-Pigz")==0) || (strcmp(argv[n], "-pigz")==0)) // -pigz
{
write_pigz_output = true;
break;
}
else // -phred
{
if(argv[n][2]!='h' || argv[n][3]!='r' || argv[n][4]!='e' || argv[n][5]!='d') invalid_command_line_option("invalid command line option encountered.");
if(++n<argc)
{
if((strcmp(argv[n], "AUTO")==0) || (strcmp(argv[n], "auto")==0))
{
auto_phred_detect = true;
break;
}
else
{
int d=atoi(argv[n]);
if(d<0 || d>100) invalid_command_line_option("option -Phred: invalid phred base.");
PHRED = d;
break;
}
}
else invalid_command_line_option("option -phred: expected number or AUTO not found.");
}
break;
case 'Q':
case 'q':
if((strcmp(argv[n], "-Qmin")==0) || (strcmp(argv[n], "-qmin")==0))
{
if(++n<argc)
{
int qm=atoi(argv[n]);
if(qm<10 || qm>64) invalid_command_line_option("option -qmin: outside 10 to 64 limit.");
MIN_BASE_QUAL = (short) qm;
break;
}
else invalid_command_line_option("option -qmin: expected number not found.");
}
else
{
if((strcmp(argv[n], "-Qualab")==0) || (strcmp(argv[n], "-qualab")==0))
{
if(++n<argc)
{
int qm=atoi(argv[n]);
if(qm<10 || qm>64) invalid_command_line_option("option -qualab: outside 10 to 64 limit.");
MIN_QUAL_AFTER_BAD = (short) qm;
break;
}
else invalid_command_line_option("option -qmin: expected number not found.");
}
}
break;
case 'v':
case 'V':
verbose = true;
break;
case 'H':
case 'h':
case '?':
printf("\nHELP ----------------------------------------------\n");
usage_help();
break;
case 'G': // programmer option to set MAX_READS_ON_GPU parameter
if(++n<argc)
{
int mrg=atoi(argv[n]);
if(mrg<=0) invalid_command_line_option("invalid command line option encountered.");
MAX_READS_ON_GPU = mrg;
break;
}
else invalid_command_line_option("invalid command line option encountered.");
break;
case 'R': // programmer option to set MAX_READ_LENGTH parameter
if(++n<argc)
{
int mrl=atoi(argv[n]);
if(mrl<=0 || mrl>10000) invalid_command_line_option("invalid command line option encountered.");
MAX_READ_LENGTH = (short) mrl;
break;
}
else invalid_command_line_option("invalid command line option encountered.");
break;
default:
invalid_command_line_option("invalid command line option encountered.");
}
}
else
{
if(argv[n][0] == '/')
{
char a = argv[n][1];
switch (a)
{
case 'T':
AdaptorTrimming = false;
QualityTrimming = false;
gRNATrimming = false;
if(strlen(argv[n]) == 3)
{
if(argv[n][2]=='A')
{
AdaptorTrimming = true;
break;
}
else
{
if(argv[n][2]=='Q')
{
QualityTrimming = true;
break;
}
else invalid_command_line_option("option /T: algorithm selection is invalid.");
}
}
else
{
if(strlen(argv[n])==3 && argv[n][2]=='A' && argv[n][3]=='Q')
{
AdaptorTrimming = true;
QualityTrimming = true;
break;
}
else invalid_command_line_option("option /T: algorithm selection is invalid.");
}
break;
case 'g':
if(strlen(argv[n]) == 5 && argv[n][2]=='R' && argv[n][3]=='N' && argv[n][4]=='A')
{
gRNATrimming = true;
if(++n < argc)
{
gRNA_start = atoi(argv[n]);
}
else invalid_command_line_option("option /gRNA: expected start position relative to adaptor 5' not found.");
if(++n < argc)
{
gRNA_length = atoi(argv[n]);
}
else invalid_command_line_option("option /gRNA: expected gRNA length not found.");
break;
}
else invalid_command_line_option("option /g: algorithm selection is invalid.");
break;
}
}
else invalid_command_line_option("parsing command line options.");
}
}
if(!inputfile_set || !outputfile_set || ((AdaptorTrimming || gRNATrimming) && !adaptor_set)) invalid_command_line_option("missing required command line options.");
if(strcmp(input_filename, output_filename)==0)
{
printf("ERROR: encountered identical input and output file paths!\n"); // input file cannot be identical to output file
exit(253);
}
printf("Setting parameters:\n");
printf("\tInput file name : %s\n", input_filename);
printf("\tOutput file name : %s\n", output_filename);
if(AdaptorTrimming || gRNATrimming) printf("\tAdaptor sequence : %s\n", Adaptor);
if(verbose)
{
if(AdaptorTrimming || gRNATrimming) printf("\tMismatches = %d\n", mismatches);
printf("\tTrimming: ");
if(QualityTrimming) printf("Quality ");
if(AdaptorTrimming) printf("Adaptors ");
if(gRNATrimming) printf("Find gRNA");
printf("\n");
if(QualityTrimming)
{
printf("\tMinimum accepted base quality = %d; average quality of read after bad base = %d\n", MIN_BASE_QUAL, MIN_QUAL_AFTER_BAD);
}
printf("\tFastQ Quality Encoding: ");
if(!auto_phred_detect) printf("PHRED+%d\n", PHRED); else printf("<AUTO DETECTION>\n");
printf("\tMAX_READ_LENGTH : %d\n", MAX_READ_LENGTH);
printf("\tMinimum read length for output = %d\n", MIN_OUT_READ_LEN);
if(overwrite_outputfile) printf("\tForce overwrite output file if it exists.\n");
}
if(numGPUs>0)
{
printf("\nUsing CUDA devices: ");
for(int n=0; n<numGPUs; n++) printf("[%d]", device[n]);
printf("\n");
}
}
int main(int argc, char *argv[])
{
time_t start_t = time(NULL); // for timing the process
time_t end_t;
int reads_processed = 0;
parseCommandLineArguments(argc, argv); // get parameters from the command line
if(numGPUs == 0)
{
if(FindGPUs() == 0) // check for GPUs
{
printf("\nERROR: No suitable CUDA devices found ... exiting\n");
exit(1);
}
else
{
if(numGPUs == 1)
printf("\nOne CUDA device with at least compute capability 3.5 and 6 GB memory found.\n");
else
printf("\n%d CUDA devices with at least compute capability 3.5 and 6 GB memory found.\n", numGPUs);
}
}
if(auto_phred_detect)
{
if(Detect_FastQ_Format_PHRED())
{
if(verbose) printf("<AUTO DETECTION> FastQ Quality Encoding: PHRED+%d\n", PHRED);
}
else
{
printf("\nERROR: Unable to detect FastQ format version.\n");
exit(32);
}
}
OpenFiles(); // access files
InitGPUs();
SetAdaptorSequence(Adaptor, mismatches, PHRED, MIN_BASE_QUAL, MIN_QUAL_AFTER_BAD);
reads_processed = PreprocessReadsOnGPU();
printf("\nReads processed: %d\n", reads_processed);
printf("Reads in input file %s : %d\n",input_filename, hNumberOfReads);
printf("Reads written to output file %s : %d\n", output_filename, hReadsWritten);
if(FastQ_WARNING) printf("Encountered potential issues with the FastQ file format in %s\n", input_filename);
CloseFiles();
FreeGPUs();
end_t = time(NULL);
printf("\nTotal time elapsed: %.0lf seconds\n", difftime(end_t, start_t));
// printf("\n<Press RTN key to exit>\n");
// getchar();
return(0);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
============================================================================
Name : review_chp4_2.cu
Author : freshield
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <stdio.h>
#define N 100
__global__ void add(int *a, int *b, int *c){
int tid = blockIdx.x;
if(tid < N){
c[tid] = a[tid] + b[tid];
}
}
__global__ void set_a(int *a){
int tid = blockIdx.x;
if(tid < N){
a[tid] = -tid;
}
}
__global__ void set_b(int *b){
int tid = blockIdx.x;
if(tid < N){
b[tid] = tid * tid;
}
}
int main(){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
cudaMalloc((void**)&dev_a, N * sizeof(int));
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
/*
for(int i = 0; i < N; i++){
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
*/
set_a<<<N,1>>>(dev_a);
set_b<<<N,1>>>(dev_b);
add<<<N,1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(a, dev_a, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(b, dev_b, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < N; i++){
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
/*
============================================================================
Name : review_chp4_2.cu
Author : freshield
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#define N 100
__global__ void add(int *a, int *b, int *c){
int tid = blockIdx.x;
if(tid < N){
c[tid] = a[tid] + b[tid];
}
}
__global__ void set_a(int *a){
int tid = blockIdx.x;
if(tid < N){
a[tid] = -tid;
}
}
__global__ void set_b(int *b){
int tid = blockIdx.x;
if(tid < N){
b[tid] = tid * tid;
}
}
int main(){
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
hipMalloc((void**)&dev_a, N * sizeof(int));
hipMalloc((void**)&dev_b, N * sizeof(int));
hipMalloc((void**)&dev_c, N * sizeof(int));
/*
for(int i = 0; i < N; i++){
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
*/
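// Note: hipcc accepts the CUDA-style <<<grid,block>>> launch syntax below; an
// equivalent portable form would be hipLaunchKernelGGL(set_a, dim3(N), dim3(1), 0, 0, dev_a).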
set_a<<<N,1>>>(dev_a);
set_b<<<N,1>>>(dev_b);
add<<<N,1>>>(dev_a, dev_b, dev_c);
hipMemcpy(a, dev_a, N * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(b, dev_b, N * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < N; i++){
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void empty_kernel()
{
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void empty_kernel()
{
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#include<stdlib.h>
#include "cuda.h"
__global__ void addVectors(int N, double *a, double *b, double *c) {
int thread = threadIdx.x;
int block = blockIdx.x;
int blockSize = blockDim.x;
int id = block*blockSize + thread;
__shared__ double s_a[32];
__shared__ double s_b[32];
__shared__ double s_c[32];
//populate shared memory cache
if (id<N) {
s_a[thread] = a[id];
s_b[thread] = b[id];
}
__syncthreads(); //make sure all threads have written to cache
//perform the addition
s_c[thread] = s_a[thread] + s_b[thread];
if (id<N) {
c[id] = s_c[thread];
}
}
int main (int argc, char** argv) {
int N = 100000; //vector size
int Nthreads = 32; //number of threads per block
int Nblocks = (N+Nthreads-1)/Nthreads; //WARNING integer division here
double *h_a, *h_b, *h_c; //host vectors
//allocate vectors
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
double *d_a, *d_b, *d_c; //device arrays
//allocate memory on device
cudaMalloc(&d_a, N*sizeof(double));
cudaMalloc(&d_b, N*sizeof(double));
cudaMalloc(&d_c, N*sizeof(double));
//populate our vectors a and b
for (int n=0;n<N;n++) {
h_a[n] = n;
h_b[n] = N-n;
}
//copy a and b to device
cudaMemcpy(d_a,h_a,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,N*sizeof(double),cudaMemcpyHostToDevice);
// c = a+b
// for (int n=0;n<N;n++) {
// c[n] = a[n] + b[n];
// }
//add the vectors on the device
addVectors<<<Nblocks,Nthreads>>>(N,d_a,d_b,d_c);
cudaMemcpy(h_c,d_c,N*sizeof(double),cudaMemcpyDeviceToHost);
int printId = 0;
printf("c[%d] = %f \n", printId, h_c[printId]);
//free up device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
//free up memory
free(h_a);
free(h_b);
free(h_c);
}
|
#include<stdio.h>
#include<stdlib.h>
#include "hip/hip_runtime.h"
__global__ void addVectors(int N, double *a, double *b, double *c) {
int thread = threadIdx.x;
int block = blockIdx.x;
int blockSize = blockDim.x;
int id = block*blockSize + thread;
__shared__ double s_a[32];
__shared__ double s_b[32];
__shared__ double s_c[32];
//populate shared memory cache
if (id<N) {
s_a[thread] = a[id];
s_b[thread] = b[id];
}
__syncthreads(); //make sure all threads have written to cache
//perform the addition
s_c[thread] = s_a[thread] + s_b[thread];
if (id<N) {
c[id] = s_c[thread];
}
}
int main (int argc, char** argv) {
int N = 100000; //vector size
int Nthreads = 32; //number of threads per block
int Nblocks = (N+Nthreads-1)/Nthreads; //WARNING integer division here
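// Note: Nthreads must stay equal to the fixed shared-memory tile size (32) declared
// inside addVectors, otherwise s_a/s_b/s_c would be indexed out of bounds.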
double *h_a, *h_b, *h_c; //host vectors
//allocate vectors
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
double *d_a, *d_b, *d_c; //device arrays
//allocate memory on device
hipMalloc(&d_a, N*sizeof(double));
hipMalloc(&d_b, N*sizeof(double));
hipMalloc(&d_c, N*sizeof(double));
//populate our vectors a and b
for (int n=0;n<N;n++) {
h_a[n] = n;
h_b[n] = N-n;
}
//copy a and b to device
hipMemcpy(d_a,h_a,N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_b,h_b,N*sizeof(double),hipMemcpyHostToDevice);
// c = a+b
// for (int n=0;n<N;n++) {
// c[n] = a[n] + b[n];
// }
//add the vectors on the device
addVectors<<<Nblocks,Nthreads>>>(N,d_a,d_b,d_c);
hipMemcpy(h_c,d_c,N*sizeof(double),hipMemcpyDeviceToHost);
int printId = 0;
printf("c[%d] = %f \n", printId, h_c[printId]);
//free up device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
//free up memory
free(h_a);
free(h_b);
free(h_c);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#define N 16
#include<stdio.h>
#include<stdlib.h>
__global__ void add(int *a, int *b, int *c){
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
printf("%d blockIdx=%d\n", c[blockIdx.x], blockIdx.x);
}
void random_ints(int *array, int size){
int i;
for(i = 0; i < size; i++)
array[i] = rand() % 10;
}
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
int i;
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
random_ints(a, N);
random_ints(b, N);
random_ints(c, N);
cudaMalloc((void **) &d_a, size);
cudaMalloc((void **) &d_b, size);
cudaMalloc((void **) &d_c, size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
add<<<N, 1>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
printf("\n\n\n");
for(i = 0; i < N; i++)
printf("%d index=%d\n", c[i], i);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
#define N 16
#include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
__global__ void add(int *a, int *b, int *c){
c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
printf("%d blockIdx=%d\n", c[blockIdx.x], blockIdx.x);
}
void random_ints(int *array, int size){
int i;
for(i = 0; i < size; i++)
array[i] = rand() % 10;
}
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
int i;
a = (int *) malloc(size);
b = (int *) malloc(size);
c = (int *) malloc(size);
random_ints(a, N);
random_ints(b, N);
random_ints(c, N);
hipMalloc((void **) &d_a, size);
hipMalloc((void **) &d_b, size);
hipMalloc((void **) &d_c, size);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
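// One block per element: inside add(), blockIdx.x indexes the arrays directly
// (N blocks of a single thread each).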
add<<<N, 1>>>(d_a, d_b, d_c);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
printf("\n\n\n");
for(i = 0; i < N; i++)
printf("%d index=%d\n", c[i], i);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__global__ void cudaAcc_GPS_kernel_mod3( int NumDataPoints, float2* FreqData, float* PowerSpectrum)
{
const int sidx = (blockIdx.x * blockDim.x + threadIdx.x);
float ax,ay;
if ( sidx < NumDataPoints )
{
ax = FreqData[sidx].x;
ay = FreqData[sidx].y;
PowerSpectrum[sidx] = __fadd_rn( __fmul_rn(ax,ax),__fmul_rn(ay,ay));
}
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__global__ void cudaAcc_GPS_kernel_mod3( int NumDataPoints, float2* FreqData, float* PowerSpectrum)
{
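// __fmul_rn/__fadd_rn are round-to-nearest float intrinsics; HIP provides the same
// names, so the power computation x*x + y*y ports unchanged.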
const int sidx = (blockIdx.x * blockDim.x + threadIdx.x);
float ax,ay;
if ( sidx < NumDataPoints )
{
ax = FreqData[sidx].x;
ay = FreqData[sidx].y;
PowerSpectrum[sidx] = __fadd_rn( __fmul_rn(ax,ax),__fmul_rn(ay,ay));
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "includes.h"
__device__ unsigned char value( float n1, float n2, int hue ) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char)(255 * (n1 + (n2-n1)*hue/60));
if (hue < 180)
return (unsigned char)(255 * n2);
if (hue < 240)
return (unsigned char)(255 * (n1 + (n2-n1)*(240-hue)/60));
return (unsigned char)(255 * n1);
}
__global__ void float_to_color( unsigned char *optr, const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset*4 + 0] = value( m1, m2, h+120 );
optr[offset*4 + 1] = value( m1, m2, h );
optr[offset*4 + 2] = value( m1, m2, h -120 );
optr[offset*4 + 3] = 255;
}
|
#include <hip/hip_runtime.h>
#include "includes.h"
__device__ unsigned char value( float n1, float n2, int hue ) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char)(255 * (n1 + (n2-n1)*hue/60));
if (hue < 180)
return (unsigned char)(255 * n2);
if (hue < 240)
return (unsigned char)(255 * (n1 + (n2-n1)*(240-hue)/60));
return (unsigned char)(255 * n1);
}
__global__ void float_to_color( unsigned char *optr, const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
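// HSL -> RGB: l (lightness) and h (hue) come from the simulation value; m1/m2 are
// the standard HSL helper terms passed to value() once per color channel.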
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset*4 + 0] = value( m1, m2, h+120 );
optr[offset*4 + 1] = value( m1, m2, h );
optr[offset*4 + 2] = value( m1, m2, h -120 );
optr[offset*4 + 3] = 255;
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include "3d-test.cuh"
#include<iostream>
#include<stdio.h>
__host__ __device__
void scalingFunction(int array[]) {
for(int i=0; i<8; i++) {
array[i] = array[i] * 2.0;
}
}
__host__ __device__
void distributeFunction(pencilComputation& p1,int x,int y){
for(int z=0; z<8; z++) {
p1.outputMatrix[x][y][z] = p1.pencilVector[z];
}
}
__host__ __device__
void tOPrint(int i,int j,int k) {
//pencilComputation p2;
printf("The value of i:%d, j:%d, inputMatrix:%d, pencilVector:%d",i,j);//,p2.inputMatrix[i][j][k],p2.pencilVector[k]);
}
__global__
void pencilComputationFunction(pencilComputation& p1){
//pencilComputation p1;
// auto p1 = *p1_in; // dereference the pointer
p1.DStats("At start of pencilComputationFunction call");
int i = threadIdx.x ;//+ blockIdx.x * blockDim.x;
int j = threadIdx.y ; // + blockIdx.y * blockDim.y;
int z = threadIdx.z;
for(int k=0; k<8 ; k++) {
p1.pencilVector[k] = p1.inputMatrix[i][j][k];
}
scalingFunction(p1.pencilVector);
p1.DStats("After scalingFunction in pencilComputationFunction call");
distributeFunction(p1,i,j);
p1.DStats("At END of pencilComputationFunction call");
// printf("**************************************\n");
}
void pencilComputation::launcher(){
//
// for(int i=0; i<2; i++) {
// for(int j=0; j<2; j++) {
// for(int k=0; k<2; k++) {
// inputMatrix[i][j][k] = 10;
// }
// }
// }
dim3 grid(1,1,1);
dim3 block(2,2,2);
// #pragma acc enter data copyin(inputMatrix) copyout(outputMatrix)
//pencilComputationFunction<<<grid,block>>>(*this);
pencilComputationFunction<<<1,1>>>(*this);
// pencilComputationFunction<<<1,1>>>();
cudaDeviceSynchronize();
}
__host__
void pencilComputation::CStats(const char* message) {
// Get sum, average and num-nonzero for each of inputMatrix & outputMatrix:
printf("[CPU] Stats: %s\n", message);
printf(" Sum Average NNZ\n");
int sumInput = 0;
int sumOutput = 0;
int nnzInput = 0;
int nnzOutput = 0;
for (int i = 0; i < matrix_size; i++) {
for (int j = 0; j < matrix_size; j++) {
for (int k = 0; k < matrix_size; k++) {
if (inputMatrix[i][j][k] != 0) { nnzInput++; }
if (outputMatrix[i][j][k] != 0) { nnzOutput++; }
sumInput += inputMatrix[i][j][k];
sumOutput += outputMatrix[i][j][k];
}
}
}
double averageInput = sumInput / (double)(matrix_size * matrix_size * matrix_size);
double averageOutput = sumOutput / (double)(matrix_size * matrix_size * matrix_size);
printf(" inputMatrix %d %g %d\n", sumInput, averageInput, nnzInput);
printf(" outputMatrix %d %g %d\n", sumOutput, averageOutput, nnzOutput);
int sumVector = 0;
int nnzVector = 0;
for (int i = 0; i < (matrix_size * matrix_size * matrix_size); i++) {
sumVector += pencilVector[i];
if (pencilVector[i] != 0) { nnzVector++; }
}
double averageVector = sumVector / (double)(matrix_size * matrix_size * matrix_size);
printf(" pencilVector %d %g %d\n", sumVector, averageVector, nnzVector);
printf("\n");
}
__device__
void pencilComputation::DStats(const char* message) {
// Get sum, average and num-nonzero for each of inputMatrix & outputMatrix:
printf("[Device] Matrix Stats: %s\n", message);
printf(" Sum Average NNZ\n");
int sumInput = 0;
int sumOutput = 0;
int nnzInput = 0;
int nnzOutput = 0;
for (int i = 0; i < matrix_size; i++) {
for (int j = 0; j < matrix_size; j++) {
for (int k = 0; k < matrix_size; k++) {
if (inputMatrix[i][j][k] != 0) { nnzInput++; }
if (outputMatrix[i][j][k] != 0) { nnzOutput++; }
sumInput += inputMatrix[i][j][k];
sumOutput += outputMatrix[i][j][k];
}
}
}
double averageInput = sumInput / (double)(matrix_size * matrix_size * matrix_size);
double averageOutput = sumOutput / (double)(matrix_size * matrix_size * matrix_size);
printf(" inputMatrix %d %g %d\n", sumInput, averageInput, nnzInput);
printf(" outputMatrix %d %g %d\n", sumOutput, averageOutput, nnzOutput);
int sumVector = 0;
int nnzVector = 0;
for (int i = 0; i < (matrix_size * matrix_size * matrix_size); i++) {
sumVector += pencilVector[i];
if (pencilVector[i] != 0) { nnzVector++; }
}
double averageVector = sumVector / (double)(matrix_size * matrix_size * matrix_size);
printf(" pencilVector %d %g %d\n", sumVector, averageVector, nnzVector);
printf("\n");
}
|
#include <hip/hip_runtime.h>
#include<iostream>
// Size of our matrix:
#define matrix_size 2
#define initial_value 1
class pencilComputation {
public:
int inputMatrix[matrix_size][matrix_size][matrix_size] = { { {initial_value} } };
int outputMatrix[matrix_size][matrix_size][matrix_size] = { { { 0 } } };
int pencilVector[matrix_size * matrix_size * matrix_size] = { 0 };
void launcher();
void CStats(const char *message);
void DStats(const char *message);
};
|
Convert the following CUDA code to AMD GPU code:
cuda
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{76.80,141.84},{73.91,133.16},{65.59,135.84},{77.08,144.27},
{83.32,166.24},{72.64,139.99},{69.42,137.04},{82.61,146.08},
{73.55,125.13},{68.93,133.75},{65.26,120.97},{78.19,141.47},
{ 1.86,40.06},{69.95,122.74},{ 2.32,35.00},{53.17,120.38},
{29.55,74.36},{73.51,119.81},{73.29,129.87},{99.93,182.89},
{28.58,80.09},{98.15,165.02},{87.87,154.79},{52.68,90.48},
{95.88,175.41},{85.56,155.25},{70.52,118.85},{ 2.72,20.43},
{58.10,100.25},{62.74,118.35},{18.29,50.38},{15.05,60.11},
{22.10,47.04},{25.33,65.98},{65.87,128.00},{51.66,127.14},
{79.95,133.20},{38.12,88.95},{98.50,159.87},{21.00,52.36},
{43.80,91.36},{85.22,138.76},{39.41,89.16},{15.74,26.40},
{67.15,108.89},{37.24,88.37},{35.35,67.93},{91.42,158.11},
{46.60,82.54},{37.68,85.78},{55.62,113.91},{ 2.02,21.36},
{84.91,166.55},{ 8.85,35.27},{ 6.00,35.70},{98.54,172.74},
{33.24,67.28},{15.37,54.73},{81.85,138.13},{13.21,35.51},
{18.19,39.85},{74.19,133.01},{84.49,162.54},{90.24,167.41},
{61.38,121.57},{20.98,61.98},{29.03,76.72},{53.11,110.71},
{38.99,82.43},{59.75,101.30},{25.68,53.90},{34.02,67.91},
{84.81,131.19},{77.47,145.16},{58.10,92.30},{56.57,94.05},
{74.41,158.89},{53.02,107.64},{23.68,77.01},{48.88,102.20},
{83.06,143.91},{15.93,61.90},{27.01,59.22},{78.96,134.04},
{75.43,127.47},{94.50,158.75},{40.92,78.41},{91.71,151.42},
{ 1.97,41.60},{11.47,45.38},{54.42,114.94},{80.83,150.03},
{30.04,64.02},{44.17,94.37},{10.27,43.22},{88.84,139.06},
{33.72,89.85},{97.14,172.86},{75.24,136.22},{58.14,130.17},
{71.66,146.04},{39.01,85.49},{12.53,74.58},{19.86,59.84},
{90.36,162.15},{42.05,85.60},{11.34,46.50},{38.25,82.33},
{56.03,102.81},{79.53,152.62},{45.92,73.14},{73.10,120.38},
{38.44,69.17},{ 3.18,46.09},{89.02,151.21},{79.64,140.20},
{59.32,115.04},{ 4.82,28.94},{22.23,90.79},{78.46,119.28},
{94.31,160.98},{76.89,141.23},{ 5.95,29.48},{67.27,133.32},
{44.10,89.40},{69.11,137.57},{79.19,151.24},{30.05,67.92},
{52.81,128.68},{65.71,116.48},{79.58,134.68},{56.10,103.72},
{25.96,87.41},{99.04,171.89},{55.01,100.03},{52.79,107.16},
{79.91,144.45},{32.81,65.02},{73.50,118.30},{64.85,123.67},
{67.87,114.03},{37.47,82.32},{13.30,56.88},{26.45,57.69},
{83.68,125.67},{ 2.12,39.21},{ 6.53,35.89},{76.61,118.93},
{11.18,29.40},{33.59,70.34},{49.78,110.01},{ 4.33,36.51},
{62.64,126.51},{17.96,64.00},{36.71,66.56},{88.59,159.96},
{95.07,175.93},{74.10,142.50},{74.76,135.49},{10.21,48.66},
{25.75,85.88},{50.01,94.95},{39.80,93.54},{14.44,62.55},
{79.73,147.16},{29.17,65.58},{18.52,52.66},{54.73,100.30},
{56.56,89.64},{87.15,137.47},{37.12,74.12},{62.75,120.94},
{60.14,110.71},{95.94,170.44},{66.99,137.00},{31.35,85.48},
{79.39,130.65},{40.20,91.54},{68.85,136.78},{16.09,58.59},
{39.57,77.44},{88.74,164.96},{51.84,74.76},{14.10,52.03},
{66.02,117.94},{ 8.71,49.97},{87.28,144.57},{34.63,63.25},
{80.07,154.95},{67.92,127.80},{ 1.57,38.91},{12.79,50.94},
{19.52,53.00},{68.04,127.41},{20.66,60.34},{48.99,117.19},
{20.29,60.77},{64.41,123.36},{52.94,101.32},{29.32,63.73},
{86.66,154.85},{73.95,127.20},{88.87,152.73},{80.97,146.15},
{53.59,100.46},{92.23,150.49},{61.22,120.55},{46.66,107.47},
{70.35,133.38},{77.13,146.97},{15.05,47.88},{15.43,63.59},
{60.54,131.30},{45.81,87.73},{76.11,144.77},{39.78,84.86},
{18.05,38.73},{96.55,179.51},{13.75,56.30},{71.24,133.26},
{ 6.04,48.27},{21.18,46.80},{53.76,123.66},{82.45,125.86},
{18.49,53.38},{10.93,58.21},{79.28,134.70},{90.84,163.49},
{88.23,157.72},{10.24,37.48},{ 4.06,34.97},{52.32,110.39},
{30.49,63.88},{32.90,77.32},{80.03,135.88},{ 7.99,39.79},
{46.58,75.04},{68.28,118.04},{36.46,79.32},{57.91,100.57},
{42.31,97.60},{73.06,135.84},{26.16,74.49},{58.33,122.36},
{21.83,59.63},{90.91,167.94},{67.31,103.49},{83.28,151.87},
{18.74,52.50},{25.28,87.07},{ 0.04,48.99},{15.70,57.91},
{69.08,122.75},{61.44,130.76},{99.28,170.25},{ 4.70,44.28},
{21.01,51.11},{83.12,148.84},{94.96,171.58},{52.57,102.65},
{73.17,141.20},{52.02,108.60},{89.72,160.15},{18.17,55.31},
{37.16,79.58},{85.51,165.97},{13.61,62.15},{50.21,115.56},
{37.08,71.23},{61.61,114.52},{50.45,91.25},{62.31,107.83},
{89.71,143.58},{24.52,50.59},{68.68,131.27},{64.42,129.75},
{15.32,50.66},{31.93,68.03},{73.46,139.28},{ 3.37,27.10},
{49.84,109.19},{15.24,52.48},{63.01,128.75},{87.87,163.91},
{72.28,129.27},{55.87,113.20},{50.08,98.45},{88.77,156.30},
{40.90,90.24},{52.45,121.75},{34.18,75.42},{ 2.08,41.22},
{97.76,164.01},{49.10,97.53},{ 5.78,58.18},{50.77,92.78},
{29.77,74.05},{57.32,95.04},{62.64,127.56},{58.64,115.55},
{39.39,109.48},{ 4.66,47.66},{16.72,56.61},{92.34,145.17},
{42.98,105.02},{85.37,144.96},{81.34,150.80},{69.35,113.25},
{13.61,55.21},{64.56,129.05},{99.87,174.79},{91.63,164.57},
{23.05,91.57},{ 5.46,43.28},{27.43,84.68},{52.33,90.64},
{20.48,69.31},{78.49,157.01},{99.77,179.69},{62.42,123.76},
{58.35,118.29},{14.99,70.97},{62.30,121.40},{22.72,60.52},
{99.76,161.94},{38.45,70.05},{97.83,166.09},{57.61,134.00},
{36.54,80.11},{88.36,165.33},{29.18,83.77},{57.23,108.37},
{72.49,135.62},{ 3.47,38.93},{65.63,129.64},{90.85,167.02},
{87.52,172.65},{ 4.62,37.46},{18.33,43.25},{75.19,153.75},
{45.61,100.25},{85.86,163.44},{55.67,111.10},{25.74,79.05},
{68.37,123.11},{28.28,69.28},{38.78,98.75},{41.30,74.09},
{ 8.75,51.61},{77.69,125.88},{32.13,65.51},{58.65,108.48},
{89.71,150.18},{47.96,93.88},{51.00,80.92},{46.89,103.89},
{46.26,96.89},{13.87,35.50},{49.68,82.47},{84.04,140.36},
{37.19,76.46},{ 5.07,56.07},{86.56,149.09},{92.96,159.47},
{40.03,82.41},{ 2.90,13.57},{49.34,98.62},{ 3.27,32.40},
{11.55,37.57},{97.95,159.99},{57.72,108.86},{57.86,110.39},
{98.70,169.60},{88.71,148.15},{19.49,65.21},{54.49,101.01},
{19.52,58.02},{46.56,79.03},{31.47,63.96},{61.20,128.64},
{40.12,94.46},{46.43,96.10},{95.94,161.45},{ 6.65,38.08},
{ 0.43,36.11},{20.73,67.54},{38.92,99.40},{86.38,161.23},
{66.40,123.71},{93.10,158.11},{99.87,171.41},{52.58,94.12},
{98.77,172.28},{96.98,177.97},{38.77,71.09},{81.98,138.21},
{95.55,158.03},{94.06,159.42},{73.09,136.27},{90.48,180.71},
{48.31,90.76},{19.54,72.85},{92.72,164.87},{13.27,36.49},
{ 6.85,33.02},{15.48,57.51},{ 1.16,13.57},{88.43,161.05},
{86.72,151.66},{63.94,112.18},{ 1.25,24.67},{74.26,138.29},
{ 1.10,29.32},{91.18,142.29},{38.38,92.64},{26.63,67.12},
{72.40,139.89},{ 8.29,31.60},{ 0.02,39.77},{91.48,151.26},
{42.17,86.16},{26.42,43.92},{40.27,91.64},{10.38,51.42},
{20.00,54.18},{78.75,145.54},{12.44,47.88},{95.58,176.01},
{27.10,66.61},{20.58,71.93},{97.79,156.01},{11.65,64.15},
{59.69,122.96},{35.39,81.41},{22.81,50.30},{16.16,46.29},
{84.75,142.39},{46.08,74.86},{25.67,52.99},{97.77,155.99},
{87.77,160.64},{33.83,67.16},{37.26,85.91},{74.81,128.92},
{68.78,132.78},{ 3.84,35.74},{21.67,53.12},{89.23,163.96},
{80.66,156.05},{ 2.80,31.53},{33.31,45.40},{41.13,87.83},
{23.59,74.18},{24.78,61.40},{78.06,125.39},{23.63,67.79},
{97.24,163.05},{57.61,92.44},{99.91,182.09},{81.92,142.72},
{ 3.80,39.87},{22.59,62.84},{40.81,89.25},{54.14,103.07},
{75.21,113.13},{49.96,95.61},{67.06,129.33},{55.40,87.85},
{31.59,75.65},{48.21,96.10},{41.34,99.65},{56.25,106.02},
{ 9.52,53.66},{70.69,131.01},{47.96,107.16},{18.06,52.70},
{20.40,43.03},{79.46,158.10},{22.82,68.78},{84.27,158.87},
{ 7.56,48.96},{21.12,68.79},{39.89,84.94},{86.02,147.43},
{14.47,64.44},{90.07,154.50},{63.38,133.42},{37.80,76.64},
{68.66,130.16},{62.35,131.18},{14.86,43.80},{ 6.96,17.52},
{16.70,50.42},{ 9.81,27.11},{12.19,36.12},{44.33,78.86},
{31.61,82.77},{97.48,168.20},{10.81,27.75},{13.75,56.21},
{34.29,80.84},{43.69,105.87},{54.68,108.96},{79.73,147.53},
{61.62,128.04},{73.20,127.82},{36.97,87.76},{12.32,58.22},
{34.46,100.48},{22.89,59.72},{84.91,151.54},{43.43,96.84},
{51.08,113.87},{92.00,143.99},{76.91,123.46},{45.28,88.12},
{27.89,79.00},{ 4.47,55.66},{25.29,66.38},{88.23,154.76},
{48.29,97.80},{73.62,116.98},{79.61,137.75},{86.57,154.09},
{67.17,129.19},{25.80,70.83},{87.25,161.52},{64.78,127.78},
{67.09,130.55},{85.80,135.92},{46.81,87.55},{71.45,149.02},
{75.36,137.01},{30.13,73.87},{ 7.97,45.84},{66.93,135.67},
{ 6.84,52.61},{63.42,119.19},{33.74,78.18},{ 6.98,39.25},
{98.47,171.90},{28.73,66.90},{94.63,157.45},{95.85,170.74},
{31.42,77.86},{10.33,43.96},{ 7.50,28.74},{85.43,160.97},
{72.92,120.06},{70.63,141.20},{89.19,154.32},{ 1.28,49.29},
{13.59,46.03},{61.11,125.53},{ 5.27,64.32},{19.77,44.45},
{95.49,158.30},{10.00,39.59},{97.35,181.66},{96.40,159.11},
{25.14,69.61},{89.18,141.99},{90.52,154.82},{69.02,143.17},
{72.48,135.19},{87.45,149.80},{97.18,163.59},{30.97,68.55},
{20.60,72.67},{47.12,94.02},{51.85,96.36},{23.80,78.13},
{87.26,150.01},{14.46,59.40},{99.77,144.05},{46.96,88.39},
{58.25,109.93},{85.37,147.30},{23.46,90.32},{98.69,171.96},
{16.95,46.18},{42.41,101.69},{10.42,59.19},{75.26,126.84},
{30.39,81.77},{37.02,93.26},{58.49,110.09},{89.10,162.93},
{68.61,132.29},{76.17,144.98},{45.37,91.14},{39.45,89.34},
{63.16,129.10},{19.58,53.00},{23.00,64.87},{88.56,157.52},
{80.32,141.54},{55.62,115.72},{49.44,109.66},{98.69,175.29},
{88.65,166.47},{59.01,127.46},{34.62,73.17},{41.17,99.55},
{87.75,147.26},{94.03,156.18},{55.08,108.49},{98.89,173.47},
{49.82,90.69},{87.73,160.65},{16.47,46.46},{41.34,79.62},
{83.15,166.44},{14.92,57.61},{21.80,67.82},{37.69,69.32},
{49.33,86.80},{90.91,147.04},{93.07,149.61},{25.44,59.18},
{17.22,49.18},{28.17,72.65},{ 0.77,38.97},{90.87,163.43},
{74.63,137.34},{16.55,49.30},{ 1.12,35.94},{91.42,163.41},
{ 7.28,48.60},{43.66,104.54},{ 2.20,40.26},{63.34,124.06},
{14.44,41.91},{21.21,88.98},{13.05,38.15},{90.07,165.55},
{14.23,59.03},{97.65,177.44},{52.59,89.72},{79.61,144.27},
{30.57,63.58},{99.86,169.58},{14.72,51.55},{31.54,70.10},
{59.28,109.68},{99.01,155.79},{ 4.13,26.79},{74.04,116.03},
{70.44,139.98},{64.71,123.78},{ 5.33,42.21},{71.19,126.62},
{50.18,98.86},{ 2.53,39.51},{23.81,77.92},{40.89,81.47},
{98.40,187.24},{39.88,73.90},{39.42,76.83},{30.46,75.54},
{59.20,109.15},{89.00,145.34},{46.42,88.82},{32.54,72.77},
{ 4.00,45.27},{ 4.85,30.22},{81.77,135.31},{ 0.16,30.49},
{67.78,133.13},{ 0.90,25.09},{58.59,118.38},{15.94,58.65},
{14.91,46.73},{43.82,89.21},{16.87,46.15},{43.14,96.83},
{ 6.28,27.61},{47.25,99.92},{ 4.17,57.60},{90.64,166.35},
{91.91,170.54},{ 8.13,34.07},{76.90,154.01},{12.52,41.40},
{95.64,176.97},{95.90,168.69},{88.69,167.66},{48.93,105.62},
{79.17,139.57},{67.41,107.70},{61.38,117.56},{89.48,166.48},
{19.16,57.11},{66.62,133.08},{44.79,102.21},{16.93,63.03},
{ 8.98,39.98},{66.95,123.43},{53.25,116.97},{93.25,163.17},
{ 1.37,32.85},{ 2.97,34.85},{80.87,150.60},{ 0.78,41.96},
{72.69,143.99},{26.02,85.06},{75.36,139.16},{85.18,162.42},
{36.34,73.88},{ 8.84,34.15},{84.81,148.96},{78.96,137.06},
{92.35,178.55},{54.26,127.97},{78.63,131.07},{59.43,105.79},
{52.22,96.59},{26.93,59.49},{50.87,91.55},{45.79,94.03},
{ 6.65,28.84},{56.94,103.37},{81.17,150.08},{35.22,80.75},
{25.29,67.81},{45.85,94.53},{88.97,170.12},{83.69,126.64},
{87.32,142.75},{95.98,184.02},{91.57,173.77},{31.69,64.55},
{ 3.54,23.12},{50.07,94.48},{18.35,47.95},{30.13,68.41},
{68.27,105.85},{93.84,164.65},{59.83,123.21},{11.37,48.82},
{16.11,42.53},{43.48,97.29},{46.11,93.28},{15.92,54.20},
{47.99,82.39},{52.76,92.39},{54.61,98.69},{26.05,62.64},
{ 2.70,27.78},{45.88,101.97},{69.70,133.74},{93.08,148.81},
{94.21,145.15},{26.78,87.99},{39.36,75.81},{62.67,103.44},
{60.39,105.91},{31.61,91.69},{46.66,102.22},{40.21,71.78},
{17.32,59.38},{89.24,159.24},{ 8.69,37.85},{41.27,94.31},
{92.40,160.41},{13.84,42.44},{90.70,156.55},{ 0.42,24.58},
{16.73,57.77},{98.89,164.23},{50.47,87.52},{61.55,99.37},
{66.83,139.43},{97.54,179.55},{78.85,130.58},{50.54,91.24},
{29.76,72.61},{76.44,150.84},{17.98,50.71},{60.01,128.80},
{86.74,135.73},{23.03,79.65},{90.98,148.41},{32.64,66.55},
{88.30,137.91},{72.69,131.75},{78.37,138.56},{ 3.06,46.75},
{47.35,94.38},{86.94,155.23},{56.80,110.40},{27.56,54.63},
{17.18,65.78},{88.88,160.44},{94.22,139.98},{38.53,89.02},
{65.36,112.75},{80.71,133.50},{15.96,42.45},{48.83,95.69},
{73.66,129.33},{45.90,98.06},{ 6.36,41.17},{ 7.74,32.66},
{ 9.30,42.57},{90.82,137.41},{19.67,52.81},{22.39,51.17},
{42.95,93.53},{65.18,116.03},{41.10,71.11},{ 8.09,29.31},
{84.62,146.49},{29.68,80.89},{50.05,97.61},{81.14,135.28},
{15.61,47.81},{98.10,186.60},{39.06,87.72},{80.94,131.21},
{15.49,33.59},{36.01,82.96},{20.29,78.53},{64.39,98.31},
{70.45,114.03},{50.06,104.96},{97.71,173.93},{67.51,126.77},
{27.84,68.02},{68.61,115.91},{94.33,163.94},{81.11,153.84},
{78.52,153.73},{51.69,126.17},{19.24,50.87},{27.23,75.02},
{17.33,62.66},{59.72,139.84},{36.70,80.89},{47.17,89.34},
{ 9.61,45.28},{45.38,84.42},{70.09,125.18},{27.52,78.87},
{12.20,36.42},{89.21,147.16},{44.13,91.63},{99.17,166.39},
{94.87,160.37},{24.21,75.30},{23.41,49.17},{62.28,109.53},
{13.91,49.57},{25.50,66.32},{63.04,121.17},{38.17,74.32},
{28.15,79.85},{77.84,157.44},{50.06,117.94},{88.97,164.45},
{58.29,121.06},{30.98,76.85},{54.15,108.46},{46.74,115.39},
{28.18,70.58},{98.37,157.20},{82.66,133.94},{34.16,79.28},
{71.70,139.93},{ 9.66,38.94},{20.02,70.45},{83.99,164.25},
{57.41,91.87},{93.45,161.27},{15.09,52.25},{46.67,104.19},
{15.83,48.09},{56.40,115.31},{75.99,129.90},{71.95,137.67},
{62.19,125.27},{64.79,128.82},{40.04,71.35},{37.52,78.35},
{57.41,110.12},{59.51,113.76},{82.35,155.78},{68.11,115.06},
{63.82,135.64},{79.09,132.29},{31.90,68.73},{86.51,140.48},
{94.15,165.22},{25.25,68.16},{85.44,148.52},{42.71,76.69},
{35.97,61.23},{64.06,114.99},{63.34,123.75},{45.82,103.23},
{45.00,91.90},{ 5.05,31.45},{79.00,131.76},{37.62,72.79},
{54.83,98.22},{ 2.45,42.63},{87.14,144.97},{16.61,58.22},
{25.40,67.97},{52.02,109.33},{94.70,165.30},{24.56,69.39},
{26.65,95.29},{20.21,74.69},{32.51,93.53},{77.67,150.18},
{ 7.97,53.99},{17.95,45.32},{14.08,44.40},{97.68,172.42},
{81.04,157.46},{67.94,124.06},{15.28,61.69},{65.24,111.24},
{ 9.81,47.35},{53.35,105.71},{51.27,116.77},{92.44,176.67},
{92.75,157.71},{96.63,170.59},{50.96,102.10},{12.59,56.64},
{87.99,154.97},{53.27,104.83},{89.34,156.25},{89.43,144.96},
{ 4.31,29.94},{38.53,76.07},{71.29,126.18},{48.55,98.93},
{75.68,134.51},{43.97,100.37},{49.42,94.90},{ 3.19,46.01},
{45.93,84.87},{55.20,99.30},{52.74,104.53},{65.60,126.25},
{ 1.83,30.62},{78.75,147.10},{44.84,90.34},{94.01,165.47},
{12.81,46.00},{ 3.20,46.31},{92.04,165.41},{24.39,70.09},
{76.21,145.59},{42.07,99.74},{ 7.83,32.08},{98.32,168.32},
{59.36,126.16},{63.97,128.90},{46.78,97.92},{ 6.73,29.83},
{19.71,40.05},{33.58,73.65},{95.76,177.24},{15.76,35.10},
{ 5.13,57.23},{80.36,145.85},{81.75,164.69},{ 1.42,38.61},
{49.30,97.65},{13.35,36.82},{27.95,63.49},{92.39,172.97},
{69.59,122.40},{79.07,153.47},{83.63,162.86},{37.18,88.83},
{69.71,134.76},{57.08,95.74},{88.42,154.68},{79.00,152.84},
{85.75,142.50},{57.33,108.36},{44.82,93.00},{56.97,102.79},
{36.56,73.41},{66.46,112.74},{ 4.01,59.76},{75.72,144.06},
{89.60,175.98},{90.10,153.07},{16.49,51.91},{87.96,128.17},
{31.01,67.42},{ 5.77,45.91},{ 2.92,34.29},{68.82,132.71}
};
double residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
__device__ double d_residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
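/* GPU counterpart of rms_error: d_rms_error below computes one squared
residual per thread (the host launches 100 blocks x 10 threads to cover
the 1000 data points) and the host reduces the partial results. */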
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0){
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(){
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be= rms_error(bm,bc);
error=cudaMalloc(&d_dm,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_dm returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_dc,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_dc returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error, //371
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
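/* Hill-climbing search: each pass evaluates the RMS error at the 8
neighbouring (m,c) offsets and moves to the best one; the loop stops
when no neighbour improves on the current error. */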
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i]= bc + (oc[i] * step);
}
error = cudaMemcpy(d_dm,dm,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_dm returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_dc,dc,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_dc returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_data, data,sizeof(data), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error,
cudaGetErrorString(error));
}
for(i=0;i<8;i++){
double h_error_sum_arr[1000];
double error_sum_total;
double error_sum_mean;
d_rms_error <<<100,10>>>(&d_dm[i],&d_dc[i],d_error_sum_arr,d_data);
cudaDeviceSynchronize();
error =cudaMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000),
cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error,
cudaGetErrorString(error));
}
for(int j=0;j<n_data;j++){
error_sum_total+= h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] =sqrt(error_sum_mean);
if(e[i] < best_error){
best_error = e[i];
error_sum_total +=h_error_sum_arr[i];
}
error_sum_mean = error_sum_total /n_data;//431
e[i] = sqrt(error_sum_mean); //432
if(e[i]<best_error){ //434
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0; //438
}
if(best_error <be){
be=best_error;
bm =dm[best_error_i];
bc= dc[best_error_i];
}else {
minimum_found = 1;
}
}
error = cudaFree(d_dm);
if(error){
fprintf(stderr,"cudaFree on d_dm returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_dc);
if(error){
fprintf(stderr,"cudaFree on d_dc returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_data);
if(error){
fprintf(stderr,"cudaFree on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
;
|
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{76.80,141.84},{73.91,133.16},{65.59,135.84},{77.08,144.27},
{83.32,166.24},{72.64,139.99},{69.42,137.04},{82.61,146.08},
{73.55,125.13},{68.93,133.75},{65.26,120.97},{78.19,141.47},
{ 1.86,40.06},{69.95,122.74},{ 2.32,35.00},{53.17,120.38},
{29.55,74.36},{73.51,119.81},{73.29,129.87},{99.93,182.89},
{28.58,80.09},{98.15,165.02},{87.87,154.79},{52.68,90.48},
{95.88,175.41},{85.56,155.25},{70.52,118.85},{ 2.72,20.43},
{58.10,100.25},{62.74,118.35},{18.29,50.38},{15.05,60.11},
{22.10,47.04},{25.33,65.98},{65.87,128.00},{51.66,127.14},
{79.95,133.20},{38.12,88.95},{98.50,159.87},{21.00,52.36},
{43.80,91.36},{85.22,138.76},{39.41,89.16},{15.74,26.40},
{67.15,108.89},{37.24,88.37},{35.35,67.93},{91.42,158.11},
{46.60,82.54},{37.68,85.78},{55.62,113.91},{ 2.02,21.36},
{84.91,166.55},{ 8.85,35.27},{ 6.00,35.70},{98.54,172.74},
{33.24,67.28},{15.37,54.73},{81.85,138.13},{13.21,35.51},
{18.19,39.85},{74.19,133.01},{84.49,162.54},{90.24,167.41},
{61.38,121.57},{20.98,61.98},{29.03,76.72},{53.11,110.71},
{38.99,82.43},{59.75,101.30},{25.68,53.90},{34.02,67.91},
{84.81,131.19},{77.47,145.16},{58.10,92.30},{56.57,94.05},
{74.41,158.89},{53.02,107.64},{23.68,77.01},{48.88,102.20},
{83.06,143.91},{15.93,61.90},{27.01,59.22},{78.96,134.04},
{75.43,127.47},{94.50,158.75},{40.92,78.41},{91.71,151.42},
{ 1.97,41.60},{11.47,45.38},{54.42,114.94},{80.83,150.03},
{30.04,64.02},{44.17,94.37},{10.27,43.22},{88.84,139.06},
{33.72,89.85},{97.14,172.86},{75.24,136.22},{58.14,130.17},
{71.66,146.04},{39.01,85.49},{12.53,74.58},{19.86,59.84},
{90.36,162.15},{42.05,85.60},{11.34,46.50},{38.25,82.33},
{56.03,102.81},{79.53,152.62},{45.92,73.14},{73.10,120.38},
{38.44,69.17},{ 3.18,46.09},{89.02,151.21},{79.64,140.20},
{59.32,115.04},{ 4.82,28.94},{22.23,90.79},{78.46,119.28},
{94.31,160.98},{76.89,141.23},{ 5.95,29.48},{67.27,133.32},
{44.10,89.40},{69.11,137.57},{79.19,151.24},{30.05,67.92},
{52.81,128.68},{65.71,116.48},{79.58,134.68},{56.10,103.72},
{25.96,87.41},{99.04,171.89},{55.01,100.03},{52.79,107.16},
{79.91,144.45},{32.81,65.02},{73.50,118.30},{64.85,123.67},
{67.87,114.03},{37.47,82.32},{13.30,56.88},{26.45,57.69},
{83.68,125.67},{ 2.12,39.21},{ 6.53,35.89},{76.61,118.93},
{11.18,29.40},{33.59,70.34},{49.78,110.01},{ 4.33,36.51},
{62.64,126.51},{17.96,64.00},{36.71,66.56},{88.59,159.96},
{95.07,175.93},{74.10,142.50},{74.76,135.49},{10.21,48.66},
{25.75,85.88},{50.01,94.95},{39.80,93.54},{14.44,62.55},
{79.73,147.16},{29.17,65.58},{18.52,52.66},{54.73,100.30},
{56.56,89.64},{87.15,137.47},{37.12,74.12},{62.75,120.94},
{60.14,110.71},{95.94,170.44},{66.99,137.00},{31.35,85.48},
{79.39,130.65},{40.20,91.54},{68.85,136.78},{16.09,58.59},
{39.57,77.44},{88.74,164.96},{51.84,74.76},{14.10,52.03},
{66.02,117.94},{ 8.71,49.97},{87.28,144.57},{34.63,63.25},
{80.07,154.95},{67.92,127.80},{ 1.57,38.91},{12.79,50.94},
{19.52,53.00},{68.04,127.41},{20.66,60.34},{48.99,117.19},
{20.29,60.77},{64.41,123.36},{52.94,101.32},{29.32,63.73},
{86.66,154.85},{73.95,127.20},{88.87,152.73},{80.97,146.15},
{53.59,100.46},{92.23,150.49},{61.22,120.55},{46.66,107.47},
{70.35,133.38},{77.13,146.97},{15.05,47.88},{15.43,63.59},
{60.54,131.30},{45.81,87.73},{76.11,144.77},{39.78,84.86},
{18.05,38.73},{96.55,179.51},{13.75,56.30},{71.24,133.26},
{ 6.04,48.27},{21.18,46.80},{53.76,123.66},{82.45,125.86},
{18.49,53.38},{10.93,58.21},{79.28,134.70},{90.84,163.49},
{88.23,157.72},{10.24,37.48},{ 4.06,34.97},{52.32,110.39},
{30.49,63.88},{32.90,77.32},{80.03,135.88},{ 7.99,39.79},
{46.58,75.04},{68.28,118.04},{36.46,79.32},{57.91,100.57},
{42.31,97.60},{73.06,135.84},{26.16,74.49},{58.33,122.36},
{21.83,59.63},{90.91,167.94},{67.31,103.49},{83.28,151.87},
{18.74,52.50},{25.28,87.07},{ 0.04,48.99},{15.70,57.91},
{69.08,122.75},{61.44,130.76},{99.28,170.25},{ 4.70,44.28},
{21.01,51.11},{83.12,148.84},{94.96,171.58},{52.57,102.65},
{73.17,141.20},{52.02,108.60},{89.72,160.15},{18.17,55.31},
{37.16,79.58},{85.51,165.97},{13.61,62.15},{50.21,115.56},
{37.08,71.23},{61.61,114.52},{50.45,91.25},{62.31,107.83},
{89.71,143.58},{24.52,50.59},{68.68,131.27},{64.42,129.75},
{15.32,50.66},{31.93,68.03},{73.46,139.28},{ 3.37,27.10},
{49.84,109.19},{15.24,52.48},{63.01,128.75},{87.87,163.91},
{72.28,129.27},{55.87,113.20},{50.08,98.45},{88.77,156.30},
{40.90,90.24},{52.45,121.75},{34.18,75.42},{ 2.08,41.22},
{97.76,164.01},{49.10,97.53},{ 5.78,58.18},{50.77,92.78},
{29.77,74.05},{57.32,95.04},{62.64,127.56},{58.64,115.55},
{39.39,109.48},{ 4.66,47.66},{16.72,56.61},{92.34,145.17},
{42.98,105.02},{85.37,144.96},{81.34,150.80},{69.35,113.25},
{13.61,55.21},{64.56,129.05},{99.87,174.79},{91.63,164.57},
{23.05,91.57},{ 5.46,43.28},{27.43,84.68},{52.33,90.64},
{20.48,69.31},{78.49,157.01},{99.77,179.69},{62.42,123.76},
{58.35,118.29},{14.99,70.97},{62.30,121.40},{22.72,60.52},
{99.76,161.94},{38.45,70.05},{97.83,166.09},{57.61,134.00},
{36.54,80.11},{88.36,165.33},{29.18,83.77},{57.23,108.37},
{72.49,135.62},{ 3.47,38.93},{65.63,129.64},{90.85,167.02},
{87.52,172.65},{ 4.62,37.46},{18.33,43.25},{75.19,153.75},
{45.61,100.25},{85.86,163.44},{55.67,111.10},{25.74,79.05},
{68.37,123.11},{28.28,69.28},{38.78,98.75},{41.30,74.09},
{ 8.75,51.61},{77.69,125.88},{32.13,65.51},{58.65,108.48},
{89.71,150.18},{47.96,93.88},{51.00,80.92},{46.89,103.89},
{46.26,96.89},{13.87,35.50},{49.68,82.47},{84.04,140.36},
{37.19,76.46},{ 5.07,56.07},{86.56,149.09},{92.96,159.47},
{40.03,82.41},{ 2.90,13.57},{49.34,98.62},{ 3.27,32.40},
{11.55,37.57},{97.95,159.99},{57.72,108.86},{57.86,110.39},
{98.70,169.60},{88.71,148.15},{19.49,65.21},{54.49,101.01},
{19.52,58.02},{46.56,79.03},{31.47,63.96},{61.20,128.64},
{40.12,94.46},{46.43,96.10},{95.94,161.45},{ 6.65,38.08},
{ 0.43,36.11},{20.73,67.54},{38.92,99.40},{86.38,161.23},
{66.40,123.71},{93.10,158.11},{99.87,171.41},{52.58,94.12},
{98.77,172.28},{96.98,177.97},{38.77,71.09},{81.98,138.21},
{95.55,158.03},{94.06,159.42},{73.09,136.27},{90.48,180.71},
{48.31,90.76},{19.54,72.85},{92.72,164.87},{13.27,36.49},
{ 6.85,33.02},{15.48,57.51},{ 1.16,13.57},{88.43,161.05},
{86.72,151.66},{63.94,112.18},{ 1.25,24.67},{74.26,138.29},
{ 1.10,29.32},{91.18,142.29},{38.38,92.64},{26.63,67.12},
{72.40,139.89},{ 8.29,31.60},{ 0.02,39.77},{91.48,151.26},
{42.17,86.16},{26.42,43.92},{40.27,91.64},{10.38,51.42},
{20.00,54.18},{78.75,145.54},{12.44,47.88},{95.58,176.01},
{27.10,66.61},{20.58,71.93},{97.79,156.01},{11.65,64.15},
{59.69,122.96},{35.39,81.41},{22.81,50.30},{16.16,46.29},
{84.75,142.39},{46.08,74.86},{25.67,52.99},{97.77,155.99},
{87.77,160.64},{33.83,67.16},{37.26,85.91},{74.81,128.92},
{68.78,132.78},{ 3.84,35.74},{21.67,53.12},{89.23,163.96},
{80.66,156.05},{ 2.80,31.53},{33.31,45.40},{41.13,87.83},
{23.59,74.18},{24.78,61.40},{78.06,125.39},{23.63,67.79},
{97.24,163.05},{57.61,92.44},{99.91,182.09},{81.92,142.72},
{ 3.80,39.87},{22.59,62.84},{40.81,89.25},{54.14,103.07},
{75.21,113.13},{49.96,95.61},{67.06,129.33},{55.40,87.85},
{31.59,75.65},{48.21,96.10},{41.34,99.65},{56.25,106.02},
{ 9.52,53.66},{70.69,131.01},{47.96,107.16},{18.06,52.70},
{20.40,43.03},{79.46,158.10},{22.82,68.78},{84.27,158.87},
{ 7.56,48.96},{21.12,68.79},{39.89,84.94},{86.02,147.43},
{14.47,64.44},{90.07,154.50},{63.38,133.42},{37.80,76.64},
{68.66,130.16},{62.35,131.18},{14.86,43.80},{ 6.96,17.52},
{16.70,50.42},{ 9.81,27.11},{12.19,36.12},{44.33,78.86},
{31.61,82.77},{97.48,168.20},{10.81,27.75},{13.75,56.21},
{34.29,80.84},{43.69,105.87},{54.68,108.96},{79.73,147.53},
{61.62,128.04},{73.20,127.82},{36.97,87.76},{12.32,58.22},
{34.46,100.48},{22.89,59.72},{84.91,151.54},{43.43,96.84},
{51.08,113.87},{92.00,143.99},{76.91,123.46},{45.28,88.12},
{27.89,79.00},{ 4.47,55.66},{25.29,66.38},{88.23,154.76},
{48.29,97.80},{73.62,116.98},{79.61,137.75},{86.57,154.09},
{67.17,129.19},{25.80,70.83},{87.25,161.52},{64.78,127.78},
{67.09,130.55},{85.80,135.92},{46.81,87.55},{71.45,149.02},
{75.36,137.01},{30.13,73.87},{ 7.97,45.84},{66.93,135.67},
{ 6.84,52.61},{63.42,119.19},{33.74,78.18},{ 6.98,39.25},
{98.47,171.90},{28.73,66.90},{94.63,157.45},{95.85,170.74},
{31.42,77.86},{10.33,43.96},{ 7.50,28.74},{85.43,160.97},
{72.92,120.06},{70.63,141.20},{89.19,154.32},{ 1.28,49.29},
{13.59,46.03},{61.11,125.53},{ 5.27,64.32},{19.77,44.45},
{95.49,158.30},{10.00,39.59},{97.35,181.66},{96.40,159.11},
{25.14,69.61},{89.18,141.99},{90.52,154.82},{69.02,143.17},
{72.48,135.19},{87.45,149.80},{97.18,163.59},{30.97,68.55},
{20.60,72.67},{47.12,94.02},{51.85,96.36},{23.80,78.13},
{87.26,150.01},{14.46,59.40},{99.77,144.05},{46.96,88.39},
{58.25,109.93},{85.37,147.30},{23.46,90.32},{98.69,171.96},
{16.95,46.18},{42.41,101.69},{10.42,59.19},{75.26,126.84},
{30.39,81.77},{37.02,93.26},{58.49,110.09},{89.10,162.93},
{68.61,132.29},{76.17,144.98},{45.37,91.14},{39.45,89.34},
{63.16,129.10},{19.58,53.00},{23.00,64.87},{88.56,157.52},
{80.32,141.54},{55.62,115.72},{49.44,109.66},{98.69,175.29},
{88.65,166.47},{59.01,127.46},{34.62,73.17},{41.17,99.55},
{87.75,147.26},{94.03,156.18},{55.08,108.49},{98.89,173.47},
{49.82,90.69},{87.73,160.65},{16.47,46.46},{41.34,79.62},
{83.15,166.44},{14.92,57.61},{21.80,67.82},{37.69,69.32},
{49.33,86.80},{90.91,147.04},{93.07,149.61},{25.44,59.18},
{17.22,49.18},{28.17,72.65},{ 0.77,38.97},{90.87,163.43},
{74.63,137.34},{16.55,49.30},{ 1.12,35.94},{91.42,163.41},
{ 7.28,48.60},{43.66,104.54},{ 2.20,40.26},{63.34,124.06},
{14.44,41.91},{21.21,88.98},{13.05,38.15},{90.07,165.55},
{14.23,59.03},{97.65,177.44},{52.59,89.72},{79.61,144.27},
{30.57,63.58},{99.86,169.58},{14.72,51.55},{31.54,70.10},
{59.28,109.68},{99.01,155.79},{ 4.13,26.79},{74.04,116.03},
{70.44,139.98},{64.71,123.78},{ 5.33,42.21},{71.19,126.62},
{50.18,98.86},{ 2.53,39.51},{23.81,77.92},{40.89,81.47},
{98.40,187.24},{39.88,73.90},{39.42,76.83},{30.46,75.54},
{59.20,109.15},{89.00,145.34},{46.42,88.82},{32.54,72.77},
{ 4.00,45.27},{ 4.85,30.22},{81.77,135.31},{ 0.16,30.49},
{67.78,133.13},{ 0.90,25.09},{58.59,118.38},{15.94,58.65},
{14.91,46.73},{43.82,89.21},{16.87,46.15},{43.14,96.83},
{ 6.28,27.61},{47.25,99.92},{ 4.17,57.60},{90.64,166.35},
{91.91,170.54},{ 8.13,34.07},{76.90,154.01},{12.52,41.40},
{95.64,176.97},{95.90,168.69},{88.69,167.66},{48.93,105.62},
{79.17,139.57},{67.41,107.70},{61.38,117.56},{89.48,166.48},
{19.16,57.11},{66.62,133.08},{44.79,102.21},{16.93,63.03},
{ 8.98,39.98},{66.95,123.43},{53.25,116.97},{93.25,163.17},
{ 1.37,32.85},{ 2.97,34.85},{80.87,150.60},{ 0.78,41.96},
{72.69,143.99},{26.02,85.06},{75.36,139.16},{85.18,162.42},
{36.34,73.88},{ 8.84,34.15},{84.81,148.96},{78.96,137.06},
{92.35,178.55},{54.26,127.97},{78.63,131.07},{59.43,105.79},
{52.22,96.59},{26.93,59.49},{50.87,91.55},{45.79,94.03},
{ 6.65,28.84},{56.94,103.37},{81.17,150.08},{35.22,80.75},
{25.29,67.81},{45.85,94.53},{88.97,170.12},{83.69,126.64},
{87.32,142.75},{95.98,184.02},{91.57,173.77},{31.69,64.55},
{ 3.54,23.12},{50.07,94.48},{18.35,47.95},{30.13,68.41},
{68.27,105.85},{93.84,164.65},{59.83,123.21},{11.37,48.82},
{16.11,42.53},{43.48,97.29},{46.11,93.28},{15.92,54.20},
{47.99,82.39},{52.76,92.39},{54.61,98.69},{26.05,62.64},
{ 2.70,27.78},{45.88,101.97},{69.70,133.74},{93.08,148.81},
{94.21,145.15},{26.78,87.99},{39.36,75.81},{62.67,103.44},
{60.39,105.91},{31.61,91.69},{46.66,102.22},{40.21,71.78},
{17.32,59.38},{89.24,159.24},{ 8.69,37.85},{41.27,94.31},
{92.40,160.41},{13.84,42.44},{90.70,156.55},{ 0.42,24.58},
{16.73,57.77},{98.89,164.23},{50.47,87.52},{61.55,99.37},
{66.83,139.43},{97.54,179.55},{78.85,130.58},{50.54,91.24},
{29.76,72.61},{76.44,150.84},{17.98,50.71},{60.01,128.80},
{86.74,135.73},{23.03,79.65},{90.98,148.41},{32.64,66.55},
{88.30,137.91},{72.69,131.75},{78.37,138.56},{ 3.06,46.75},
{47.35,94.38},{86.94,155.23},{56.80,110.40},{27.56,54.63},
{17.18,65.78},{88.88,160.44},{94.22,139.98},{38.53,89.02},
{65.36,112.75},{80.71,133.50},{15.96,42.45},{48.83,95.69},
{73.66,129.33},{45.90,98.06},{ 6.36,41.17},{ 7.74,32.66},
{ 9.30,42.57},{90.82,137.41},{19.67,52.81},{22.39,51.17},
{42.95,93.53},{65.18,116.03},{41.10,71.11},{ 8.09,29.31},
{84.62,146.49},{29.68,80.89},{50.05,97.61},{81.14,135.28},
{15.61,47.81},{98.10,186.60},{39.06,87.72},{80.94,131.21},
{15.49,33.59},{36.01,82.96},{20.29,78.53},{64.39,98.31},
{70.45,114.03},{50.06,104.96},{97.71,173.93},{67.51,126.77},
{27.84,68.02},{68.61,115.91},{94.33,163.94},{81.11,153.84},
{78.52,153.73},{51.69,126.17},{19.24,50.87},{27.23,75.02},
{17.33,62.66},{59.72,139.84},{36.70,80.89},{47.17,89.34},
{ 9.61,45.28},{45.38,84.42},{70.09,125.18},{27.52,78.87},
{12.20,36.42},{89.21,147.16},{44.13,91.63},{99.17,166.39},
{94.87,160.37},{24.21,75.30},{23.41,49.17},{62.28,109.53},
{13.91,49.57},{25.50,66.32},{63.04,121.17},{38.17,74.32},
{28.15,79.85},{77.84,157.44},{50.06,117.94},{88.97,164.45},
{58.29,121.06},{30.98,76.85},{54.15,108.46},{46.74,115.39},
{28.18,70.58},{98.37,157.20},{82.66,133.94},{34.16,79.28},
{71.70,139.93},{ 9.66,38.94},{20.02,70.45},{83.99,164.25},
{57.41,91.87},{93.45,161.27},{15.09,52.25},{46.67,104.19},
{15.83,48.09},{56.40,115.31},{75.99,129.90},{71.95,137.67},
{62.19,125.27},{64.79,128.82},{40.04,71.35},{37.52,78.35},
{57.41,110.12},{59.51,113.76},{82.35,155.78},{68.11,115.06},
{63.82,135.64},{79.09,132.29},{31.90,68.73},{86.51,140.48},
{94.15,165.22},{25.25,68.16},{85.44,148.52},{42.71,76.69},
{35.97,61.23},{64.06,114.99},{63.34,123.75},{45.82,103.23},
{45.00,91.90},{ 5.05,31.45},{79.00,131.76},{37.62,72.79},
{54.83,98.22},{ 2.45,42.63},{87.14,144.97},{16.61,58.22},
{25.40,67.97},{52.02,109.33},{94.70,165.30},{24.56,69.39},
{26.65,95.29},{20.21,74.69},{32.51,93.53},{77.67,150.18},
{ 7.97,53.99},{17.95,45.32},{14.08,44.40},{97.68,172.42},
{81.04,157.46},{67.94,124.06},{15.28,61.69},{65.24,111.24},
{ 9.81,47.35},{53.35,105.71},{51.27,116.77},{92.44,176.67},
{92.75,157.71},{96.63,170.59},{50.96,102.10},{12.59,56.64},
{87.99,154.97},{53.27,104.83},{89.34,156.25},{89.43,144.96},
{ 4.31,29.94},{38.53,76.07},{71.29,126.18},{48.55,98.93},
{75.68,134.51},{43.97,100.37},{49.42,94.90},{ 3.19,46.01},
{45.93,84.87},{55.20,99.30},{52.74,104.53},{65.60,126.25},
{ 1.83,30.62},{78.75,147.10},{44.84,90.34},{94.01,165.47},
{12.81,46.00},{ 3.20,46.31},{92.04,165.41},{24.39,70.09},
{76.21,145.59},{42.07,99.74},{ 7.83,32.08},{98.32,168.32},
{59.36,126.16},{63.97,128.90},{46.78,97.92},{ 6.73,29.83},
{19.71,40.05},{33.58,73.65},{95.76,177.24},{15.76,35.10},
{ 5.13,57.23},{80.36,145.85},{81.75,164.69},{ 1.42,38.61},
{49.30,97.65},{13.35,36.82},{27.95,63.49},{92.39,172.97},
{69.59,122.40},{79.07,153.47},{83.63,162.86},{37.18,88.83},
{69.71,134.76},{57.08,95.74},{88.42,154.68},{79.00,152.84},
{85.75,142.50},{57.33,108.36},{44.82,93.00},{56.97,102.79},
{36.56,73.41},{66.46,112.74},{ 4.01,59.76},{75.72,144.06},
{89.60,175.98},{90.10,153.07},{16.49,51.91},{87.96,128.17},
{31.01,67.42},{ 5.77,45.91},{ 2.92,34.29},{68.82,132.71}
};
double residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
__device__ double d_residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
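/* GPU counterpart of rms_error: d_rms_error below computes one squared
residual per thread (the host launches 100 blocks x 10 threads to cover
the 1000 data points) and the host reduces the partial results. */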
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0){
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(){
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be= rms_error(bm,bc);
error=hipMalloc(&d_dm,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_dm returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_dc,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_dc returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"hipMalloc on d_error_sum_arr returned %d %s\n",error, //371
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"hipMalloc on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
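/* Hill-climbing search: each pass evaluates the RMS error at the 8
neighbouring (m,c) offsets and moves to the best one; the loop stops
when no neighbour improves on the current error. */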
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i]= bc + (oc[i] * step);
}
error = hipMemcpy(d_dm,dm,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_dm returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_dc,dc,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_dc returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_data, data,sizeof(data), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_data returned %d %s\n",error,
hipGetErrorString(error));
}
for(i=0;i<8;i++){
double h_error_sum_arr[1000];
double error_sum_total;
double error_sum_mean;
d_rms_error <<<100,10>>>(&d_dm[i],&d_dc[i],d_error_sum_arr,d_data);
hipDeviceSynchronize();
error =hipMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000),
hipMemcpyDeviceToHost);
if(error){
fprintf(stderr,"hipMemcpy to error_sum returned %d %s\n",error,
hipGetErrorString(error));
}
for(int j=0;j<n_data;j++){
error_sum_total+= h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] =sqrt(error_sum_mean);
if(e[i] < best_error){
best_error = e[i];
error_sum_total +=h_error_sum_arr[i];
}
error_sum_mean = error_sum_total /n_data;//431
e[i] = sqrt(error_sum_mean); //432
if(e[i]<best_error){ //434
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0; //438
}
if(best_error <be){
be=best_error;
bm =dm[best_error_i];
bc= dc[best_error_i];
}else {
minimum_found = 1;
}
}
error = hipFree(d_dm);
if(error){
fprintf(stderr,"hipFree on d_dm returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_dc);
if(error){
fprintf(stderr,"hipFree on d_dc returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_data);
if(error){
fprintf(stderr,"hipFree on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr,"hipFree on d_error_sum_arr returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
;
|
Convert the following CUDA code to AMD GPU code:
cuda
/*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
namespace chainer_trt {
namespace plugin {
__global__ void transpose_kernel(const float* d_src, float* d_dst,
int* d_indexes, int in_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < in_size)
d_dst[blockIdx.y * in_size + d_indexes[idx]] =
d_src[blockIdx.y * in_size + idx];
}
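// Builds the index map consumed by transpose_kernel: for every flat input
// index, compute the flat output index after permuting the dimensions
// according to `shuffle`, using the input and output strides.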
__global__ void transpose_indexes(int* d_dst, int* i_strides, int* shuffle,
int* i_d, int* o_strides, int id_size,
int in_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // _h
if(idx < in_size) {
int out_idx = 0;
for(int i = 0; i < id_size; i++)
out_idx += (idx / i_strides[shuffle[i]] % i_d[shuffle[i]]) *
o_strides[i];
d_dst[idx] = out_idx;
}
}
void apply_transpose(const float* d_src, float* d_dst, int* d_indexes,
int in_size, int batch_size, cudaStream_t stream) {
const int thread_size = 1024;
const int block_size = (int)std::ceil(1.0 * in_size / thread_size);
dim3 grid(block_size, batch_size);
transpose_kernel<<<grid, thread_size, 0, stream>>>(d_src, d_dst,
d_indexes, in_size);
}
void initialize_transpose_indexes(int* d_dst, int* i_strides, int* shuffle,
int* i_d, int* o_strides, int in_size,
int id_size) {
const int thread_size = 1024;
const int block_size = (int)std::ceil(1.0 * in_size / thread_size);
transpose_indexes<<<block_size, thread_size>>>(
d_dst, i_strides, shuffle, i_d, o_strides, id_size, in_size);
}
}
}
|
#include <hip/hip_runtime.h>
/*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
namespace chainer_trt {
namespace plugin {
__global__ void transpose_kernel(const float* d_src, float* d_dst,
int* d_indexes, int in_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < in_size)
d_dst[blockIdx.y * in_size + d_indexes[idx]] =
d_src[blockIdx.y * in_size + idx];
}
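// Builds the index map consumed by transpose_kernel: for every flat input
// index, compute the flat output index after permuting the dimensions
// according to `shuffle`, using the input and output strides.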
__global__ void transpose_indexes(int* d_dst, int* i_strides, int* shuffle,
int* i_d, int* o_strides, int id_size,
int in_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // _h
if(idx < in_size) {
int out_idx = 0;
for(int i = 0; i < id_size; i++)
out_idx += (idx / i_strides[shuffle[i]] % i_d[shuffle[i]]) *
o_strides[i];
d_dst[idx] = out_idx;
}
}
void apply_transpose(const float* d_src, float* d_dst, int* d_indexes,
int in_size, int batch_size, hipStream_t stream) {
const int thread_size = 1024;
const int block_size = (int)std::ceil(1.0 * in_size / thread_size);
dim3 grid(block_size, batch_size);
transpose_kernel<<<grid, thread_size, 0, stream>>>(d_src, d_dst,
d_indexes, in_size);
}
void initialize_transpose_indexes(int* d_dst, int* i_strides, int* shuffle,
int* i_d, int* o_strides, int in_size,
int id_size) {
const int thread_size = 1024;
const int block_size = (int)std::ceil(1.0 * in_size / thread_size);
transpose_indexes<<<block_size, thread_size>>>(
d_dst, i_strides, shuffle, i_d, o_strides, id_size, in_size);
}
}
}
|
Convert the following CUDA code to AMD GPU code:
cuda
// This example demonstrates parallel floating point vector
// reduction (summation) with simple __global__ functions.
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 512
// this reference routine computes the reduction c[0] = sum(a)
// sequentially on the host, one element per iteration
void vector_reduction_seq(const float *a,
float *c,
const size_t n){
for(int i = 0; i < n; i++){
c[0] += a[i];
}
}
__device__ void warp_reduce(volatile float* sD, int tid) { //unroll last warp (32 threads)
sD[tid] += sD[tid + 32];
sD[tid] += sD[tid + 16];
sD[tid] += sD[tid + 8];
sD[tid] += sD[tid + 4];
sD[tid] += sD[tid + 2];
sD[tid] += sD[tid + 1];
}
__global__ void vector_reduction(float *a,
float *c,
const size_t n){
// compute the global element index this thread should process
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
for(unsigned int s=blockDim.x/2; s > 0; s >>= 1) { //binary reduction
if (tid < s) {
a[i] += a[i + s];
}
__syncthreads();
}
if (tid == 0) atomicAdd(c, a[i]);
}
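// shared-memory variant: each thread loads and adds two elements up front,
// the block then reduces in shared memory, and the final warp is unrolled
// in warp_reduce to skip the last __syncthreads() calls.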
__global__ void vector_reduction_shared(const float* a, float* c, const size_t n) {
extern __shared__ float sD[];
unsigned int tid = threadIdx.x;
unsigned int blockSize = blockDim.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
sD[tid] = a[i] + a[i+blockSize]; //add on first load
__syncthreads();
for(unsigned int s=blockSize/2; s > 32; s >>= 1) { //binary reduction
if (tid < s) {
sD[tid] += sD[tid + s];
}
__syncthreads();
}
if (tid < 32) warp_reduce(sD, tid); //unroll last warp for block
if (tid == 0) atomicAdd(c,sD[0]); //add each block value to final value
}
int main(void){
// create arrays of 1M elements
const int num_elements = 1<<20;
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
// points to host & device arrays
float *device_array_a = 0;
float *device_c = 0;
float *host_array_a = 0;
float *host_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_c = (float*)malloc(sizeof(float));
// cudaMalloc the device arrays
cudaMalloc((void**)&device_array_a, num_bytes);
cudaMalloc((void**)&device_c, sizeof(float));
// if any memory allocation failed, report an error message
if(host_array_a == 0 || host_c == 0 ||
device_array_a == 0 || device_c == 0){
printf("couldn't allocate memory\n");
return 1;
}
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i){
// make array a a linear ramp
host_array_a[i] = 1;
}
// copy arrays a & b to the device memory space
cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
const size_t num_launches = 1;
double average_seq_time;
struct timespec start, end;
std::cout << "Timing sequential implementation...";
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
for(int i = 0; i < num_launches; i++){
vector_reduction_seq(host_array_a, host_c, num_elements);
}
if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
float serialAns = host_c[0];
//compute the time in s
average_seq_time = ( end.tv_sec - start.tv_sec )
+ (double)( end.tv_nsec - start.tv_nsec ) / 1e+9;
//take the average
average_seq_time /= num_launches;
std::cout << " done." << std::endl;
std::cout << average_seq_time << "s" << std::endl;
// compute the reduction on the device
const size_t block_size = BLOCK_SIZE;
size_t grid_size = num_elements / block_size;
// deal with a possible partial final block
if(num_elements % block_size) ++grid_size;
// time the kernel launches using CUDA events
cudaEvent_t launch_begin, launch_end;
cudaEventCreate(&launch_begin);
cudaEventCreate(&launch_end);
float average_time_simple = 0.0;
std::cout << "Timing simple implementation...";
for(int i = 0; i < num_launches; ++i){
// record a CUDA event immediately before and after the kernel launch
cudaEventRecord(launch_begin,0);
// launch the kernel
vector_reduction<<<grid_size, block_size>>>(device_array_a, device_c, num_elements);
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
float time = 0.0;
// measure the time (ms) spent in the kernel
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_time_simple += time;
}
// copy the result back to the host memory space
cudaMemcpy(host_c, device_c, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << serialAns << " " << host_c[0] << std::endl;
if (serialAns != host_c[0]) return 0;
average_time_simple /= num_launches;
std::cout << " done." << std::endl;
std::cout << average_time_simple << "ms" << std::endl;
cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
host_c[0] = 0;
cudaMemcpy(device_c, host_c, sizeof(float), cudaMemcpyHostToDevice);
float average_time_shared = 0.0;
std::cout << "Timing shared implementation...";
for(int i = 0; i < num_launches; ++i){
// record a CUDA event immediately before and after the kernel launch
cudaEventRecord(launch_begin,0);
// launch the kernel
vector_reduction_shared<<<grid_size, block_size/2, (block_size/2)*sizeof(float)>>>(device_array_a, device_c, num_elements);
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
float time = 0.0;
// measure the time (ms) spent in the kernel
cudaEventElapsedTime(&time, launch_begin, launch_end);
average_time_shared += time;
}
// copy the result back to the host memory space
cudaMemcpy(host_c, device_c, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << serialAns << " " << host_c[0] << std::endl;
if (serialAns != host_c[0]) return 0;
average_time_shared /= num_launches;
std::cout << " done." << std::endl;
std::cout << average_time_shared << "ms" << std::endl;
float num_ops=num_elements;
float seq_throughput = num_ops / (average_seq_time) / 1000000000.0f;
float simple_throughput = num_ops / (average_time_simple / 1000.0f) / 1000000000.0f;
float shared_throughput = num_ops / (average_time_shared / 1000.0f) / 1000000000.0f;
std::cout << "Throughput of sequential: " << seq_throughput << " GB/s" << std::endl;
std::cout << "Throughput of simple kernel: " << simple_throughput << " GB/s" << std::endl;
std::cout << "Simple performance improvement: " << simple_throughput / seq_throughput << "x" << std::endl;
std::cout << "Throughput of shared kernel: " << shared_throughput << " GB/s" << std::endl;
std::cout << "Shared performance improvement: " << shared_throughput / seq_throughput << "x" << std::endl;
std::cout << "Shared performance over simple improvement: " << shared_throughput / simple_throughput << "x" << std::endl;
cudaEventDestroy(launch_begin);
cudaEventDestroy(launch_end);
// deallocate memory
free(host_array_a);
free(host_c);
cudaFree(device_array_a);
cudaFree(device_c);
}
|
// This example demonstrates parallel floating point vector
// reduction (summation) with simple __global__ functions.
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 512
// this reference routine computes the reduction c[0] = sum(a)
// sequentially on the host, one element per iteration
void vector_reduction_seq(const float *a,
float *c,
const size_t n){
for(int i = 0; i < n; i++){
c[0] += a[i];
}
}
__device__ void warp_reduce(volatile float* sD, int tid) { //unroll last warp (32 threads)
sD[tid] += sD[tid + 32];
sD[tid] += sD[tid + 16];
sD[tid] += sD[tid + 8];
sD[tid] += sD[tid + 4];
sD[tid] += sD[tid + 2];
sD[tid] += sD[tid + 1];
}
__global__ void vector_reduction(float *a,
float *c,
const size_t n){
// compute the global element index this thread should process
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
for(unsigned int s=blockDim.x/2; s > 0; s >>= 1) { //binary reduction
if (tid < s) {
a[i] += a[i + s];
}
__syncthreads();
}
if (tid == 0) atomicAdd(c, a[i]);
}
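// shared-memory variant: each thread loads and adds two elements up front,
// the block then reduces in shared memory, and the final warp is unrolled
// in warp_reduce to skip the last __syncthreads() calls.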
__global__ void vector_reduction_shared(const float* a, float* c, const size_t n) {
extern __shared__ float sD[];
unsigned int tid = threadIdx.x;
unsigned int blockSize = blockDim.x;
unsigned int i = blockIdx.x*(blockSize*2) + tid;
sD[tid] = a[i] + a[i+blockSize]; //add on first load
__syncthreads();
for(unsigned int s=blockSize/2; s > 32; s >>= 1) { //binary reduction
if (tid < s) {
sD[tid] += sD[tid + s];
}
__syncthreads();
}
if (tid < 32) warp_reduce(sD, tid); //unroll last warp for block
if (tid == 0) atomicAdd(c,sD[0]); //add each block value to final value
}
int main(void){
// create arrays of 1M elements
const int num_elements = 1<<20;
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
// points to host & device arrays
float *device_array_a = 0;
float *device_c = 0;
float *host_array_a = 0;
float *host_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_c = (float*)malloc(sizeof(float));
// hipMalloc the device arrays
hipMalloc((void**)&device_array_a, num_bytes);
hipMalloc((void**)&device_c, sizeof(float));
// if any memory allocation failed, report an error message
if(host_array_a == 0 || host_c == 0 ||
device_array_a == 0 || device_c == 0){
printf("couldn't allocate memory\n");
return 1;
}
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i){
// make array a a linear ramp
host_array_a[i] = 1;
}
// copy arrays a & b to the device memory space
hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice);
const size_t num_launches = 1;
double average_seq_time;
struct timespec start, end;
std::cout << "Timing sequential implementation...";
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
for(int i = 0; i < num_launches; i++){
vector_reduction_seq(host_array_a, host_c, num_elements);
}
if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
float serialAns = host_c[0];
//compute the time in s
average_seq_time = ( end.tv_sec - start.tv_sec )
+ (double)( end.tv_nsec - start.tv_nsec ) / 1e+9;
//take the average
average_seq_time /= num_launches;
std::cout << " done." << std::endl;
std::cout << average_seq_time << "s" << std::endl;
// compute the reduction on the device
const size_t block_size = BLOCK_SIZE;
size_t grid_size = num_elements / block_size;
// deal with a possible partial final block
if(num_elements % block_size) ++grid_size;
// time the kernel launches using CUDA events
hipEvent_t launch_begin, launch_end;
hipEventCreate(&launch_begin);
hipEventCreate(&launch_end);
float average_time_simple = 0.0;
std::cout << "Timing simple implementation...";
for(int i = 0; i < num_launches; ++i){
// record a CUDA event immediately before and after the kernel launch
hipEventRecord(launch_begin,0);
// launch the kernel
vector_reduction<<<grid_size, block_size>>>(device_array_a, device_c, num_elements);
hipEventRecord(launch_end,0);
hipEventSynchronize(launch_end);
float time = 0.0;
// measure the time (ms) spent in the kernel
hipEventElapsedTime(&time, launch_begin, launch_end);
average_time_simple += time;
}
// copy the result back to the host memory space
hipMemcpy(host_c, device_c, sizeof(float), hipMemcpyDeviceToHost);
std::cout << serialAns << " " << host_c[0] << std::endl;
if (serialAns != host_c[0]) return 0;
average_time_simple /= num_launches;
std::cout << " done." << std::endl;
std::cout << average_time_simple << "ms" << std::endl;
hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice);
host_c[0] = 0;
hipMemcpy(device_c, host_c, sizeof(float), hipMemcpyHostToDevice);
float average_time_shared = 0.0;
std::cout << "Timing shared implementation...";
for(int i = 0; i < num_launches; ++i){
// record a CUDA event immediately before and after the kernel launch
hipEventRecord(launch_begin,0);
// launch the kernel
vector_reduction_shared<<<grid_size, block_size/2, (block_size/2)*sizeof(float)>>>(device_array_a, device_c, num_elements);
hipEventRecord(launch_end,0);
hipEventSynchronize(launch_end);
float time = 0.0;
// measure the time (ms) spent in the kernel
hipEventElapsedTime(&time, launch_begin, launch_end);
average_time_shared += time;
}
// copy the result back to the host memory space
hipMemcpy(host_c, device_c, sizeof(float), hipMemcpyDeviceToHost);
std::cout << serialAns << " " << host_c[0] << std::endl;
if (serialAns != host_c[0]) return 0;
average_time_shared /= num_launches;
std::cout << " done." << std::endl;
std::cout << average_time_shared << "ms" << std::endl;
float num_ops=num_elements;
float seq_throughput = num_ops / (average_seq_time) / 1000000000.0f;
float simple_throughput = num_ops / (average_time_simple / 1000.0f) / 1000000000.0f;
float shared_throughput = num_ops / (average_time_shared / 1000.0f) / 1000000000.0f;
std::cout << "Throughput of sequential: " << seq_throughput << " GB/s" << std::endl;
std::cout << "Throughput of simple kernel: " << simple_throughput << " GB/s" << std::endl;
std::cout << "Simple performance improvement: " << simple_throughput / seq_throughput << "x" << std::endl;
std::cout << "Throughput of shared kernel: " << shared_throughput << " GB/s" << std::endl;
std::cout << "Shared performance improvement: " << shared_throughput / seq_throughput << "x" << std::endl;
std::cout << "Shared performance over simple improvement: " << shared_throughput / simple_throughput << "x" << std::endl;
hipEventDestroy(launch_begin);
hipEventDestroy(launch_end);
// deallocate memory
free(host_array_a);
free(host_c);
hipFree(device_array_a);
hipFree(device_c);
}
|
Convert the following CUDA code to AMD GPU code:
cuda
#include<stdio.h>
#define iteration_max 100
#define CEIL(a, b) (((a) + (b) - 1)/(b))
// Helper function taken from the internet
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
/* Bring the GLOBAL kernel here */
/* Calc_mandelbrot NVIDIA_CUDA */
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int *buffer){
int index_vector = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index_vector >= width * height) return; /* guard the partial final block; assumes the buffer holds width*height entries */
int val_for_cal = buffer[index_vector];
int i = val_for_cal / width;
int j = val_for_cal % width;
float del_x = (max_real - min_real)/width;
float del_y= (max_imag - min_imag)/height;
int iteration = 0;
float x_point = min_real + del_x * i;
float y_point = max_imag - del_y * j;
float z_y = y_point;
float z_x = x_point;
float z_x2 = z_x * z_x;
float z_y2 = z_y * z_y;
for(iteration = 0; iteration < iteration_max && ((z_x2 + z_y2) <= 4); iteration++){
z_y = (2.0 * z_x * z_y)+ y_point;
z_x = z_x2 - z_y2 + x_point;
z_x2 = z_x * z_x;
z_y2 = z_y * z_y;
}
buffer[index_vector] = iteration;
};
extern "C" void invoke_cuda(int tam_vetor, int width, int height, float *min_real, float *min_imag, float *max_real, float *max_imag, int threads, int *vetor){
int *d_vetor;
/* Allocate on the GPU */
checkCudaErrors(cudaMalloc(&d_vetor, tam_vetor*sizeof(int)));
checkCudaErrors(cudaMemcpy(d_vetor, vetor, tam_vetor*sizeof(int), cudaMemcpyHostToDevice));
/* Set up the blocks to run on the GPU */
int threads_per_block = threads;
int num_blocks = CEIL((tam_vetor), threads_per_block);
kernel<<<num_blocks, threads_per_block>>>(width, height, *min_real, *min_imag, *max_real, *max_imag, d_vetor);
checkCudaErrors(cudaDeviceSynchronize());
/* Fetch the buffer from the GPU back to the host */
checkCudaErrors(cudaMemcpy(vetor, d_vetor, tam_vetor*sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_vetor));
}
|
#include <hip/hip_runtime.h>
#include<stdio.h>
#define iteration_max 100
#define CEIL(a, b) (((a) + (b) - 1)/(b))
// Helper function taken from the internet
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( hipError_t err, const char *file, const int line )
{
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
/* Bring the GLOBAL kernel here */
/* Calc_mandelbrot NVIDIA_CUDA */
__global__ void kernel(int width, int height, float min_real, float min_imag, float max_real, float max_imag, int *buffer){
int index_vector = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index_vector >= width * height) return; /* guard the partial final block; assumes the buffer holds width*height entries */
int val_for_cal = buffer[index_vector];
int i = val_for_cal / width;
int j = val_for_cal % width;
float del_x = (max_real - min_real)/width;
float del_y= (max_imag - min_imag)/height;
int iteration = 0;
float x_point = min_real + del_x * i;
float y_point = max_imag - del_y * j;
float z_y = y_point;
float z_x = x_point;
float z_x2 = z_x * z_x;
float z_y2 = z_y * z_y;
for(iteration = 0; iteration < iteration_max && ((z_x2 + z_y2) <= 4); iteration++){
z_y = (2.0 * z_x * z_y)+ y_point;
z_x = z_x2 - z_y2 + x_point;
z_x2 = z_x * z_x;
z_y2 = z_y * z_y;
}
buffer[index_vector] = iteration;
};
extern "C" void invoke_cuda(int tam_vetor, int width, int height, float *min_real, float *min_imag, float *max_real, float *max_imag, int threads, int *vetor){
int *d_vetor;
/* Allocate on the GPU */
checkCudaErrors(hipMalloc(&d_vetor, tam_vetor*sizeof(int)));
checkCudaErrors(hipMemcpy(d_vetor, vetor, tam_vetor*sizeof(int), hipMemcpyHostToDevice));
/* Set up the blocks to run on the GPU */
int threads_per_block = threads;
int num_blocks = CEIL((tam_vetor), threads_per_block);
kernel<<<num_blocks, threads_per_block>>>(width, height, *min_real, *min_imag, *max_real, *max_imag, d_vetor);
checkCudaErrors(hipDeviceSynchronize());
/* Fetch the buffer from the GPU back to the host */
checkCudaErrors(hipMemcpy(vetor, d_vetor, tam_vetor*sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(d_vetor));
}
|