-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathadd_array_thread.cu
More file actions
59 lines (47 loc) · 1.25 KB
/
add_array_thread.cu
File metadata and controls
59 lines (47 loc) · 1.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define N 64
// Element-wise vector addition on the device: c[i] = a[i] + b[i] for i in [0, N).
// Generalized from thread-only indexing (threadIdx.x) to the standard flat
// global index, so the kernel is correct for any <<<blocks, threads>>> launch.
// For the file's <<<1, N>>> launch blockIdx.x is 0, so behavior is unchanged.
// The bounds check guards the grid tail when the launch overshoots N.
__global__ void add(int *a, int *b, int *c)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        c[idx] = a[idx] + b[idx];
    }
}
// Host driver: builds a = {0..N-1}, b = {1,...,1}, adds them on the GPU with
// one block of N threads, and prints each sum. Returns 0 on success, 1 on any
// allocation or launch failure.
int main()
{
    int *a, *b, *c;              // host arrays
    int *dev_a, *dev_b, *dev_c;  // device copies
    const size_t bytes = N * sizeof(int);

    // Allocate memory space for host a, b, and c (checked: the original
    // dereferenced unchecked malloc results)
    a = (int *)malloc(bytes);
    b = (int *)malloc(bytes);
    c = (int *)malloc(bytes);
    if (a == NULL || b == NULL || c == NULL)
    {
        fprintf(stderr, "host malloc failed\n");
        return 1;
    }

    // Allocate memory space for device copies of a, b, and c
    if (cudaMalloc((void **)&dev_a, bytes) != cudaSuccess ||
        cudaMalloc((void **)&dev_b, bytes) != cudaSuccess ||
        cudaMalloc((void **)&dev_c, bytes) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }

    // Fill host arrays: a = 0..N-1, b = all ones
    for (int i = 0; i < N; i++)
    {
        a[i] = i;
        b[i] = 1;
    }

    // Copy inputs from host to device
    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);

    // Launch the add() kernel on GPU: 1 block of N threads
    add<<<1, N>>>(dev_a, dev_b, dev_c);

    // Kernel launches return no status; bad launch configs only surface here
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // Copy result back to host (blocking cudaMemcpy also waits for the kernel)
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    // Print the result
    for (int i = 0; i < N; i++)
    {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Free host memory (leaked in the original)
    free(a);
    free(b);
    free(c);
    return 0;
}