Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 90 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,96 @@ CUDA Stream Compaction

**University of Pennsylvania, CIS 565: GPU Programming and Architecture, Project 2**

* (TODO) YOUR NAME HERE
* Tested on: (TODO) Windows 22, i7-2222 @ 2.22GHz 22GB, GTX 222 222MB (Moore 2222 Lab)
* Mohamed Soudy
* Tested on: Windows 10 Enterprise, i7 @ 2.7 GHz 16GB, GT 650M 1024 MB

### (TODO: Your README)
### Description

Include analysis, etc. (Remember, this is public, so don't put
anything here that you don't want to share with the world.)
An implementation of stream compaction on the CPU and GPU. Two GPU versions of the scan (prefix-sum) algorithm — naive and work-efficient — were implemented and compared against the CPU and Thrust implementations.

### Performance Analysis

The following tables show comparisons between CPU, naive, work-efficient and thrust implementations with arrays of up to approximately 1 million elements.

![](img/power2_table.png)

![](img/non_power2_table.png)

![](img/power2_chart.png)

![](img/non_power2_chart.png)

#### CPU vs Naive vs Work-Efficient

Surprisingly, the CPU implementation is much faster than all of the GPU implementations. The work-efficient implementation should be the fastest, but it is not, mainly because in both the up-sweep and down-sweep kernels most threads do no work: at each depth the active thread indices are strided by a power of 2, so a full complement of threads is launched while only a fraction of them participate. This poor thread utilization makes it much slower than the CPU implementation.

#### Power of 2 vs Non Power of 2 Array Sizes

Performance for power-of-2 and non-power-of-2 array sizes is very similar in all implementations except Thrust: the power-of-2 Thrust test is significantly slower than the non-power-of-2 one, most likely because it runs first and absorbs one-time CUDA/Thrust initialization costs.

### Program Output

```
****************
** SCAN TESTS **
****************
[ 48 5 46 40 32 43 10 49 32 34 3 41 49 ... 38 0 ]
==== cpu scan, power-of-two ====
elapsed time: 0.00076ms (std::chrono Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6200 6238 ]
==== cpu scan, non-power-of-two ====
elapsed time: 0.000761ms (std::chrono Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6132 6147 ]
passed
==== naive scan, power-of-two ====
elapsed time: 0.037632ms (CUDA Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6200 6238 ]
passed
==== naive scan, non-power-of-two ====
elapsed time: 0.0376ms (CUDA Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6132 6147 ]
passed
==== work-efficient scan, power-of-two ====
elapsed time: 0.142432ms (CUDA Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6200 6238 ]
passed
==== work-efficient scan, non-power-of-two ====
elapsed time: 0.14176ms (CUDA Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6132 6147 ]
passed
==== thrust scan, power-of-two ====
elapsed time: 2.66266ms (CUDA Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6200 6238 ]
passed
==== thrust scan, non-power-of-two ====
elapsed time: 0.020128ms (CUDA Measured)
[ 0 48 53 99 139 171 214 224 273 305 339 342 383 ... 6132 6147 ]
passed

*****************************
** STREAM COMPACTION TESTS **
*****************************
[ 0 3 2 2 0 1 0 3 2 0 3 3 3 ... 2 0 ]
==== cpu compact without scan, power-of-two ====
elapsed time: 0.00076ms (std::chrono Measured)
[ 3 2 2 1 3 2 3 3 3 1 2 3 1 ... 3 2 ]
passed
==== cpu compact without scan, non-power-of-two ====
elapsed time: 0.001141ms (std::chrono Measured)
[ 3 2 2 1 3 2 3 3 3 1 2 3 1 ... 1 3 ]
passed
==== cpu compact with scan ====
elapsed time: 0.0019ms (std::chrono Measured)
[ 3 2 2 1 3 2 3 3 3 1 2 3 1 ... 3 2 ]
passed
==== work-efficient compact, power-of-two ====
elapsed time: 0.519744ms (CUDA Measured)
[ 3 2 2 1 3 2 3 3 3 1 2 3 1 ... 3 2 ]
passed
==== work-efficient compact, non-power-of-two ====
elapsed time: 0.769056ms (CUDA Measured)
[ 3 2 2 1 3 2 3 3 3 1 2 3 1 ... 1 3 ]
passed
Press any key to continue . . .

```
Binary file added img/non_power2_chart.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added img/non_power2_table.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added img/power2_chart.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added img/power2_table.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
32 changes: 23 additions & 9 deletions src/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,10 @@ int main(int argc, char* argv[]) {

genArray(SIZE - 1, a, 50); // Leave a 0 at the end to test that edge case
a[SIZE - 1] = 0;
//for (int i = 0; i < SIZE; i++) {
// a[i] = i;
//}

printArray(SIZE, a, true);

// initialize b using StreamCompaction::CPU::scan you implement
Expand All @@ -42,49 +46,57 @@ int main(int argc, char* argv[]) {
printDesc("cpu scan, non-power-of-two");
StreamCompaction::CPU::scan(NPOT, c, a);
printElapsedTime(StreamCompaction::CPU::timer().getCpuElapsedTimeForPreviousOperation(), "(std::chrono Measured)");
printArray(NPOT, b, true);
printArray(NPOT, c, true);
printCmpResult(NPOT, b, c);

zeroArray(SIZE, c);
printDesc("naive scan, power-of-two");
StreamCompaction::Naive::scan(SIZE, c, a);
printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(SIZE, c, true);
printArray(SIZE, c, true);
printCmpResult(SIZE, b, c);

zeroArray(SIZE, c);
printDesc("naive scan, non-power-of-two");
StreamCompaction::Naive::scan(NPOT, c, a);
printElapsedTime(StreamCompaction::Naive::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(SIZE, c, true);
printArray(NPOT, c, true);
printCmpResult(NPOT, b, c);

zeroArray(SIZE, c);
printDesc("work-efficient scan, power-of-two");
StreamCompaction::Efficient::scan(SIZE, c, a);
printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(SIZE, c, true);
//printArray(SIZE, a, true);
//printArray(SIZE, b, true);
printArray(SIZE, c, true);
printCmpResult(SIZE, b, c);

zeroArray(SIZE, c);
printDesc("work-efficient scan, non-power-of-two");
StreamCompaction::Efficient::scan(NPOT, c, a);
printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(NPOT, c, true);
//printArray(NPOT, a, true);
//printArray(NPOT, b, true);
printArray(NPOT, c, true);
printCmpResult(NPOT, b, c);

zeroArray(SIZE, c);
printDesc("thrust scan, power-of-two");
StreamCompaction::Thrust::scan(SIZE, c, a);
printElapsedTime(StreamCompaction::Thrust::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(SIZE, c, true);
//printArray(SIZE, a, true);
//printArray(SIZE, b, true);
printArray(SIZE, c, true);
printCmpResult(SIZE, b, c);

zeroArray(SIZE, c);
printDesc("thrust scan, non-power-of-two");
StreamCompaction::Thrust::scan(NPOT, c, a);
printElapsedTime(StreamCompaction::Thrust::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(NPOT, c, true);
//printArray(NPOT, a, true);
//printArray(NPOT, b, true);
printArray(NPOT, c, true);
printCmpResult(NPOT, b, c);

printf("\n");
Expand Down Expand Up @@ -129,14 +141,16 @@ int main(int argc, char* argv[]) {
printDesc("work-efficient compact, power-of-two");
count = StreamCompaction::Efficient::compact(SIZE, c, a);
printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(count, c, true);
//printArray(count, a, true);
//printArray(count, b, true);
printArray(count, c, true);
printCmpLenResult(count, expectedCount, b, c);

zeroArray(SIZE, c);
printDesc("work-efficient compact, non-power-of-two");
count = StreamCompaction::Efficient::compact(NPOT, c, a);
printElapsedTime(StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation(), "(CUDA Measured)");
//printArray(count, c, true);
printArray(count, c, true);
printCmpLenResult(count, expectedNPOT, b, c);

system("pause"); // stop Win32 console from closing on exit
Expand Down
9 changes: 7 additions & 2 deletions stream_compaction/common.cu
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,9 @@ namespace StreamCompaction {
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
/**
 * Maps each input element to a boolean flag: 1 if the element is
 * non-zero (kept by compaction), 0 if it is zero (discarded).
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (tid < n) {
        bools[tid] = (idata[tid] != 0) ? 1 : 0;
    }
}

/**
Expand All @@ -32,7 +34,10 @@ namespace StreamCompaction {
*/
/**
 * Scatters flagged elements into their compacted positions: each thread
 * whose boolean flag is 1 writes its input element to the output slot
 * given by the exclusive-scan result in `indices`.
 */
__global__ void kernScatter(int n, int *odata,
        const int *idata, const int *bools, const int *indices) {
    int tid = threadIdx.x + (blockIdx.x * blockDim.x);
    if (tid < n && bools[tid] == 1) {
        odata[indices[tid]] = idata[tid];
    }
}

}
Expand Down
55 changes: 42 additions & 13 deletions stream_compaction/cpu.cu
Original file line number Diff line number Diff line change
@@ -1,15 +1,16 @@
#include <cstdio>
#include "cpu.h"

#include "common.h"
#include "common.h"

namespace StreamCompaction {
namespace CPU {
using StreamCompaction::Common::PerformanceTimer;

// When true, scan() is being invoked internally by compactWithScan(),
// which owns the timing window; scan() must then not touch the timer.
bool compactTest = false;

// Returns the single shared CPU performance timer for this namespace.
PerformanceTimer& timer()
{
    static PerformanceTimer timer;
    return timer;
}

/**
Expand All @@ -18,9 +19,11 @@ namespace StreamCompaction {
* (Optional) For better understanding before starting moving to GPU, you can simulate your GPU scan in this function first.
*/
/**
 * CPU exclusive prefix sum: odata[k] = idata[0] + ... + idata[k-1],
 * with odata[0] = 0.
 *
 * Timing is skipped when compactTest is set, because compactWithScan()
 * times the whole compaction and the timer cannot be started twice.
 */
void scan(int n, int *odata, const int *idata) {
    if (!compactTest) timer().startCpuTimer();
    if (n > 0) {        // guard: empty input must not write odata[0]
        odata[0] = 0;
        for (int k = 1; k < n; ++k)
            odata[k] = odata[k - 1] + idata[k - 1];
    }
    if (!compactTest) timer().endCpuTimer();
}

/**
Expand All @@ -30,9 +33,12 @@ namespace StreamCompaction {
*/
/**
 * CPU stream compaction without using scan: copies every non-zero
 * element of idata forward into odata, preserving order.
 *
 * @returns the number of elements remaining after compaction.
 */
int compactWithoutScan(int n, int *odata, const int *idata) {
    timer().startCpuTimer();
    int count = 0;      // write cursor = number of elements kept so far
    for (int k = 0; k < n; ++k)
        if (idata[k] != 0)
            odata[count++] = idata[k];
    timer().endCpuTimer();
    // Previously a stray `return -1;` made the real count unreachable.
    return count;
}

/**
Expand All @@ -41,10 +47,33 @@ namespace StreamCompaction {
* @returns the number of elements remaining after compaction.
*/
/**
 * CPU stream compaction using the map/scan/scatter pipeline:
 *   1. map each element to a 0/1 keep-flag,
 *   2. exclusive-scan the flags to get each kept element's output slot,
 *   3. scatter kept elements into odata at those slots.
 *
 * @returns the number of elements remaining after compaction.
 */
int compactWithScan(int n, int *odata, const int *idata) {
    compactTest = true;     // tell scan() not to touch the shared timer
    int* tdata = new int[n];
    int* sdata = new int[n];

    timer().startCpuTimer();

    // 1. Map: flag non-zero elements with 1, zeros with 0.
    for (int k = 0; k < n; ++k)
        tdata[k] = (idata[k] != 0) ? 1 : 0;

    // 2. Exclusive prefix sum of the flags.
    scan(n, sdata, tdata);

    // 3. Scatter kept elements to their compacted positions.
    for (int k = 0; k < n; ++k)
        if (tdata[k] == 1)
            odata[sdata[k]] = idata[k];

    // Count = last scan value + last flag. Unlike "last written index + 1",
    // this correctly yields 0 for an all-zero (or empty) input.
    int count = (n > 0) ? sdata[n - 1] + tdata[n - 1] : 0;

    timer().endCpuTimer();

    compactTest = false;
    // `delete[] tdata, sdata;` only freed tdata (comma operator); free both.
    delete[] tdata;
    delete[] sdata;
    return count;
}
}
}
Loading