From 6667e478544ac7cb4df9d587a53003feb81839e3 Mon Sep 17 00:00:00 2001
From: Patrick Musau
Date: Mon, 25 Oct 2021 17:07:06 -0500
Subject: [PATCH] it runs at least

---
 pywrapper/setup.py | 22 ++++++++++++----------
 pywrapper/test.py  | 33 ++++++++++++++++-----------------
 2 files changed, 28 insertions(+), 27 deletions(-)

diff --git a/pywrapper/setup.py b/pywrapper/setup.py
index 852f596..214e356 100644
--- a/pywrapper/setup.py
+++ b/pywrapper/setup.py
@@ -3,6 +3,7 @@
 from Cython.Distutils import build_ext
 import numpy, os, platform, sys
 from os.path import join as pjoin
+import six
 
 # Obtain the numpy include directory. This logic works across numpy versions.
 
@@ -18,10 +19,10 @@ def check_for_flag(flag_str, truemsg=False, falsemsg=False):
         enabled = False
 
     if enabled and not truemsg == False:
-        print truemsg
+        print(truemsg)
     elif not enabled and not falsemsg == False:
-        print falsemsg
-        print " $ sudo "+flag_str+"=ON python setup.py install"
+        print(falsemsg)
+        print(" $ sudo "+flag_str+"=ON python setup.py install")
     return enabled
 
 use_cuda = check_for_flag("WITH_CUDA", \
@@ -31,9 +32,9 @@ def check_for_flag(flag_str, truemsg=False, falsemsg=False):
     "Compiling with trace enabled for Bresenham's Line", \
     "Compiling without trace enabled for Bresenham's Line")
 
-print
-print "--------------"
-print
+print()
+print("--------------")
+print()
 
 # support for compiling in clang
 if platform.system().lower() == "darwin":
@@ -62,10 +63,10 @@ def locate_cuda():
     # print os.environ
     # first check if the CUDAHOME env variable is in use
     if os.path.isdir("/usr/local/cuda-7.5"):
-        home = "/usr/local/cuda-7.5"
+        home = "/usr/local/cuda-7.5"
         nvcc = pjoin(home, 'bin', 'nvcc')
     elif os.path.isdir("/usr/local/cuda"):
-        home = "/usr/local/cuda"
+        home = "/usr/local/cuda"
         nvcc = pjoin(home, 'bin', 'nvcc')
     elif 'CUDAHOME' in os.environ:
         home = os.environ['CUDAHOME']
@@ -81,7 +82,8 @@ def locate_cuda():
     cudaconfig = {'home':home, 'nvcc':nvcc,
                   'include': pjoin(home, 'include'),
                   'lib64': pjoin(home, 'lib64')}
-    for k, v in cudaconfig.iteritems():
+
+    for k, v in six.iteritems(cudaconfig):
         if not os.path.exists(v):
             raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
 
@@ -93,7 +95,7 @@ def locate_cuda():
 
 # compiler_flags = ["-w","-std=c++11", "-march=native", "-ffast-math", "-fno-math-errno"]
 compiler_flags = ["-w","-std=c++11", "-march=native", "-ffast-math", "-fno-math-errno", "-O3"]
-nvcc_flags = ['-arch=sm_20', '--ptxas-options=-v', '-c', '--compiler-options', "'-fPIC'", "-w","-std=c++11"]
+nvcc_flags = ['-arch=sm_61', '--ptxas-options=-v', '-c', '--compiler-options', "'-fPIC'", "-w","-std=c++11"]
 include_dirs = ["../", numpy_include]
 depends = ["../includes/*.h"]
 sources = ["RangeLibc.pyx","../vendor/lodepng/lodepng.cpp"]
diff --git a/pywrapper/test.py b/pywrapper/test.py
index 12e995a..c48d12e 100644
--- a/pywrapper/test.py
+++ b/pywrapper/test.py
@@ -27,8 +27,8 @@
 # print range_libc.USE_LRU_CACHE
 # print range_libc.LRU_CACHE_SIZE
 
-# testMap = range_libc.PyOMap("../maps/basement_hallways_5cm.png",1)
-testMap = range_libc.PyOMap("../maps/synthetic.map.png",1)
+# testMap = range_libc.PyOMap(b"../maps/basement_hallways_5cm.png",1)
+testMap = range_libc.PyOMap(b"../maps/synthetic.map.png",1)
 # testMap = range_libc.PyOMap("/home/racecar/racecar-ws/src/TA_examples/lab5/maps/basement.png",1)
 
 if testMap.error():
@@ -56,19 +56,19 @@
 # make_scan(510,520,np.pi/2.0,61)
 
-print "Init: bl"
+print("Init: bl")
 bl = range_libc.PyBresenhamsLine(testMap, 500)
-print "Init: rm"
+print("Init: rm")
 rm = range_libc.PyRayMarching(testMap, 500)
-print "Init: cddt"
+print("Init: cddt")
 cddt = range_libc.PyCDDTCast(testMap, 500, 108)
 cddt.prune()
-print "Init: glt"
+print("Init: glt")
 glt = range_libc.PyGiantLUTCast(testMap, 500, 108)
 
 # this is for testing the amount of raw functional call overhead, does not compute ranges
 # null = range_libc.PyNull(testMap, 500, 108)
 
-for x in xrange(10):
+for x in range(10):
     vals = np.random.random((3,num_vals)).astype(np.float32)
     vals[0,:] *= (testMap.width() - 2.0)
     vals[1,:] *= (testMap.height() - 2.0)
@@ -78,28 +78,27 @@
 
     ranges = np.zeros(num_vals, dtype=np.float32)
     test_states = [None]*num_vals
-    for i in xrange(num_vals):
+    for i in range(num_vals):
         test_states[i] = (vals[0,i], vals[1,i], vals[2,i])
 
     def bench(obj,name):
-        print "Running:", name
+        print("Running:", name)
         start = time.clock()
        obj.calc_range_many(vals, ranges)
         end = time.clock()
         dur_np = end - start
-        print ",,,"+name+" np: finished computing", ranges.shape[0], "ranges in", dur_np, "sec"
+        print(",,,"+name+" np: finished computing", ranges.shape[0], "ranges in", dur_np, "sec")
 
         start = time.clock()
-        ranges_slow = map(lambda x: obj.calc_range(*x), test_states)
+        ranges_slow = [*map(lambda x: obj.calc_range(*x), test_states)]
         end = time.clock()
         dur = end - start
-
-        diff = np.linalg.norm(ranges - np.array(ranges_slow))
+        diff = np.linalg.norm(np.array(ranges) - np.array(ranges_slow))
         if diff > 0.001:
-            print ",,,"+"Numpy result different from slow result, investigation possibly required. norm:", diff
+            print(",,,"+"Numpy result different from slow result, investigation possibly required. norm:", diff)
             # print "DIFF:", diff
-        print ",,,"+name+": finished computing", ranges.shape[0], "ranges in", dur, "sec"
-        print ",,,"+"Numpy speedup:", dur/dur_np
+        print(",,,"+name+": finished computing", ranges.shape[0], "ranges in", dur, "sec")
+        print(",,,"+"Numpy speedup:", dur/dur_np)
 
     bench(bl, "bl")
     bench(rm, "rm")
@@ -129,4 +128,4 @@ def bench(obj,name):
 
 # this is for testing the amount of raw functional call overhead, does not compute ranges
 # bench(null, "null")
-print "DONE"
\ No newline at end of file
+print("DONE")
\ No newline at end of file
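
As a quick sanity check of the converted wrapper under Python 3, a minimal sketch along the lines of test.py is shown below. It assumes the extension built and that ../maps/synthetic.map.png exists; the query count and angle scaling are arbitrary illustrative choices, and time.perf_counter() stands in for time.clock(), which test.py still uses but which was removed in Python 3.8.

# Minimal Python 3 smoke test for the converted wrapper -- a sketch, not part of the patch.
# Assumes the range_libc extension is installed and the map path below exists.
import time

import numpy as np
import range_libc

test_map = range_libc.PyOMap(b"../maps/synthetic.map.png", 1)   # bytes path, as in test.py
if test_map.error():
    raise SystemExit("Map failed to load")

rm = range_libc.PyRayMarching(test_map, 500)                    # max range of 500, as in test.py

num_queries = 1000                                              # arbitrary size for this sketch
queries = np.random.random((3, num_queries)).astype(np.float32)
queries[0, :] *= test_map.width() - 2.0                         # x
queries[1, :] *= test_map.height() - 2.0                        # y
queries[2, :] *= 2.0 * np.pi                                    # theta
ranges = np.zeros(num_queries, dtype=np.float32)

# time.clock() was removed in Python 3.8; perf_counter() is the usual replacement.
start = time.perf_counter()
rm.calc_range_many(queries, ranges)
elapsed = time.perf_counter() - start
print("computed", ranges.shape[0], "ranges in", elapsed, "sec")

If per-query cross-checking is needed, the same object also exposes calc_range(x, y, theta), which test.py compares against the vectorized calc_range_many path.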