diff --git a/.gitignore b/.gitignore index 82b8e5c..1797505 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,8 @@ **__pycache__ **.swp **.pyc +**.DS_Store +uv.lock artifacts build devel diff --git a/kamera/postflight/dat_to_csv.py b/kamera/postflight/dat_to_csv.py index 09ea121..b3c9e96 100644 --- a/kamera/postflight/dat_to_csv.py +++ b/kamera/postflight/dat_to_csv.py @@ -417,8 +417,11 @@ def parse_gsof_stream(buf): def maybe_gsof(buf): # type: (bytes) -> bool - if struct.unpack("B", buf[0:1])[0] == START_TX: - return True + try: + if struct.unpack('B', buf[0:1])[0] == START_TX: + return True + except struct.error: + pass return False diff --git a/src/cams/phase_one/CMakeLists.txt b/src/cams/phase_one/CMakeLists.txt index de5db34..96e2189 100644 --- a/src/cams/phase_one/CMakeLists.txt +++ b/src/cams/phase_one/CMakeLists.txt @@ -71,6 +71,56 @@ find_package(catkin REQUIRED COMPONENTS ## System dependencies are found with CMake's conventions find_package(Boost REQUIRED COMPONENTS system) +## Find CUDA +find_package(CUDA QUIET) +if(CUDA_FOUND) + enable_language(CUDA) + message(STATUS "CUDA found: ${CUDA_VERSION}") + message(STATUS "CUDA libraries: ${CUDA_LIBRARIES}") +else() + message(WARNING "CUDA not found - nvjpeg will not be available") +endif() + +## Find nvjpeg (part of CUDA Toolkit) +if(CUDA_FOUND) + find_library(NVJPEG_LIBRARY + NAMES nvjpeg + PATHS + ${CUDA_TOOLKIT_ROOT_DIR}/lib64 + ${CUDA_TOOLKIT_ROOT_DIR}/lib + /usr/local/cuda/lib64 + /usr/local/cuda/lib + NO_DEFAULT_PATH + ) + + if(NVJPEG_LIBRARY) + message(STATUS "nvjpeg library found: ${NVJPEG_LIBRARY}") + set(NVJPEG_FOUND TRUE) + else() + message(WARNING "nvjpeg library not found - nvjpeg encoding will not be available") + set(NVJPEG_FOUND FALSE) + endif() + + # Find nvjpeg header + find_path(NVJPEG_INCLUDE_DIR + NAMES nvjpeg.h + PATHS + ${CUDA_TOOLKIT_ROOT_DIR}/include + /usr/local/cuda/include + NO_DEFAULT_PATH + ) + + if(NVJPEG_INCLUDE_DIR) + message(STATUS "nvjpeg headers found: ${NVJPEG_INCLUDE_DIR}") + 
add_definitions(-DHAVE_NVJPEG) + else() + message(WARNING "nvjpeg headers not found") + set(NVJPEG_FOUND FALSE) + endif() +else() + set(NVJPEG_FOUND FALSE) +endif() + ## Uncomment this if the package has a setup.py. This macro ensures ## modules and global scripts declared therein get installed @@ -174,6 +224,9 @@ catkin_package( ## Specify additional locations of header files ## Your package locations should be listed before other locations include_directories(include ${Boost_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ${catkin_INCLUDE_DIRS}) +if(NVJPEG_FOUND AND NVJPEG_INCLUDE_DIR) + include_directories(${NVJPEG_INCLUDE_DIR} ${CUDA_INCLUDE_DIRS}) +endif() ## Declare a C++ library # add_library(${PROJECT_NAME} @@ -192,7 +245,8 @@ set(SRC # build and install the nodelet -add_library(phase_one ${SRC}) +add_library(${PROJECT_NAME} ${SRC}) +add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) ## Specify libraries to link a library or executable target against target_link_libraries(phase_one PRIVATE @@ -201,6 +255,13 @@ target_link_libraries(phase_one PRIVATE CameraSDK::CameraSdkCpp ImageSDK::ImageSdkCpp ) +if(NVJPEG_FOUND) + target_link_libraries(phase_one PRIVATE + ${NVJPEG_LIBRARY} + ${CUDA_LIBRARIES} + ${CUDA_CUDART_LIBRARY} + ) +endif() ## Declare a C++ executable ## With catkin_make all packages are built within a single CMake context @@ -218,6 +279,13 @@ target_link_libraries(phase_one_standalone PRIVATE CameraSDK::CameraSdkCpp ImageSDK::ImageSdkCpp ) +if(NVJPEG_FOUND) + target_link_libraries(phase_one_standalone PRIVATE + ${NVJPEG_LIBRARY} + ${CUDA_LIBRARIES} + ${CUDA_CUDART_LIBRARY} + ) +endif() ## Rename C++ executable without prefix ## The above recommended prefix causes long target names, the following renames the diff --git a/src/cams/phase_one/include/phase_one/phase_one.h b/src/cams/phase_one/include/phase_one/phase_one.h index 7c3669a..392ccab 100644 --- a/src/cams/phase_one/include/phase_one/phase_one.h +++ 
b/src/cams/phase_one/include/phase_one/phase_one.h @@ -74,6 +74,11 @@ namespace phase_one const std::string &filename, std::string format); + // Compress JPEG using nvjpeg (GPU-accelerated) + bool compressJpegNvjpeg(const cv::Mat& bgr_image, + std::vector& output, + int quality = 90); + // Fill in the entries of the maps 'property_to_id_' and 'property_to_type_' given // a 'camera' handle. These maps define the values used for setting/getting the // parameters in the ROS service calls @@ -135,6 +140,8 @@ namespace phase_one std::string effort; std::string project; std::string base_dir; + std::string to_process_filename_; + std::string processed_filename_; double auto_trigger_rate_; int num_threads_; // Internal data structures / sync structures diff --git a/src/cams/phase_one/src/phase_one_standalone.cpp b/src/cams/phase_one/src/phase_one_standalone.cpp index 8724bd1..44d1060 100755 --- a/src/cams/phase_one/src/phase_one_standalone.cpp +++ b/src/cams/phase_one/src/phase_one_standalone.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include #include @@ -41,9 +42,198 @@ #include #include +#ifdef HAVE_NVJPEG +#include +#include +#include +#endif +#ifdef HAVE_NVJPEG namespace phase_one { + // Helper function to convert nvjpeg status to string + const char* nvjpeg_error_string(nvjpegStatus_t s) { + switch (s) { + case NVJPEG_STATUS_SUCCESS: return "NVJPEG_STATUS_SUCCESS"; + case NVJPEG_STATUS_NOT_INITIALIZED: return "NVJPEG_STATUS_NOT_INITIALIZED"; + case NVJPEG_STATUS_INVALID_PARAMETER: return "NVJPEG_STATUS_INVALID_PARAMETER"; + case NVJPEG_STATUS_BAD_JPEG: return "NVJPEG_STATUS_BAD_JPEG"; + case NVJPEG_STATUS_JPEG_NOT_SUPPORTED: return "NVJPEG_STATUS_JPEG_NOT_SUPPORTED"; + case NVJPEG_STATUS_ALLOCATOR_FAILURE: return "NVJPEG_STATUS_ALLOCATOR_FAILURE"; + case NVJPEG_STATUS_EXECUTION_FAILED: return "NVJPEG_STATUS_EXECUTION_FAILED"; + case NVJPEG_STATUS_ARCH_MISMATCH: return "NVJPEG_STATUS_ARCH_MISMATCH"; + case NVJPEG_STATUS_INTERNAL_ERROR: return 
"NVJPEG_STATUS_INTERNAL_ERROR"; + default: return "NVJPEG_STATUS_UNKNOWN"; + } + } + + // Helper class for nvjpeg encoding + class NvJpegEncoder { + public: + NvJpegEncoder(int width, int height, int quality = 90) + : width_(width), height_(height), quality_(quality) + { + // Create nvJPEG handle (try hardware backend first, fall back to default) + nvjpegStatus_t st = nvjpegCreateEx( + NVJPEG_BACKEND_HARDWARE, + nullptr, nullptr, 0, &handle_); + if (st == NVJPEG_STATUS_ARCH_MISMATCH) { + st = nvjpegCreateEx( + NVJPEG_BACKEND_DEFAULT, + nullptr, nullptr, 0, &handle_); + } + if (st != NVJPEG_STATUS_SUCCESS) { + ROS_ERROR("Failed to create nvJPEG handle: %s", nvjpeg_error_string(st)); + handle_ = nullptr; + return; + } + + nvjpegStatus_t enc_st = nvjpegEncoderStateCreate(handle_, &encState_, nullptr); + if (enc_st != NVJPEG_STATUS_SUCCESS) { + ROS_ERROR("Failed to create encoder state: %s", nvjpeg_error_string(enc_st)); + nvjpegDestroy(handle_); + handle_ = nullptr; + return; + } + + enc_st = nvjpegEncoderParamsCreate(handle_, &encParams_, nullptr); + if (enc_st != NVJPEG_STATUS_SUCCESS) { + ROS_ERROR("Failed to create encoder params: %s", nvjpeg_error_string(enc_st)); + nvjpegEncoderStateDestroy(encState_); + nvjpegDestroy(handle_); + handle_ = nullptr; + return; + } + + nvjpegEncoderParamsSetQuality(encParams_, quality_, nullptr); + nvjpegEncoderParamsSetOptimizedHuffman(encParams_, 1, nullptr); + nvjpegEncoderParamsSetSamplingFactors( + encParams_, NVJPEG_CSS_420, nullptr); + nvjpegEncoderParamsSetEncoding( + encParams_, NVJPEG_ENCODING_BASELINE_DCT, nullptr); + + // Allocate a single GPU buffer large enough for interleaved BGR + cudaError_t cuda_st = cudaMallocPitch( + (void**)&d_img_, &pitch_, + width_ * 3 * sizeof(unsigned char), + height_); + if (cuda_st != cudaSuccess) { + ROS_ERROR("Failed to allocate CUDA memory: %s", cudaGetErrorString(cuda_st)); + nvjpegEncoderParamsDestroy(encParams_); + nvjpegEncoderStateDestroy(encState_); + nvjpegDestroy(handle_); + 
handle_ = nullptr; + return; + } + + std::memset(&nvImage_, 0, sizeof(nvImage_)); + nvImage_.channel[0] = d_img_; + nvImage_.pitch[0] = static_cast(pitch_); + + input_format_ = NVJPEG_INPUT_BGRI; + + cuda_st = cudaStreamCreate(&stream_); + if (cuda_st != cudaSuccess) { + ROS_ERROR("Failed to create CUDA stream: %s", cudaGetErrorString(cuda_st)); + cudaFree(d_img_); + nvjpegEncoderParamsDestroy(encParams_); + nvjpegEncoderStateDestroy(encState_); + nvjpegDestroy(handle_); + handle_ = nullptr; + return; + } + } + + ~NvJpegEncoder() + { + if (d_img_) cudaFree(d_img_); + if (stream_) cudaStreamDestroy(stream_); + if (encParams_) nvjpegEncoderParamsDestroy(encParams_); + if (encState_) nvjpegEncoderStateDestroy(encState_); + if (handle_) nvjpegDestroy(handle_); + } + + bool encode(const cv::Mat& bgr, std::vector& output) + { + if (!handle_) { + ROS_ERROR("NvJpegEncoder: handle not initialized"); + return false; + } + + if (bgr.cols != width_ || bgr.rows != height_ || bgr.type() != CV_8UC3) { + ROS_ERROR("NvJpegEncoder: input size/type mismatch. 
Expected %dx%d CV_8UC3, got %dx%d type %d", + width_, height_, bgr.cols, bgr.rows, bgr.type()); + return false; + } + + const unsigned char* h_bgr = bgr.ptr(0); + + // Upload + cudaError_t cuda_st = cudaMemcpy2DAsync( + d_img_, pitch_, + h_bgr, width_ * 3 * sizeof(unsigned char), + width_ * 3 * sizeof(unsigned char), + height_, + cudaMemcpyHostToDevice, + stream_); + if (cuda_st != cudaSuccess) { + ROS_ERROR("Failed to copy to device: %s", cudaGetErrorString(cuda_st)); + return false; + } + + // Encode + nvjpegStatus_t st = nvjpegEncodeImage( + handle_, encState_, encParams_, + &nvImage_, input_format_, + width_, height_, stream_); + if (st != NVJPEG_STATUS_SUCCESS) { + ROS_ERROR("Failed to encode image: %s", nvjpeg_error_string(st)); + return false; + } + + cudaStreamSynchronize(stream_); + + // Retrieve bitstream + size_t jpegSize = 0; + st = nvjpegEncodeRetrieveBitstream( + handle_, encState_, nullptr, &jpegSize, stream_); + if (st != NVJPEG_STATUS_SUCCESS) { + ROS_ERROR("Failed to retrieve bitstream size: %s", nvjpeg_error_string(st)); + return false; + } + cudaStreamSynchronize(stream_); + + output.resize(jpegSize); + st = nvjpegEncodeRetrieveBitstream( + handle_, encState_, output.data(), &jpegSize, stream_); + if (st != NVJPEG_STATUS_SUCCESS) { + ROS_ERROR("Failed to retrieve bitstream: %s", nvjpeg_error_string(st)); + return false; + } + cudaStreamSynchronize(stream_); + + return true; + } + + bool isValid() const { return handle_ != nullptr; } + + private: + int width_; + int height_; + int quality_; + + nvjpegHandle_t handle_ = nullptr; + nvjpegEncoderState_t encState_ = nullptr; + nvjpegEncoderParams_t encParams_ = nullptr; + nvjpegImage_t nvImage_{}; + + unsigned char* d_img_ = nullptr; + size_t pitch_ = 0; + nvjpegInputFormat_t input_format_; + cudaStream_t stream_ = nullptr; + }; +#endif // HAVE_NVJPEG + PhaseOne::PhaseOne() { } @@ -127,29 +317,47 @@ namespace phase_one static double min_image_delay = 0.95; // should depend on exposure time 
event_cache.set_delay(min_image_delay); event_cache.set_tolerance(ros::Duration(0.49)); - std::string to_process_filename = "/mnt/data/to_process.txt"; - std::string processed_filename = "/mnt/data/processed.txt"; - std::vector to_process_images = loadFile(to_process_filename); - std::vector processed_images = loadFile(processed_filename); + to_process_filename_ = base_dir + "/to_process.txt"; + processed_filename_ = base_dir + "/processed.txt"; + std::vector to_process_images = loadFile(to_process_filename_); + std::vector processed_images = loadFile(processed_filename_); // write out any files that are left, as well as add to queue std::ofstream write_intersection; - write_intersection.open(to_process_filename); - for (auto& toProcess : to_process_images) { - if (std::find(processed_images.begin(), processed_images.end(), - toProcess) != processed_images.end()) { - } else { - write_intersection << toProcess << std::endl; - // sequence doesn't matter if we've cycled, just set to 0 - filename_to_seq_map_[toProcess] = 0; - total_counter++; + write_intersection.open(to_process_filename_); + if (!write_intersection.is_open()) { + ROS_ERROR("Failed to open %s for writing. Check disk space and permissions.", to_process_filename_.c_str()); + } else { + for (auto& toProcess : to_process_images) { + if (std::find(processed_images.begin(), processed_images.end(), + toProcess) != processed_images.end()) { + } else { + write_intersection << toProcess << std::endl; + if (write_intersection.fail()) { + ROS_ERROR("Failed to write to %s. Check disk space.", to_process_filename_.c_str()); + break; + } + // sequence doesn't matter if we've cycled, just set to 0 + filename_to_seq_map_[toProcess] = 0; + total_counter++; + } } + write_intersection.flush(); + if (write_intersection.fail()) { + ROS_ERROR("Failed to flush %s. 
Data may not be written to disk.", to_process_filename_.c_str()); + } + write_intersection.close(); } - write_intersection.close(); // Just delete processed image file - remove(processed_filename.c_str()); + remove(processed_filename_.c_str()); // Append new entries to cache - to_process_out.open(to_process_filename, std::ios_base::app); - processed_out.open(processed_filename, std::ios_base::app); + to_process_out.open(to_process_filename_, std::ios_base::app); + if (!to_process_out.is_open()) { + ROS_ERROR("Failed to open %s in append mode. Check disk space and permissions.", to_process_filename_.c_str()); + } + processed_out.open(processed_filename_, std::ios_base::app); + if (!processed_out.is_open()) { + ROS_ERROR("Failed to open %s in append mode. Check disk space and permissions.", processed_filename_.c_str()); + } // Throw results into redis for access std::string base = "/sys/" + hostname + "/p1debayerq/"; @@ -323,10 +531,29 @@ namespace phase_one std::lock_guard lock(mtx); filename_to_seq_map_[fname] = seq; // write out every file received here, reduce set size with processed_out - to_process_out << fname << std::endl; + if (!to_process_out.is_open()) { + ROS_ERROR("|CAPTURE| to_process_out stream is not open. Attempting to reopen."); + to_process_out.open(to_process_filename_, std::ios_base::app); + if (!to_process_out.is_open()) { + ROS_ERROR("|CAPTURE| Failed to reopen %s. Check disk space and mount.", to_process_filename_.c_str()); + } + } + if (to_process_out.is_open()) { + to_process_out << fname << std::endl; + if (to_process_out.fail()) { + ROS_ERROR("|CAPTURE| Failed to write to %s. Check disk space.", to_process_filename_.c_str()); + } else { + to_process_out.flush(); + if (to_process_out.fail()) { + ROS_ERROR("|CAPTURE| Failed to flush %s. 
Data may not be written to disk.", to_process_filename_.c_str()); + } + } + } total_counter++; } catch (boost::filesystem::filesystem_error &e) { ROS_ERROR("|CAPTURE| Archive Failed [%d]: %s", e.code().value(), e.what()); + } catch (const std::ios_base::failure& e) { + ROS_ERROR("|CAPTURE| I/O error writing IIQ file: %s", e.what()); } } @@ -351,11 +578,11 @@ namespace phase_one } catch (...) { ROS_ERROR("|CAPTURE| Failed to convert preview."); }; - - // grab all tags for image, and shove into frame ID. faster + + // grab all tags for image, and shove into frame ID. faster // than adding to EXIF, can do that later auto ticid = std::chrono::high_resolution_clock::now(); - + P1::ImageSdk::ImageTag tag; std::stringstream frame_id; tag = raw_img.GetTag(ids.Make); @@ -385,7 +612,7 @@ namespace phase_one auto tocid = std::chrono::high_resolution_clock::now(); auto dtid = tocid - ticid; ROS_INFO_STREAM("|CAPTURE| adding tags time: " << dtid.count() / 1e9 << "s"); - + output_msg.header.frame_id = frame_id.str(); image_pub.publish(output_msg); stat_pub_.publish(stat_msg); @@ -527,7 +754,24 @@ namespace phase_one if ( success ) { // delete IIQ file that's been processed remove(filename_iiq.c_str()); - processed_out << filename_iiq << std::endl; + if (!processed_out.is_open()) { + ROS_ERROR("|DEMOSAIC| processed_out stream is not open. Attempting to reopen."); + processed_out.open(processed_filename_, std::ios_base::app); + if (!processed_out.is_open()) { + ROS_ERROR("|DEMOSAIC| Failed to reopen %s. Check disk space and mount.", processed_filename_.c_str()); + } + } + if (processed_out.is_open()) { + processed_out << filename_iiq << std::endl; + if (processed_out.fail()) { + ROS_ERROR("|DEMOSAIC| Failed to write to %s. Check disk space.", processed_filename_.c_str()); + } else { + processed_out.flush(); + if (processed_out.fail()) { + ROS_ERROR("|DEMOSAIC| Failed to flush %s. 
Data may not be written to disk.", processed_filename_.c_str()); + } + } + } processed_counter++; } else { ROS_ERROR_STREAM("IIQ File: " << filename_iiq << " failed to save to disk as jpg."); @@ -547,6 +791,44 @@ namespace phase_one return 0; }; + bool PhaseOne::compressJpegNvjpeg(const cv::Mat& bgr_image, + std::vector& output, + int quality) + { +#ifdef HAVE_NVJPEG + try { + if (bgr_image.empty() || bgr_image.type() != CV_8UC3) { + ROS_ERROR("compressJpegNvjpeg: Invalid input image (empty or wrong type)"); + return false; + } + + // Create encoder for this image size + NvJpegEncoder encoder(bgr_image.cols, bgr_image.rows, quality); + if (!encoder.isValid()) { + ROS_ERROR("compressJpegNvjpeg: Failed to create nvjpeg encoder"); + return false; + } + + // Encode the image + if (!encoder.encode(bgr_image, output)) { + ROS_ERROR("compressJpegNvjpeg: Failed to encode image"); + return false; + } + + return true; + } catch (const std::exception& e) { + ROS_ERROR_STREAM("compressJpegNvjpeg: Exception: " << e.what()); + return false; + } catch (...) 
{ + ROS_ERROR("compressJpegNvjpeg: Unknown exception"); + return false; + } +#else + ROS_ERROR("compressJpegNvjpeg: nvjpeg not available (compiled without HAVE_NVJPEG)"); + return false; +#endif + } + bool PhaseOne::dumpImage(P1::ImageSdk::RawImage rawImage, P1::ImageSdk::BitmapImage bitmap, const std::string &filename, @@ -561,9 +843,40 @@ namespace phase_one jpegConfig.quality = quality; auto ticj = std::chrono::high_resolution_clock::now(); int use_p1 = std::stoi(envoy_->get("/sys/arch/use_p1jpeg")); + int use_nvjpeg = std::stoi(envoy_->get("/sys/arch/use_nvjpeg")); + // This function for some reason takes ~5x as long if ( use_p1 == 1 ) { P1::ImageSdk::JpegWriter(filename, bitmap, rawImage, jpegConfig); + } else if ( use_nvjpeg == 1 ) { + // Use nvjpeg for GPU-accelerated encoding + cv::Mat cvImage_raw = cv::Mat(cv::Size( + bitmap.Width(), bitmap.Height()), CV_8UC3); + cvImage_raw.data = bitmap.Data().get(); + cv::Mat cvImage = cv::Mat(cv::Size( + bitmap.Width(), bitmap.Height()), CV_8UC3); + cv::cvtColor(cvImage_raw, cvImage, cv::COLOR_BGR2RGB); + + std::vector jpeg_buffer; + if (compressJpegNvjpeg(cvImage, jpeg_buffer, quality)) { + // Write the compressed JPEG to file + std::ofstream out_file(filename, std::ios::binary); + if (out_file.is_open()) { + out_file.write(reinterpret_cast(jpeg_buffer.data()), + jpeg_buffer.size()); + out_file.close(); + } else { + ROS_ERROR("|DUMP| Failed to open file for writing: %s", filename.c_str()); + return false; + } + } else { + ROS_ERROR("|DUMP| nvjpeg compression failed, falling back to OpenCV"); + // Fallback to OpenCV + std::vector compression_params; + compression_params.push_back(cv::IMWRITE_JPEG_QUALITY); + compression_params.push_back(quality); + cv::imwrite(filename, cvImage, compression_params); + } } else { cv::Mat cvImage_raw = cv::Mat(cv::Size( bitmap.Width(), bitmap.Height()), CV_8UC3); @@ -785,14 +1098,36 @@ namespace phase_one output_msg.format = "jpg"; output_msg.header = header; int quality = 
std::stoi(envoy_->get("/sys/arch/jpg/quality")); - std::vector compression_params; - compression_params.push_back(cv::IMWRITE_JPEG_QUALITY); - compression_params.push_back(quality); + int use_nvjpeg = std::stoi(envoy_->get("/sys/arch/use_nvjpeg")); + std::vector buffer; - int MB = 1024 * 1024; - buffer.resize(100 * MB); - ROS_INFO("Calling imencode"); - cv::imencode(".jpg", cvImage_raw, buffer, compression_params); + if (use_nvjpeg == 1) { + // Use nvjpeg for GPU-accelerated encoding + ROS_INFO("Calling nvjpeg encode"); + std::vector nvjpeg_buffer; + if (compressJpegNvjpeg(cvImage_raw, nvjpeg_buffer, quality)) { + buffer.assign(nvjpeg_buffer.begin(), nvjpeg_buffer.end()); + ROS_INFO("nvjpeg encode successful, size: %zu bytes", buffer.size()); + } else { + ROS_WARN("nvjpeg encode failed, falling back to OpenCV"); + // Fallback to OpenCV + int MB = 1024 * 1024; + buffer.resize(100 * MB); + std::vector compression_params; + compression_params.push_back(cv::IMWRITE_JPEG_QUALITY); + compression_params.push_back(quality); + cv::imencode(".jpg", cvImage_raw, buffer, compression_params); + } + } else { + // Use OpenCV imencode + int MB = 1024 * 1024; + buffer.resize(100 * MB); + std::vector compression_params; + compression_params.push_back(cv::IMWRITE_JPEG_QUALITY); + compression_params.push_back(quality); + ROS_INFO("Calling imencode"); + cv::imencode(".jpg", cvImage_raw, buffer, compression_params); + } ROS_INFO("Calling tocompressimage"); output_msg.data = buffer; img_bridge.toCompressedImageMsg(output_msg); @@ -1016,7 +1351,24 @@ namespace phase_one if ( count > 0 ) { // add to 'processed' file so that it won't attempt to be processed // after a reboot - processed_out << filename_iiq << std::endl; + if (!processed_out.is_open()) { + ROS_ERROR("|DETECTION| processed_out stream is not open. Attempting to reopen."); + processed_out.open(processed_filename_, std::ios_base::app); + if (!processed_out.is_open()) { + ROS_ERROR("|DETECTION| Failed to reopen %s. 
Check disk space and mount.", processed_filename_.c_str()); + } + } + if (processed_out.is_open()) { + processed_out << filename_iiq << std::endl; + if (processed_out.fail()) { + ROS_ERROR("|DETECTION| Failed to write to %s. Check disk space.", processed_filename_.c_str()); + } else { + processed_out.flush(); + if (processed_out.fail()) { + ROS_ERROR("|DETECTION| Failed to flush %s. Data may not be written to disk.", processed_filename_.c_str()); + } + } + } processed_counter++; // then remove IIQ file remove(filename_iiq.c_str()); @@ -1031,7 +1383,7 @@ namespace phase_one } else { ROS_WARN("Detections were found on an image that does not exist locally."); } - } + } lock.unlock(); // "touch" jpeg file, so timestamp still exists std::ofstream output(fname); diff --git a/src/process/view_server/src/view_server/image_view_server.py b/src/process/view_server/src/view_server/image_view_server.py index 70d546b..d3dcc4b 100755 --- a/src/process/view_server/src/view_server/image_view_server.py +++ b/src/process/view_server/src/view_server/image_view_server.py @@ -56,27 +56,35 @@ # Kamera Imports from custom_msgs.msg import SynchronizedImages, SyncedPathImages -from custom_msgs.srv import RequestImageMetadata, RequestCompressedImageView, \ - RequestImageView -from nexus.pathimg_bridge import PathImgBridge, ExtendedBridge, coerce_message, InMemBridge +from custom_msgs.srv import ( + RequestImageMetadata, + RequestCompressedImageView, + RequestImageView, +) +from nexus.pathimg_bridge import ( + PathImgBridge, + ExtendedBridge, + coerce_message, + InMemBridge, +) from view_server.img_nexus import Nexus -MEM_TRANSPORT_DIR = os.environ.get('MEM_TRANSPORT_DIR', False) -MEM_TRANSPORT_NS = os.environ.get('MEM_TRANSPORT_NS', False) +MEM_TRANSPORT_DIR = os.environ.get("MEM_TRANSPORT_DIR", False) +MEM_TRANSPORT_NS = os.environ.get("MEM_TRANSPORT_NS", False) if MEM_TRANSPORT_DIR: - print('MEM_TRANSPORT_DIR', MEM_TRANSPORT_DIR) - membridge = PathImgBridge(name='VS') + 
print("MEM_TRANSPORT_DIR", MEM_TRANSPORT_DIR) + membridge = PathImgBridge(name="VS") membridge.dirname = MEM_TRANSPORT_DIR SyncedImageMsg = SyncedPathImages elif MEM_TRANSPORT_NS: - print('MEM_TRANSPORT_NS', MEM_TRANSPORT_NS) - membridge = InMemBridge(name='VS') + print("MEM_TRANSPORT_NS", MEM_TRANSPORT_NS) + membridge = InMemBridge(name="VS") membridge.dirname = MEM_TRANSPORT_NS SyncedImageMsg = SyncedPathImages else: - membridge = ExtendedBridge(name='VS') + membridge = ExtendedBridge(name="VS") SyncedImageMsg = SynchronizedImages bridge = CvBridge() @@ -88,6 +96,7 @@ import threading from contextlib import contextmanager + class NopContext(object): @property @@ -119,7 +128,6 @@ def release(self): self._lock.release() - def get_interpolation(interpolation): if interpolation == 4: flags = cv2.INTER_LANCZOS4 | cv2.WARP_INVERSE_MAP @@ -141,10 +149,18 @@ class ImageViewServer(object): received by this node. """ - def __init__(self, sync_image_topic, rgb_service_topic=None, - rgb_metadata_service_topic=None, ir_service_topic=None, - ir_metadata_service_topic=None, uv_service_topic=None, - uv_metadata_service_topic=None, rgb_queue=None): + + def __init__( + self, + sync_image_topic, + rgb_service_topic=None, + rgb_metadata_service_topic=None, + ir_service_topic=None, + ir_metadata_service_topic=None, + uv_service_topic=None, + uv_metadata_service_topic=None, + rgb_queue=None, + ): """ :param sync_image_topic: Topic providing SynchronizedImages messages. 
:type sync_image_topic: str @@ -163,74 +179,104 @@ def __init__(self, sync_image_topic, rgb_service_topic=None, self.ir_msg = None self.uv_msg = None - self.frame2newimg = {"rgb_msg":dict(), "ir_msg":dict(), "uv_msg":dict()} - self.frame2hash = {"rgb_msg":dict(), "ir_msg":dict(), "uv_msg":dict()} + self.frame2newimg = {"rgb_msg": dict(), "ir_msg": dict(), "uv_msg": dict()} + self.frame2hash = {"rgb_msg": dict(), "ir_msg": dict(), "uv_msg": dict()} if rgb_queue is None: raise ValueError("You must provide a rgb_queue parameter") self.rgb_queue = rgb_queue # type: queue.dequeue - self.queue = {"rgb_msg": Queue(1), "ir_msg": Queue(1), "uv_msg": Queue(1), } - self.img_stamp_blocks = {"rgb_msg": ConditionalRendezvous(1), "ir_msg": ConditionalRendezvous(1), "uv_msg": ConditionalRendezvous(1), } - self.req_hash_blocks = {"rgb_msg": ConditionalRendezvous(1), "ir_msg": ConditionalRendezvous(1), "uv_msg": ConditionalRendezvous(1), } - + self.queue = { + "rgb_msg": Queue(1), + "ir_msg": Queue(1), + "uv_msg": Queue(1), + } + self.img_stamp_blocks = { + "rgb_msg": ConditionalRendezvous(1), + "ir_msg": ConditionalRendezvous(1), + "uv_msg": ConditionalRendezvous(1), + } + self.req_hash_blocks = { + "rgb_msg": ConditionalRendezvous(1), + "ir_msg": ConditionalRendezvous(1), + "uv_msg": ConditionalRendezvous(1), + } if isinstance(SyncedImageMsg(), SyncedPathImages): - sync_image_topic += '_shm' + sync_image_topic += "_shm" - hostname_ns = '/' + socket.gethostname() + hostname_ns = "/" + socket.gethostname() # rospy.loginfo('Subscribing to SynchronizedImages topic \'{}\''.format(sync_image_topic)) # rospy.Subscriber(sync_image_topic, SyncedImageMsg, self.sync_images_callback, queue_size=1) - self.enabled = {'rgb': True, 'uv': True, 'ir': True} + self.enabled = {"rgb": True, "uv": True, "ir": True} # todo: deal with channel enable config - def subscribe_to_single_image(modality='rgb'): + def subscribe_to_single_image(modality="rgb"): if self.enabled[modality]: - topic = 
rospy.get_param('_topic'.format(modality), - hostname_ns + '/{}/image_raw'.format(modality)) - - rospy.loginfo('Subscribing to Images topic \'{}\''.format(topic)) - rospy.Subscriber(topic, Image, self.any_queue_callback, - callback_args=modality, queue_size=1) - - def subscribe_to_image_service(service_topic, metadata_service_topic, - key): + topic = rospy.get_param( + "_topic".format(modality), + hostname_ns + "/{}/image_raw".format(modality), + ) + + rospy.loginfo("Subscribing to Images topic '{}'".format(topic)) + rospy.Subscriber( + topic, + Image, + self.any_queue_callback, + callback_args=modality, + queue_size=1, + ) + + def subscribe_to_image_service(service_topic, metadata_service_topic, key): if service_topic is not None: - rospy.loginfo('Creating RequestImageView service to provide ' - '\'%s\' image views on topic \'%s\'' % - (key,service_topic)) - rospy.Service(service_topic, RequestImageView, - lambda req: self.image_patch_service_request(req, - key, - False)) + rospy.loginfo( + "Creating RequestImageView service to provide " + "'%s' image views on topic '%s'" % (key, service_topic) + ) + rospy.Service( + service_topic, + RequestImageView, + lambda req: self.image_patch_service_request(req, key, False), + ) if service_topic is not None: - compressed_service_topic = '%s/compressed' % service_topic - rospy.loginfo('Creating RequestCompressedImageView service to ' - 'provide \'%s\' image views on topic \'%s\'' % - (key,compressed_service_topic)) - rospy.Service(compressed_service_topic, - RequestCompressedImageView, - lambda req: self.image_patch_service_request(req, - key, - True)) + compressed_service_topic = "%s/compressed" % service_topic + rospy.loginfo( + "Creating RequestCompressedImageView service to " + "provide '%s' image views on topic '%s'" + % (key, compressed_service_topic) + ) + rospy.Service( + compressed_service_topic, + RequestCompressedImageView, + lambda req: self.image_patch_service_request(req, key, True), + ) if 
metadata_service_topic is not None: - rospy.loginfo('Creating RequestImageMetadata service to ' - 'provide \'%s\' image metadata via ' - 'RequestImageMetadata on topic \'%s\'' % - (key,metadata_service_topic)) - rospy.Service(metadata_service_topic, RequestImageMetadata, - lambda req: self.metadata_service_topic_request(req, - key)) + rospy.loginfo( + "Creating RequestImageMetadata service to " + "provide '%s' image metadata via " + "RequestImageMetadata on topic '%s'" % (key, metadata_service_topic) + ) + rospy.Service( + metadata_service_topic, + RequestImageMetadata, + lambda req: self.metadata_service_topic_request(req, key), + ) for modality in self.enabled: subscribe_to_single_image(modality) - subscribe_to_image_service(rgb_service_topic, rgb_metadata_service_topic, 'rgb_msg') - subscribe_to_image_service(ir_service_topic, ir_metadata_service_topic, 'ir_msg') - subscribe_to_image_service(uv_service_topic, uv_metadata_service_topic, 'uv_msg') + subscribe_to_image_service( + rgb_service_topic, rgb_metadata_service_topic, "rgb_msg" + ) + subscribe_to_image_service( + ir_service_topic, ir_metadata_service_topic, "ir_msg" + ) + subscribe_to_image_service( + uv_service_topic, uv_metadata_service_topic, "uv_msg" + ) @property def nop_lock(self): @@ -241,8 +287,8 @@ def nop_lock(self): def nopContext(self): yield True - def any_queue_callback(self, msg, modality='rgb'): - modality = modality.lower() + '_msg' + def any_queue_callback(self, msg, modality="rgb"): + modality = modality.lower() + "_msg" rospy.loginfo("image callback {}".format(modality)) for frame in self.frame2newimg[modality]: try: @@ -256,44 +302,50 @@ def sync_images_callback(self, msg): rospy.loginfo("sync images callback") with self.nop_lock: try: - rgb_str = ('RGB=%ix%i %s' % (msg.image_rgb.width, - msg.image_rgb.height, - msg.image_rgb.encoding)) + rgb_str = "RGB=%ix%i %s" % ( + msg.image_rgb.width, + msg.image_rgb.height, + msg.image_rgb.encoding, + ) modality = "rgb_msg" img_rendezvous = 
self.img_stamp_blocks[modality] img_rendezvous.put(msg.header.stamp) # queue = self.queue[modality] # if queue.empty(): # rospy.loginfo("Enqueued: {}".format(modality)) - # queue.put(True) + # queue.put(True) # else: # rospy.loginfo("Full or something: {}".format(modality)) except Exception as exc: - rospy.logerr('RGB fail: {}: {}'.format(exc.__class__.__name__, exc)) - rgb_str = 'No RGB' + rospy.logerr("RGB fail: {}: {}".format(exc.__class__.__name__, exc)) + rgb_str = "No RGB" try: - ir_str = ('IR=%ix%i %s' % (msg.image_ir.width, - msg.image_ir.height, - msg.image_ir.encoding)) + ir_str = "IR=%ix%i %s" % ( + msg.image_ir.width, + msg.image_ir.height, + msg.image_ir.encoding, + ) modality = "ir_msg" img_rendezvous = self.img_stamp_blocks[modality] img_rendezvous.put(msg.header.stamp) # queue = self.queue[modality] # if queue.empty(): - # rospy.loginfo("Enqueued: {}".format(modality)) - # queue.put(True) + # rospy.loginfo("Enqueued: {}".format(modality)) + # queue.put(True) # else: # rospy.loginfo("Full or something: {}".format(modality)) except Exception as exc: - rospy.logerr('IR fail {}: {}'.format(exc.__class__.__name__, exc)) - ir_str = 'No IR' + rospy.logerr("IR fail {}: {}".format(exc.__class__.__name__, exc)) + ir_str = "No IR" try: - uv_str = ('UV=%ix%i %s' % (msg.image_uv.width, - msg.image_uv.height, - msg.image_uv.encoding)) + uv_str = "UV=%ix%i %s" % ( + msg.image_uv.width, + msg.image_uv.height, + msg.image_uv.encoding, + ) modality = "uv_msg" img_rendezvous = self.img_stamp_blocks[modality] @@ -305,11 +357,13 @@ def sync_images_callback(self, msg): # else: # rospy.loginfo("Full or something: {}".format(modality)) except Exception as exc: - rospy.logerr('UV fail {}: {}'.format(exc.__class__.__name__, exc)) - uv_str = 'No UV' + rospy.logerr("UV fail {}: {}".format(exc.__class__.__name__, exc)) + uv_str = "No UV" - rospy.loginfo('Received SynchronizedImages message with [%s] [%s] ' - '[%s]' % (rgb_str,ir_str,uv_str)) + rospy.loginfo( + "Received 
SynchronizedImages message with [%s] [%s] " + "[%s]" % (rgb_str, ir_str, uv_str) + ) self.rgb_msg = msg.image_rgb self.ir_msg = msg.image_ir self.uv_msg = msg.image_uv @@ -324,13 +378,13 @@ def image_patch_service_request(self, req, modality, compress): """ tic = time.time() req_hash = hash_genpy_msg(req) - #rospy.loginfo('Requesting {} \'{}\' image view of size {} x {}: {}'.format( + # rospy.loginfo('Requesting {} \'{}\' image view of size {} x {}: {}'.format( # 'compressed' if compress else '', modality, # req.output_width, req.output_height, req_hash)) with self.nop_lock: # rospy.logwarn('pre lock') - if modality == 'rgb_msg': + if modality == "rgb_msg": try: img_msg = self.rgb_queue[0] except IndexError as exc: @@ -343,7 +397,7 @@ def image_patch_service_request(self, req, modality, compress): # rospy.logwarn('post lock') if img_msg is None: - #rospy.logerr('exit early due to lack of encoding') + # rospy.logerr('exit early due to lack of encoding') return False, Image() try: @@ -357,9 +411,9 @@ def image_patch_service_request(self, req, modality, compress): if newimg or not stale_hash: try: - image = membridge.imgmsg_to_cv2(img_msg, 'passthrough') + image = membridge.imgmsg_to_cv2(img_msg, "passthrough") except Exception as exc: - rospy.logerr('{}: {}'.format(exc.__class__.__name__, exc)) + rospy.logerr("{}: {}".format(exc.__class__.__name__, exc)) return False, Image() else: return True, Image() @@ -377,22 +431,23 @@ def image_patch_service_request(self, req, modality, compress): dsize = (req.output_width, req.output_height) if modality == "ir_msg": if req.apply_clahe: - bright_px = 5 + bright_px = 20 h, w = image.shape pxs = h * w - top_percentile = ((pxs - bright_px) / pxs) * 100 + #top_percentile = ((pxs - bright_px) / pxs) * 100 + top_percentile = 100 stretch_percentiles = [1, top_percentile] - img = image.astype("float32") - mi = np.percentile( img, stretch_percentiles[0] ) - ma = np.percentile( img, stretch_percentiles[1] ) - normalized = ( img - mi ) / 
( ma - mi) - normalized = np.clip(normalized, 0, 1) + img = image.astype("uint16") + mi = np.percentile(img, stretch_percentiles[0]) + ma = np.percentile(img, stretch_percentiles[1]) + normalized = (img - mi) / (ma - mi) + #normalized = np.clip(normalized, 0, 1) normalized = normalized * 255 + normalized[normalized < 0] = 0 image = np.round(normalized).astype("uint8") - homography = np.reshape(req.homography, (3,3)).astype(np.float32) - raw_image = cv2.warpPerspective(image, homography, dsize=dsize, - flags=flags) + homography = np.reshape(req.homography, (3, 3)).astype(np.float32) + raw_image = cv2.warpPerspective(image, homography, dsize=dsize, flags=flags) if modality == "ir_msg": # Don't need mono16 for display @@ -425,7 +480,7 @@ def image_patch_service_request(self, req, modality, compress): # raise NotImplementedError('disabled for now') out_msg = CompressedImage() out_msg.format = "jpeg" - out_msg.data = np.array(cv2.imencode('.jpg', image2)[1]).tostring() + out_msg.data = np.array(cv2.imencode(".jpg", image2)[1]).tostring() out_msg.header = img_msg.header # rospy.logwarn("Compressed") else: @@ -435,7 +490,11 @@ def image_patch_service_request(self, req, modality, compress): # Cache the request hash so we can block the next time around toc = time.time() - rospy.loginfo("{:.2f} Releasing {: >3}".format(img_msg.header.stamp.to_sec(), modality[:3])) + rospy.loginfo( + "{:.2f} Releasing {: >3}".format( + img_msg.header.stamp.to_sec(), modality[:3] + ) + ) print("Time to process request was %0.3fs" % (toc - tic)) return True, out_msg @@ -453,9 +512,9 @@ def metadata_service_topic_request(self, req, modality): img_msg0 = getattr(self, modality) if img_msg0 is None: - msg = (False,0,0,'') + msg = (False, 0, 0, "") else: - msg = (True,img_msg0.height,img_msg0.width,img_msg0.encoding) + msg = (True, img_msg0.height, img_msg0.width, img_msg0.encoding) # print('metadata: {}'.format(msg)) # invalidate cache lanes @@ -468,9 +527,10 @@ def 
metadata_service_topic_request(self, req, modality): def set_up_nexus(rgb_queue): import socket + # node = 'img_nexus' # rospy.init_node(node) - print('Parent', rospy.get_namespace()) + print("Parent", rospy.get_namespace()) node_name = rospy.get_name() # for param in rospy.get_param_names(): # print(param) @@ -479,20 +539,29 @@ def set_up_nexus(rgb_queue): root@nuvo0:~/kamera_ws# rosparam get /nuvo0/img_nexus {ir_topic: ir/image_raw, max_wait: 0.9, out_topic: synched, rgb_topic: rgb/image_raw, uv_topic: uv/image_raw, verbosity: 9}""" - hostname_ns = '/' + socket.gethostname() - sys_prefix = hostname_ns + '/img_nexus' - verbosity = rospy.get_param('verbosity', 9) - rgb_topic = rospy.get_param('rgb_topic', hostname_ns + '/rgb/image_raw') - ir_topic = rospy.get_param('ir_topic', hostname_ns + '/ir/image_raw') - uv_topic = rospy.get_param('uv_topic', hostname_ns + '/uv/image_raw') + hostname_ns = "/" + socket.gethostname() + sys_prefix = hostname_ns + "/img_nexus" + verbosity = rospy.get_param("verbosity", 9) + rgb_topic = rospy.get_param("rgb_topic", hostname_ns + "/rgb/image_raw") + ir_topic = rospy.get_param("ir_topic", hostname_ns + "/ir/image_raw") + uv_topic = rospy.get_param("uv_topic", hostname_ns + "/uv/image_raw") # out_topic = rospy.get_param('out_topic', sys_prefix + 'synced') - out_topic = hostname_ns + '/synched' - max_wait = rospy.get_param('/max_frame_period', 444) / 1000.0 - send_image_data = rospy.get_param('~send_image_data') - compress_imagery = rospy.get_param('~compress_imagery') - - nexus = Nexus(rgb_topic, ir_topic, uv_topic, out_topic, compress_imagery, - send_image_data, max_wait, rgb_queue=rgb_queue, verbosity=verbosity) + out_topic = hostname_ns + "/synched" + max_wait = rospy.get_param("/max_frame_period", 444) / 1000.0 + send_image_data = rospy.get_param("~send_image_data") + compress_imagery = rospy.get_param("~compress_imagery") + + nexus = Nexus( + rgb_topic, + ir_topic, + uv_topic, + out_topic, + compress_imagery, + send_image_data, + 
max_wait, + rgb_queue=rgb_queue, + verbosity=verbosity, + ) return nexus @@ -503,51 +572,69 @@ def rospy_spin(delay=1.0): """ if not rospy.core.is_initialized(): - raise rospy.exceptions.ROSInitException("client code must call rospy.init_node() first") - rospy.logdebug("node[%s, %s] entering spin(), pid[%s]", rospy.core.get_caller_id(), rospy.core.get_node_uri(), - os.getpid()) + raise rospy.exceptions.ROSInitException( + "client code must call rospy.init_node() first" + ) + rospy.logdebug( + "node[%s, %s] entering spin(), pid[%s]", + rospy.core.get_caller_id(), + rospy.core.get_node_uri(), + os.getpid(), + ) try: while not rospy.core.is_shutdown(): rospy.rostime.wallsleep(delay) - rospy.loginfo('spin') + rospy.loginfo("spin") # print('.', end='') except KeyboardInterrupt: rospy.logdebug("keyboard interrupt, shutting down") - rospy.core.signal_shutdown('keyboard interrupt') + rospy.core.signal_shutdown("keyboard interrupt") def main(): # Launch the node. - node = 'image_view_server' + node = "image_view_server" rospy.init_node(node, anonymous=False) node_name = rospy.get_name() # -------------------------- Read Parameters ----------------------------- - sync_image_topic = rospy.get_param('%s/sync_image_topic' % node_name) - rgb_service_topic = rospy.get_param('%s/rgb_service_topic' % node_name) - ir_service_topic = rospy.get_param('%s/ir_service_topic' % node_name) - uv_service_topic = rospy.get_param('%s/uv_service_topic' % node_name) - - rgb_metadata_service_topic = rospy.get_param('%s/rgb_metadata_service_topic' % node_name) - ir_metadata_service_topic = rospy.get_param('%s/ir_metadata_service_topic' % node_name) - uv_metadata_service_topic = rospy.get_param('%s/uv_metadata_service_topic' % node_name) + sync_image_topic = rospy.get_param("%s/sync_image_topic" % node_name) + rgb_service_topic = rospy.get_param("%s/rgb_service_topic" % node_name) + ir_service_topic = rospy.get_param("%s/ir_service_topic" % node_name) + uv_service_topic = 
rospy.get_param("%s/uv_service_topic" % node_name) + + rgb_metadata_service_topic = rospy.get_param( + "%s/rgb_metadata_service_topic" % node_name + ) + ir_metadata_service_topic = rospy.get_param( + "%s/ir_metadata_service_topic" % node_name + ) + uv_metadata_service_topic = rospy.get_param( + "%s/uv_metadata_service_topic" % node_name + ) # ------------------------------------------------------------------------ # Share debayered RGB images between nexus and image view rgb_queue = deque(maxlen=1) nexus = set_up_nexus(rgb_queue) - ImageViewServer(sync_image_topic, rgb_service_topic, - rgb_metadata_service_topic, ir_service_topic, - ir_metadata_service_topic, uv_service_topic, - uv_metadata_service_topic, rgb_queue=rgb_queue) + ImageViewServer( + sync_image_topic, + rgb_service_topic, + rgb_metadata_service_topic, + ir_service_topic, + ir_metadata_service_topic, + uv_service_topic, + uv_metadata_service_topic, + rgb_queue=rgb_queue, + ) rospy_spin() -if __name__ == '__main__': +if __name__ == "__main__": try: main() except rospy.ROSInterruptException: - print('Interrupt') + print("Interrupt") pass diff --git a/src/run_scripts/entry/cam_phaseone.sh b/src/run_scripts/entry/cam_phaseone.sh index d955267..3c1ed22 100755 --- a/src/run_scripts/entry/cam_phaseone.sh +++ b/src/run_scripts/entry/cam_phaseone.sh @@ -104,6 +104,10 @@ if [[ $(redis-cli --raw -h $REDIS_HOST get /debug/rebuild ) == "true" ]]; then fi fi +echo "Building to_process.txt" +rm -f /mnt/data/to_process.txt +find /mnt/data/iiq_buffer -name "*.IIQ" > /mnt/data/to_process.txt + export ROS_NAMESPACE="/${NODE_HOSTNAME}/${CAM_MODE}" exec roslaunch "${ROSWAIT}" phase_one phase_one_standalone.launch \ ip_address:=${CAM_IP} \