BosonCamera.cpp
/*
 * Copyright © 2019 AutonomouStuff, LLC
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this
 * software and associated documentation files (the "Software"), to deal in the Software
 * without restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies
 * or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
 * PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
 * OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <pluginlib/class_list_macros.h>
#include "flir_boson_usb/BosonCamera.h"

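// Register BosonCamera as a nodelet plugin so a nodelet manager can discover and load it at runtime.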
PLUGINLIB_EXPORT_CLASS(flir_boson_usb::BosonCamera, nodelet::Nodelet)

using namespace cv;
using namespace flir_boson_usb;

BosonCamera::BosonCamera() :
  cv_img()
{
}

BosonCamera::~BosonCamera()
{
  closeCamera();
}

void BosonCamera::onInit()
{
  nh = getNodeHandle();
  pnh = getPrivateNodeHandle();
  camera_info = std::shared_ptr<camera_info_manager::CameraInfoManager>(
      new camera_info_manager::CameraInfoManager(nh));
  it = std::shared_ptr<image_transport::ImageTransport>(new image_transport::ImageTransport(nh));
  image_pub = it->advertiseCamera("image_raw", 1);
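  // advertiseCamera() pairs the "image_raw" topic with a matching "camera_info" topic, so the
  // image and its CameraInfo are published together in captureAndPublish().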

  bool exit = false;

  pnh.param<std::string>("frame_id", frame_id, "boson_camera");
  pnh.param<std::string>("dev", dev_path, "/dev/video0");
  pnh.param<float>("frame_rate", frame_rate, 60.0);
  pnh.param<std::string>("video_mode", video_mode_str, "RAW16");
  pnh.param<bool>("zoom_enable", zoom_enable, false);
  pnh.param<std::string>("sensor_type", sensor_type_str, "Boson_640");
  pnh.param<std::string>("camera_info_url", camera_info_url, "");

  ROS_INFO("flir_boson_usb - Got frame_id: %s.", frame_id.c_str());
  ROS_INFO("flir_boson_usb - Got dev: %s.", dev_path.c_str());
  ROS_INFO("flir_boson_usb - Got frame rate: %f.", frame_rate);
  ROS_INFO("flir_boson_usb - Got video mode: %s.", video_mode_str.c_str());
  ROS_INFO("flir_boson_usb - Got zoom enable: %s.", (zoom_enable ? "true" : "false"));
  ROS_INFO("flir_boson_usb - Got sensor type: %s.", sensor_type_str.c_str());
  ROS_INFO("flir_boson_usb - Got camera_info_url: %s.", camera_info_url.c_str());

  if (video_mode_str == "RAW16")
  {
    video_mode = RAW16;
  }
  else if (video_mode_str == "YUV")
  {
    video_mode = YUV;
  }
  else
  {
    exit = true;
    ROS_ERROR("flir_boson_usb - Invalid video_mode value provided. Exiting.");
  }

  if (sensor_type_str == "Boson_320" ||
      sensor_type_str == "boson_320")
  {
    sensor_type = Boson320;
    camera_info->setCameraName("Boson320");
  }
  else if (sensor_type_str == "Boson_640" ||
           sensor_type_str == "boson_640")
  {
    sensor_type = Boson640;
    camera_info->setCameraName("Boson640");
  }
  else
  {
    exit = true;
    ROS_ERROR("flir_boson_usb - Invalid sensor_type value provided. Exiting.");
  }

  if (camera_info->validateURL(camera_info_url))
  {
    camera_info->loadCameraInfo(camera_info_url);
  }
  else
  {
    ROS_INFO("flir_boson_usb - camera_info_url could not be validated. Publishing with unconfigured camera.");
  }

  if (!exit)
    exit = !openCamera();

  if (exit)
  {
    ros::shutdown();
    return;
  }
  else
  {
    capture_timer = nh.createTimer(ros::Duration(1.0 / frame_rate),
        boost::bind(&BosonCamera::captureAndPublish, this, _1));
  }
}

// AGC Sample ONE: Linear from min to max.
// Input is a MATRIX (height x width) of 16 bits (OpenCV Mat).
// Output is a MATRIX (height x width) of 8 bits (OpenCV Mat).
void BosonCamera::agcBasicLinear(const Mat& input_16,
                                 Mat* output_8,
                                 const int& height,
                                 const int& width)
{
  int i, j;  // aux variables

  // auxiliary variables for AGC calculation
  unsigned int max1 = 0;         // 16 bits
  unsigned int min1 = 0xFFFF;    // 16 bits
  unsigned int value1, value2, value3, value4;

  // RUN a super basic AGC: first pass finds the global min and max of the 16-bit frame
  for (i = 0; i < height; i++)
  {
    for (j = 0; j < width; j++)
    {
      value1 = input_16.at<uchar>(i, j * 2 + 1) & 0xFF;  // High Byte
      value2 = input_16.at<uchar>(i, j * 2) & 0xFF;      // Low Byte
      value3 = (value1 << 8) + value2;

      if (value3 <= min1)
        min1 = value3;

      if (value3 >= max1)
        max1 = value3;
    }
  }

  // Guard against division by zero on a completely uniform frame
  if (max1 == min1)
    max1 = min1 + 1;

  // Second pass: linearly rescale each 16-bit value to the 8-bit range [0, 255]
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      value1 = input_16.at<uchar>(i, j * 2 + 1) & 0xFF;     // High Byte
      value2 = input_16.at<uchar>(i, j * 2) & 0xFF;         // Low Byte
      value3 = (value1 << 8) + value2;
      value4 = (255 * (value3 - min1)) / (max1 - min1);

      output_8->at<uchar>(i, j) = static_cast<uint8_t>(value4 & 0xFF);
    }
  }
}

bool BosonCamera::openCamera()
{
  // Open the Video device
  if ((fd = open(dev_path.c_str(), O_RDWR)) < 0)
  {
    ROS_ERROR("flir_boson_usb - ERROR : OPEN. Invalid Video Device.");
    return false;
  }

  // Check VideoCapture mode is available
  if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0)
  {
    ROS_ERROR("flir_boson_usb - ERROR : VIDIOC_QUERYCAP. Video Capture is not available.");
    return false;
  }

  if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE))
  {
    ROS_ERROR("flir_boson_usb - The device does not handle single-planar video capture.");
    return false;
  }

  struct v4l2_format format;
  memset(&format, 0, sizeof(format));

  // Two different FORMAT modes, 8 bits vs RAW16
  if (video_mode == RAW16)
  {
    // Request the thermal 16-bit mode
    format.fmt.pix.pixelformat = V4L2_PIX_FMT_Y16;

    // Select the frame SIZE (will depend on the type of sensor)
    switch (sensor_type)
    {
      case Boson320:  // Boson320
        width = 320;
        height = 256;
        break;
      case Boson640:  // Boson640
        width = 640;
        height = 512;
        break;
      default:  // Boson320
        width = 320;
        height = 256;
        break;
    }
  }
  else  // 8 bits is always 640x512 (even for a Boson 320)
  {
    format.fmt.pix.pixelformat = V4L2_PIX_FMT_YVU420;  // thermal, works   LUMA, full Cr, full Cb
    width = 640;
    height = 512;
  }

  // Common variables
  format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  format.fmt.pix.width = width;
  format.fmt.pix.height = height;

  // Request the desired FORMAT
  if (ioctl(fd, VIDIOC_S_FMT, &format) < 0)
  {
    ROS_ERROR("flir_boson_usb - VIDIOC_S_FMT error. The camera does not support the requested video format.");
    return false;
  }

  // We need to inform the device about the buffers we are going to use
  // and allocate them. We'll use a single buffer and map our memory using mmap.
  // All this information is sent using the VIDIOC_REQBUFS call and a
  // v4l2_requestbuffers structure:
  struct v4l2_requestbuffers bufrequest;
  bufrequest.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  bufrequest.memory = V4L2_MEMORY_MMAP;
  bufrequest.count = 1;   // we are asking for one buffer

  if (ioctl(fd, VIDIOC_REQBUFS, &bufrequest) < 0)
  {
    ROS_ERROR("flir_boson_usb - VIDIOC_REQBUFS error. The camera failed to allocate a buffer.");
    return false;
  }

  // Now that the device knows how to provide its data,
  // we need to ask it about the amount of memory it needs
  // and allocate it. This information is retrieved using the VIDIOC_QUERYBUF call
  // and its v4l2_buffer structure.

  memset(&bufferinfo, 0, sizeof(bufferinfo));

  bufferinfo.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  bufferinfo.memory = V4L2_MEMORY_MMAP;
  bufferinfo.index = 0;

  if (ioctl(fd, VIDIOC_QUERYBUF, &bufferinfo) < 0)
  {
    ROS_ERROR("flir_boson_usb - VIDIOC_QUERYBUF error. Failed to retrieve buffer information.");
    return false;
  }

  // Map fd+offset into a process location (the kernel decides where, due to our NULL).
  // Length and access properties are also passed.
  buffer_start = mmap(NULL, bufferinfo.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, bufferinfo.m.offset);

  if (buffer_start == MAP_FAILED)
  {
    ROS_ERROR("flir_boson_usb - mmap error. Failed to create a memory map for buffer.");
    return false;
  }

  // Fill this buffer with zeros. Initialization. Optional but nice to do.
  memset(buffer_start, 0, bufferinfo.length);

  // Activate streaming
  int type = bufferinfo.type;
  if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
  {
    ROS_ERROR("flir_boson_usb - VIDIOC_STREAMON error. Failed to activate streaming on the camera.");
    return false;
  }

  // Declarations for the RAW16 representation
  // (used when reading the RAW16 format from a Boson320 or Boson640)
  // OpenCV input buffer: two bytes per pixel (RAW16 mode)
  thermal16 = Mat(height, width, CV_16U, buffer_start);
  // OpenCV output buffer: data used to display the video
  thermal16_linear = Mat(height, width, CV_8U, 1);

  // Declarations for the 8-bit YCbCr mode
  // (used when reading the YUV format; Boson320 and Boson640 are 4:2:0)
  int luma_height = height + height / 2;
  int luma_width = width;
  int color_space = CV_8UC1;

  // Declarations for the Zoom representation
  // (used or not depending on the zoom_enable argument)
  thermal_luma = Mat(luma_height, luma_width, color_space, buffer_start);  // OpenCV input buffer
  // OpenCV output buffer, BGR -> three color planes:
  // (640 - 640 - 640 : p11 p21 p31 .... / p12 p22 p32 ..../ p13 p23 p33 ...)
  thermal_rgb = Mat(height, width, CV_8UC3, 1);

  return true;
}

bool BosonCamera::closeCamera()
{
  // Finish loop. Exiting.
  // Deactivate streaming
  int type = bufferinfo.type;
  if (ioctl(fd, VIDIOC_STREAMOFF, &type) < 0)
  {
    ROS_ERROR("flir_boson_usb - VIDIOC_STREAMOFF error. Failed to disable streaming on the camera.");
    return false;
  }

  close(fd);

  return true;
}

void BosonCamera::captureAndPublish(const ros::TimerEvent& evt)
{
  Size size(640, 512);

  sensor_msgs::CameraInfoPtr
    ci(new sensor_msgs::CameraInfo(camera_info->getCameraInfo()));

  ci->header.frame_id = frame_id;

  // Put the buffer in the incoming queue.
  if (ioctl(fd, VIDIOC_QBUF, &bufferinfo) < 0)
  {
    ROS_ERROR("flir_boson_usb - VIDIOC_QBUF error. Failed to queue the image buffer.");
    return;
  }

  // Wait for the buffer in the outgoing queue.
  if (ioctl(fd, VIDIOC_DQBUF, &bufferinfo) < 0)
  {
    ROS_ERROR("flir_boson_usb - VIDIOC_DQBUF error. Failed to dequeue the image buffer.");
    return;
  }

  if (video_mode == RAW16)
  {
    // -----------------------------
    // RAW16 DATA
    agcBasicLinear(thermal16, &thermal16_linear, height, width);

    // Display thermal after 16-bit AGC... will display an image
    if (!zoom_enable)
    {
      // Threshold using Otsu's method, then use the result as a mask on the original image
      Mat mask_mat, masked_img;
      threshold(thermal16_linear, mask_mat, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
      thermal16_linear.copyTo(masked_img, mask_mat);

      // Normalize the pixel values to the range [0, 1], raise to the power gamma, then convert back for display.
      Mat d_out_img, norm_image, d_norm_image, gamma_corrected_image, d_gamma_corrected_image;
      double gamma = 0.8;
      masked_img.convertTo(d_out_img, CV_64FC1);
      normalize(d_out_img, d_norm_image, 0, 1, NORM_MINMAX, CV_64FC1);
      pow(d_norm_image, gamma, d_gamma_corrected_image);
      d_gamma_corrected_image.convertTo(gamma_corrected_image, CV_8UC1, 255.0);
      normalize(gamma_corrected_image, gamma_corrected_image, 0, 255, NORM_MINMAX, CV_8UC1);

      // Apply a top-hat filter
      int erosion_size = 5;
      Mat top_hat_img, kernel = getStructuringElement(MORPH_ELLIPSE,
          Size(2 * erosion_size + 1, 2 * erosion_size + 1));
      morphologyEx(gamma_corrected_image, top_hat_img, MORPH_TOPHAT, kernel);
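      // NOTE: the Otsu mask, gamma-corrected image, and top-hat result computed above are not
      // published anywhere; only the plain AGC output (thermal16_linear) is sent below.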

      cv_img.image = thermal16_linear;
      cv_img.header.stamp = ros::Time::now();
      cv_img.header.frame_id = frame_id;
      cv_img.encoding = "mono8";
      pub_image = cv_img.toImageMsg();

      ci->header.stamp = pub_image->header.stamp;
      image_pub.publish(pub_image, ci);
    }
    else
    {
      resize(thermal16_linear, thermal16_linear_zoom, size);

      cv_img.image = thermal16_linear_zoom;
      cv_img.header.stamp = ros::Time::now();
      cv_img.header.frame_id = frame_id;
      cv_img.encoding = "mono8";
      pub_image = cv_img.toImageMsg();

      ci->header.stamp = pub_image->header.stamp;
      image_pub.publish(pub_image, ci);
    }
  }
  else  // Video is in 8-bit YUV
  {
    // ---------------------------------
    // DATA in YUV
    cvtColor(thermal_luma, thermal_rgb, COLOR_YUV2GRAY_I420, 0);

    cv_img.image = thermal_rgb;
    cv_img.encoding = "mono8";
    cv_img.header.stamp = ros::Time::now();
    cv_img.header.frame_id = frame_id;
    pub_image = cv_img.toImageMsg();

    ci->header.stamp = pub_image->header.stamp;
    image_pub.publish(pub_image, ci);
  }
}
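
Usage note: this file only defines the nodelet; it is normally loaded through a nodelet manager (for example from a launch file). The sketch below is a minimal, assumption-level example of loading it programmatically with nodelet::Loader; the plugin lookup name "flir_boson_usb/BosonCamera" is assumed from the PLUGINLIB_EXPORT_CLASS registration above and the package's plugin XML, and does not come from this file.

// Minimal standalone-loading sketch (not part of the original source).
// Assumes the nodelet is exported under the lookup name "flir_boson_usb/BosonCamera".
#include <string>
#include <vector>

#include <nodelet/loader.h>
#include <ros/ros.h>

int main(int argc, char** argv)
{
  ros::init(argc, argv, "boson_camera_node");

  // Load the BosonCamera nodelet into this process, reusing this node's name and remappings.
  nodelet::Loader loader;
  ros::M_string remappings(ros::names::getRemappings());
  std::vector<std::string> nodelet_argv;
  loader.load(ros::this_node::getName(), "flir_boson_usb/BosonCamera", remappings, nodelet_argv);

  ros::spin();
  return 0;
}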

