diff --git a/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.cpp b/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.cpp index 10a14574b75817ede96758e54fd1f2ea2e8c8ed6..0a6f3e2702ea544b3319aceba5276abaa5436c12 100644 --- a/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.cpp +++ b/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.cpp @@ -3677,6 +3677,30 @@ namespace VirtualRobot float zNear, float zFar, float vertFov//render param ) { + SoOffscreenRenderer offscreenRenderer{SbViewportRegion{width, height}}; + offscreenRenderer.setComponents(SoOffscreenRenderer::RGB); + offscreenRenderer.setBackgroundColor(SbColor(1.0f, 1.0f, 1.0f)); + return renderOffscreenRgbDepthPointcloud(&offscreenRenderer, + camNode, + scene, + width, height, + renderRgbImage, + rgbImage, + renderDepthImage, depthImage, + renderPointcloud, pointCloud, + zNear, zFar, vertFov); + } + + bool CoinVisualizationFactory::renderOffscreenRgbDepthPointcloud(SoOffscreenRenderer *offscreenRenderer, RobotNodePtr camNode, + SoNode *scene, short width, short height, bool renderRgbImage, std::vector<unsigned char> &rgbImage, + bool renderDepthImage, std::vector<float> &depthImage, bool renderPointcloud, + std::vector<Eigen::Vector3f> &pointCloud, float zNear, float zFar, float vertFov, float nanValue) + { + if(!offscreenRenderer) + { + VR_ERROR << "No renderer..." 
<< endl; + return false; + } //check input if (!camNode) { @@ -3696,11 +3720,6 @@ namespace VirtualRobot //setup const bool calculateDepth = renderDepthImage || renderPointcloud; const unsigned int numPixel=width*height; - - SoOffscreenRenderer offscreenRenderer{SbViewportRegion{width, height}}; - offscreenRenderer.setComponents(SoOffscreenRenderer::RGB); - offscreenRenderer.setBackgroundColor(SbColor(1.0f, 1.0f, 1.0f)); - //required to get the zBuffer DepthRenderData userdata; std::vector<float> zBuffer; @@ -3717,9 +3736,9 @@ namespace VirtualRobot userdata.w = width; userdata.buffer = zBuffer.data(); - offscreenRenderer.getGLRenderAction()->setPassCallback(getZBuffer, static_cast<void*>(&userdata)); + offscreenRenderer->getGLRenderAction()->setPassCallback(getZBuffer, static_cast<void*>(&userdata)); ///TODO find way to only render rgb once - offscreenRenderer.getGLRenderAction()->setNumPasses(2); + offscreenRenderer->getGLRenderAction()->setNumPasses(2); } //render @@ -3748,8 +3767,9 @@ namespace VirtualRobot root->addChild(scene); static bool renderErrorPrinted = false; - const bool renderOk=offscreenRenderer.render(root); - root->unref(); + const bool renderOk=offscreenRenderer->render(root); + + if (!renderOk) { if (!renderErrorPrinted) @@ -3759,10 +3779,12 @@ namespace VirtualRobot } return false; } + + root->unref(); //rgb if(renderRgbImage) { - const unsigned char* glBuffer = offscreenRenderer.getBuffer(); + const unsigned char* glBuffer = offscreenRenderer->getBuffer(); const unsigned int numValues = numPixel*3; rgbImage.resize(numValues); std::copy(glBuffer, glBuffer+ numValues, rgbImage.begin()); @@ -3851,12 +3873,14 @@ namespace VirtualRobot if(renderPointcloud) { //the cam looks along -z => rotate aroud y 180° - pointCloud.at(pixelIndex)[0]=-xEye; - pointCloud.at(pixelIndex)[1]= yEye; - pointCloud.at(pixelIndex)[2]=-zEye; + auto& point = pointCloud.at(pixelIndex); + point[0]=-xEye; + point[1]= yEye; + point[2]=(-zEye >= zFar)? 
nanValue : -zEye; } } } + if(renderDepthImage) { depthImage = std::move(zBuffer); diff --git a/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.h b/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.h index a49e0cd0c1db2e6948923b8a801eb523479d9af8..81dd1166b8fab062dde0ed5883d299fbbfbe9fba 100644 --- a/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.h +++ b/VirtualRobot/Visualization/CoinVisualization/CoinVisualizationFactory.h @@ -338,6 +338,34 @@ namespace VirtualRobot ); + /*! + * \brief Renders the given scene from the given cam position and outputs (optionally) the rgb image, depth image and point cloud. + * This version is much faster if always the same SoOffscreenRenderer is passed in. + * \param camNode The node of the robot that defines the position of the camera. Any node of the robot can host a camera. + * \param scene The scene that should be rendered. + * \param width The used image width. (>0) + * \param height The used image height. (>0) + * \param renderRgbImage Whether to output the rgb image. + * \param rgbImage The rgb image's output parameter. + * \param renderDepthImage Whether to output the depth image. + * \param depthImage The depth image's output parameter. + * \param renderPointcloud Whether to output the point cloud. + * \param pointCloud The pointcloud's output parameter. + * \param zNear The near plane's distance. + * \param zFar The far plane's distance. + * \param vertFov The fov in rad. 
(vertical) + * \param nanValue All values above this value will be mapped on this value (usually nan or 0) + * \return true on success + */ + static bool renderOffscreenRgbDepthPointcloud + (SoOffscreenRenderer* renderer, + RobotNodePtr camNode, SoNode* scene, + short width, short height, + bool renderRgbImage, std::vector<unsigned char>& rgbImage, + bool renderDepthImage, std::vector<float>& depthImage, + bool renderPointcloud, std::vector<Eigen::Vector3f>& pointCloud, + float zNear=10.f, float zFar=100000.f, float vertFov = M_PI/4, float nanValue = 0.0); + /*! * \brief Renders the given scene from the given cam position and outputs the rgb image, depth image and point cloud. * \param camNode The node of the robot that defines the position of the camera. Any node of the robot can host a camera.