/external/opencv3/doc/tutorials/viz/transformations/ |
transformations.markdown |
    9:  - How to use makeTransformToGlobal to compute pose
   29:  - Get camera pose from camera position, camera focal point and y direction.
   34:  /// We can get the pose of the cam using makeCameraPose
   49:  - Given the pose in camera coordinate system, estimate the global pose.
   51:  /// Pose of the widget in camera frame
   53:  /// Pose of the widget in global frame
   67:  - Visualize the cloud widget with the estimated global pose
   72:  - If the view point is set to be camera's, set viewer pose to **cam_pose**.
   74:  /// Set the viewer pose to that of camera
  [all...]
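The transformations tutorial indexed above composes a camera pose with a widget pose given in the camera frame. A minimal sketch of that flow, assuming a synthetic point cloud instead of the tutorial's bunny.ply and illustrative camera values:

```cpp
#include <opencv2/viz.hpp>

int main()
{
    using namespace cv;

    viz::Viz3d myWindow("Transformations");

    // Camera pose from position, focal point and up direction (illustrative values).
    Vec3d cam_pos(3.0, 3.0, 3.0), cam_focal_point(3.0, 3.0, 2.0), cam_y_dir(-1.0, 0.0, 0.0);
    Affine3d cam_pose = viz::makeCameraPose(cam_pos, cam_focal_point, cam_y_dir);

    // Transform that maps the camera frame into the global frame.
    Affine3d transform = viz::makeTransformToGlobal(Vec3d(0, -1, 0), Vec3d(-1, 0, 0),
                                                    Vec3d(0, 0, -1), cam_pos);

    // Placeholder cloud: random points instead of the tutorial's bunny.ply.
    Mat cloud(1, 100, CV_32FC3);
    randu(cloud, Scalar(-1, -1, -1), Scalar(1, 1, 1));

    // Pose of the widget in the camera frame, then lifted to the global frame.
    Affine3d cloud_pose = Affine3d().translate(Vec3d(0, 0, 3));
    Affine3d cloud_pose_global = transform * cloud_pose;

    myWindow.showWidget("coords", viz::WCoordinateSystem());
    myWindow.showWidget("cloud", viz::WCloud(cloud, viz::Color::green()), cloud_pose_global);

    // View the scene from the camera we constructed.
    myWindow.setViewerPose(cam_pose);
    myWindow.spin();
    return 0;
}
```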
/external/opencv3/doc/tutorials/viz/widget_pose/ |
widget_pose.markdown |
    0:  Pose of a widget {#tutorial_widget_pose}
   10:  - Use Affine3 to set pose of a widget
   60:  - Use Affine3f to set pose of the cube.
   62:  /// Construct pose
   63:  Affine3f pose(rot_mat, Vec3f(translation, translation, translation));
   64:  myWindow.setWidgetPose("Cube Widget", pose);
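The widget_pose tutorial and sample rebuild an Affine3f from a growing rotation vector each frame and hand it to setWidgetPose. A condensed sketch of that loop, with placeholder rotation and translation increments:

```cpp
#include <cmath>
#include <opencv2/viz.hpp>
#include <opencv2/calib3d.hpp>   // cv::Rodrigues

int main()
{
    using namespace cv;

    viz::Viz3d myWindow("Widget pose");
    myWindow.showWidget("Cube Widget", viz::WCube(Point3f(0.5f, 0.5f, 0.0f),
                                                  Point3f(0.0f, 0.0f, -0.5f),
                                                  true, viz::Color::blue()));

    Mat rot_vec = Mat::zeros(1, 3, CV_32F);
    float translation_phase = 0.0f, translation = 0.0f;
    while (!myWindow.wasStopped())
    {
        // Accumulate a small rotation about each axis and a sinusoidal translation.
        rot_vec.at<float>(0, 0) += (float)CV_PI * 0.01f;
        rot_vec.at<float>(0, 1) += (float)CV_PI * 0.01f;
        rot_vec.at<float>(0, 2) += (float)CV_PI * 0.01f;
        translation_phase += (float)CV_PI * 0.01f;
        translation = std::sin(translation_phase);

        Mat rot_mat;
        Rodrigues(rot_vec, rot_mat);   // rotation vector -> 3x3 rotation matrix

        /// Construct pose and apply it to the cube.
        Affine3f pose(rot_mat, Vec3f(translation, translation, translation));
        myWindow.setWidgetPose("Cube Widget", pose);

        myWindow.spinOnce(1, true);
    }
    return 0;
}
```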
/external/opencv3/modules/viz/include/opencv2/viz/ |
viz3d.hpp |
   86:  @param pose Pose of the widget.
   88:  void showWidget(const String &id, const Widget &widget, const Affine3d &pose = Affine3d::Identity());
  116:  /** @brief Sets pose of a widget in the window.
  118:  @param id The id of the widget whose pose will be set. @param pose The new pose of the widget.
  120:  void setWidgetPose(const String &id, const Affine3d &pose);
  122:  /** @brief Updates pose of a widget in the window by pre-multiplying its current pose
  [all...]
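The two setters listed from viz3d.hpp differ in how they compose: setWidgetPose replaces the widget's pose outright, while updateWidgetPose pre-multiplies the given transform onto the current one. A small illustrative sketch (widget names and transforms are arbitrary):

```cpp
#include <opencv2/viz.hpp>

int main()
{
    using namespace cv;

    viz::Viz3d win("Pose setters");
    win.showWidget("axes", viz::WCoordinateSystem());
    win.showWidget("cube", viz::WCube());

    // setWidgetPose: the new pose replaces whatever the widget had before.
    Affine3d lift(Vec3d(), Vec3d(0.0, 0.0, 1.0));   // pure translation along z
    win.setWidgetPose("cube", lift);

    // updateWidgetPose: the transform is pre-multiplied onto the current pose,
    // so after this call the cube is rotated and still lifted along z.
    Affine3d quarter_turn = Affine3d().rotate(Vec3d(0.0, 0.0, CV_PI / 2));
    win.updateWidgetPose("cube", quarter_turn);

    win.spin();
    return 0;
}
```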
vizcore.hpp |
   72:  /** @brief Constructs camera pose from position, focal_point and up_vector (see gluLookAt() for more
   79:  This function returns pose of the camera in global coordinate frame.
  156:  CV_EXPORTS bool readPose(const String& file, Affine3d& pose, const String& tag = "pose");
  157:  CV_EXPORTS void writePose(const String& file, const Affine3d& pose, const String& tag = "pose");
  160:  CV_EXPORTS void writeTrajectory(InputArray traj, const String& files_format = "pose%05d.xml", int start = 0, const String& tag = "pose");
  163:  CV_EXPORTS void readTrajectory(OutputArray traj, const String& files_format = "pose%05d.xml", int start = 0, int end = INT_MAX, const String& tag = "pose");
  [all...]
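vizcore.hpp also declares simple pose persistence. A hedged round-trip sketch using the default "pose" tag (the file name is arbitrary):

```cpp
#include <iostream>
#include <opencv2/viz.hpp>

int main()
{
    using namespace cv;

    // Write a pose to disk, then read it back under the default "pose" tag.
    Affine3d saved = Affine3d().rotate(Vec3d(0.0, CV_PI / 4, 0.0)).translate(Vec3d(1.0, 2.0, 3.0));
    viz::writePose("camera_pose.xml", saved);

    Affine3d loaded;
    if (viz::readPose("camera_pose.xml", loaded))
        std::cout << Mat(loaded.matrix) << std::endl;   // print the recovered 4x4 pose
    return 0;
}
```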
widgets.hpp |
  183:  /** @brief Sets pose of the widget.
  185:  @param pose The new pose of the widget.
  187:  void setPose(const Affine3d &pose);
  188:  /** @brief Updates pose of the widget by pre-multiplying its current pose.
  190:  @param pose The pose that the current pose of the widget will be pre-multiplied by.
  192:  void updatePose(const Affine3d &pose);
  [all...]
/external/opencv3/modules/viz/test/ |
test_tutorial3.cpp |
   20:  /// We can get the pose of the cam using makeCameraPose
   31:  /// Pose of the widget in camera frame
   33:  /// Pose of the widget in global frame
   44:  /// Set the viewer pose to that of camera
test_tutorial2.cpp |
   41:  /// Construct pose
   42:  Affine3d pose(rot_vec, Vec3d(translation, translation, translation));
   44:  myWindow.setWidgetPose("Cube Widget", pose);
tests_simple.cpp |
   52:  Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));   [local]
   57:  viz.showWidget("dragon", WCloud(dragon_cloud, Color::bluberry()), pose);
   70:  Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));   [local]
   75:  viz.showWidget("dragon", WCloud(dragon_cloud, colors), pose);
   89:  Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));   [local]
   93:  viz.showWidget("dragon", WCloud(dragon_cloud), pose);
  135:  Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));   [local]
  139:  viz.showWidget("mesh", WMesh(mesh), pose);
  149:  Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));   [local]
  153:  viz.showWidget("mesh", WMesh(mesh), pose);
  241:  Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));   [local]
  256:  Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));   [local]
  322:  Vec3d pose = 5 * Vec3d(sin(3.14 + 2.7 + i*60 * CV_PI/180), 0.4 - i*0.3, cos(3.14 + 2.7 + i*60 * CV_PI/180));   [local]
  [all...]
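tests_simple.cpp repeatedly uses showWidget's three-argument form, passing the pose together with the widget. A sketch of the same pattern with a random cloud standing in for the test's dragon data:

```cpp
#include <opencv2/viz.hpp>

int main()
{
    using namespace cv;

    // Synthetic stand-in for the dragon cloud used by the tests.
    Mat cloud(1, 500, CV_32FC3), colors(1, 500, CV_8UC3);
    randu(cloud, Scalar::all(-1.0), Scalar::all(1.0));
    randu(colors, Scalar::all(0), Scalar::all(255));

    Affine3d pose = Affine3d().rotate(Vec3d(0, 0.8, 0));

    viz::Viz3d viz("show_cloud");
    viz.showWidget("coosys", viz::WCoordinateSystem());
    viz.showWidget("dragon", viz::WCloud(cloud, colors), pose);   // widget + pose in one call
    viz.spin();
    return 0;
}
```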
/hardware/qcom/msm8996/original-kernel-headers/media/ |
msm_fd.h |
   20:  * enum msm_fd_pose - Face pose.
   32:  * @pose: refer to enum msm_fd_pose.
   39:  __u32 pose;   [member in struct msm_fd_face_data]
/cts/tests/tests/media/src/android/media/cts/ |
FaceDetector_FaceTest.java |
   66:  face.pose(FaceDetector.Face.EULER_X);
   67:  face.pose(FaceDetector.Face.EULER_Y);
   68:  face.pose(FaceDetector.Face.EULER_Z);
   72:  face.pose(ErrorEuler);
/external/opencv3/doc/tutorials/viz/ |
table_of_content_viz.markdown |
   18:  You will learn how to change pose of a widget.
/external/opencv3/samples/cpp/tutorial_code/viz/ |
widget_pose.cpp |
    3:  * @brief Setting pose of a widget
   70:  /// Construct pose
   71:  Affine3f pose(rot_mat, Vec3f(translation, translation, translation));
   73:  myWindow.setWidgetPose("Cube Widget", pose);
transformations.cpp |
   22:  << "This program shows how to use makeTransformToGlobal() to compute required pose,"
   76:  /// We can get the pose of the cam using makeCameraPose
   87:  /// Pose of the widget in camera frame
   89:  /// Pose of the widget in global frame
  104:  /// Set the viewer pose to that of camera
/hardware/qcom/msm8994/original-kernel-headers/media/ |
msm_fd.h |
   30:  * enum msm_fd_pose - Face pose.
   42:  * @pose: refer to enum msm_fd_pose.
   49:  __u32 pose;   [member in struct msm_fd_face_data]
/hardware/qcom/msm8x84/original-kernel-headers/media/ |
msm_fd.h |
   30:  * enum msm_fd_pose - Face pose.
   42:  * @pose: refer to enum msm_fd_pose.
   49:  __u32 pose;   [member in struct msm_fd_face_data]
/external/opencv3/modules/viz/src/ |
vizimpl.hpp |
   66:  void showWidget(const String &id, const Widget &widget, const Affine3d &pose = Affine3d::Identity());
   73:  void setWidgetPose(const String &id, const Affine3d &pose);
   74:  void updateWidgetPose(const String &id, const Affine3d &pose);
   86:  void setViewerPose(const Affine3d &pose);
viz3d.cpp |
  112:  void cv::viz::Viz3d::showWidget(const String &id, const Widget &widget, const Affine3d &pose) { impl_->showWidget(id, widget, pose); }
  119:  void cv::viz::Viz3d::setWidgetPose(const String &id, const Affine3d &pose) { impl_->setWidgetPose(id, pose); }
  120:  void cv::viz::Viz3d::updateWidgetPose(const String &id, const Affine3d &pose) { impl_->updateWidgetPose(id, pose); }
  125:  void cv::viz::Viz3d::setViewerPose(const Affine3d &pose) { impl_->setViewerPose(pose); }
vizcore.cpp |
  248:  bool cv::viz::readPose(const String& file, Affine3d& pose, const String& tag)
  254:  Mat hdr(pose.matrix, false);
  256:  if (hdr.empty() || hdr.cols != pose.matrix.cols || hdr.rows != pose.matrix.rows)
  259:  hdr.convertTo(pose.matrix, CV_64F);
  263:  void cv::viz::writePose(const String& file, const Affine3d& pose, const String& tag)
  266:  fs << tag << Mat(pose.matrix, false);
  304:  Mat pose = v[i];   [local]
  305:  CV_Assert(pose.type() == CV_32FC(16) || pose.type() == CV_64FC(16))
  [all...]
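Besides single poses, vizcore batches whole trajectories to numbered files. A sketch of that round trip, assuming std::vector<Affine3d> as the trajectory container (accepted through InputArray/OutputArray) and the default pose%05d.xml naming:

```cpp
#include <vector>
#include <opencv2/viz.hpp>

int main()
{
    using namespace cv;

    // Build a short synthetic trajectory: a pose every 10 degrees around the y axis.
    std::vector<Affine3d> traj;
    for (int i = 0; i < 36; ++i)
        traj.push_back(Affine3d().rotate(Vec3d(0.0, i * 10.0 * CV_PI / 180.0, 0.0))
                                 .translate(Vec3d(0.0, 0.0, 0.1 * i)));

    // One file per pose in the current directory: pose00000.xml, pose00001.xml, ...
    viz::writeTrajectory(traj, "pose%05d.xml");

    std::vector<Affine3d> loaded;
    viz::readTrajectory(loaded, "pose%05d.xml");
    return 0;
}
```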
/external/ceres-solver/examples/ |
robot_pose_mle.cc |
   39:  // how to compute the maximum likelihood estimate (MLE) of the robot's pose at
   45:  // "--pose_separation" flag, at which pose it receives relative odometry
   53:  // between successive pose estimates of the robot.
   55:  // range readings from each pose.
   65:  // global pose of the robot, and then computes the expected range reading.
   72:  // readings, and the range and odometry errors for every pose of the robot.
   85:  // Odometry u_i is the observed relative odometry between pose p_(i-1) and p_i,
   87:  // pose p_i. Both odometry as well as range readings are noisy, but we wish to
  103:  // terms, two for each pose, where for each pose term there is one term for the
  [all...]
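For flavour, a simplified Ceres residual in the spirit of the example's odometry term — a 1-D stand-in with made-up noise values, not the file's actual cost functors:

```cpp
#include "ceres/ceres.h"

// Residual for one odometry observation: the difference between the observed
// displacement and the displacement implied by two successive 1-D poses,
// scaled by the odometry standard deviation.
struct OdometryResidual {
  OdometryResidual(double observed_u, double stddev)
      : observed_u_(observed_u), stddev_(stddev) {}

  template <typename T>
  bool operator()(const T* const prev_pose, const T* const curr_pose, T* residual) const {
    residual[0] = (curr_pose[0] - prev_pose[0] - observed_u_) / stddev_;
    return true;
  }

  const double observed_u_;
  const double stddev_;
};

int main() {
  // Two unknown poses and one noisy relative odometry reading (illustrative values).
  double poses[2] = {0.0, 0.0};
  const double observed_u = 0.97, odometry_stddev = 0.1;

  ceres::Problem problem;
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<OdometryResidual, 1, 1, 1>(
          new OdometryResidual(observed_u, odometry_stddev)),
      nullptr, &poses[0], &poses[1]);

  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}
```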
/external/opencv3/doc/tutorials/calib3d/camera_calibration_square_chess/ |
camera_calibration_square_chess.markdown |
   14:  Pose estimation
   42:  - Now we are ready to find chessboard pose by running `solvePnP`:
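A hedged sketch of that step: detect the corners, build the board's 3-D points, then recover the board pose with solvePnP (board size, square size, image name and intrinsics below are placeholders):

```cpp
#include <vector>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    using namespace cv;

    // Placeholder inputs: a 9x6 board with 25 mm squares and an already-calibrated camera.
    const Size board_size(9, 6);
    const float square_size = 0.025f;
    Mat camera_matrix = (Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    Mat dist_coeffs = Mat::zeros(5, 1, CV_64F);

    Mat image = imread("chessboard.jpg", IMREAD_GRAYSCALE);
    if (image.empty())
        return 1;

    std::vector<Point2f> corners;
    if (!findChessboardCorners(image, board_size, corners))
        return 1;
    cornerSubPix(image, corners, Size(11, 11), Size(-1, -1),
                 TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.001));

    // 3-D corner positions on the board plane (z = 0).
    std::vector<Point3f> object_points;
    for (int i = 0; i < board_size.height; ++i)
        for (int j = 0; j < board_size.width; ++j)
            object_points.push_back(Point3f(j * square_size, i * square_size, 0.0f));

    // Chessboard pose relative to the camera.
    Mat rvec, tvec;
    solvePnP(object_points, corners, camera_matrix, dist_coeffs, rvec, tvec);
    return 0;
}
```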
/external/opencv3/doc/tutorials/calib3d/ |
table_of_content_calib3d.markdown |
   31:  Real time pose estimation of a textured object using ORB features, FlannBased matcher, PnP
/external/opencv3/doc/tutorials/calib3d/real_time_pose/ |
real_time_pose.markdown |
    1:  Real Time pose estimation of a textured object {#tutorial_real_time_pose}
    5:  The most elemental problem in augmented reality is the estimation of the camera pose with respect to an
    7:  obtain an object pose in order to grasp it and do some manipulation. However, this is not a trivial
   15:  This tutorial explains how to build a real time application to estimate the camera pose in
   25:  - Pose estimation using PnP + Ransac.
   31:  In computer vision, estimating the camera pose from *n* 3D-to-2D point correspondences is a fundamental
   33:  degrees of freedom of the pose and five calibration parameters: focal length, principal point,
   45:  reference frame, and their 2D projections \f$u_i\f$ onto the image, we seek to retrieve the pose (\f$R\f$
   89:  The aim of this application is to estimate in real time the object pose given its 3D textured model.
  376:  -# **Pose estimation using PnP + Ransac**
  [all...]
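The PnP + Ransac step the tutorial is built around, sketched with cv::solvePnPRansac; the 2D/3D correspondences and intrinsics are assumed to come from the ORB matching stage and are not shown:

```cpp
#include <vector>
#include <opencv2/calib3d.hpp>

// Estimate the camera pose from noisy 2D-3D correspondences, letting RANSAC
// reject the mismatched ones. The correspondence vectors and intrinsics are
// assumed to be filled elsewhere (e.g. from ORB matching against the model).
bool estimatePose(const std::vector<cv::Point3f>& model_points,
                  const std::vector<cv::Point2f>& image_points,
                  const cv::Mat& camera_matrix, const cv::Mat& dist_coeffs,
                  cv::Mat& rvec, cv::Mat& tvec, std::vector<int>& inliers)
{
    const int iterations = 500;          // RANSAC iterations
    const float reproj_error = 2.0f;     // inlier threshold in pixels
    const double confidence = 0.95;      // desired RANSAC confidence

    return cv::solvePnPRansac(model_points, image_points, camera_matrix, dist_coeffs,
                              rvec, tvec, false, iterations, reproj_error, confidence,
                              inliers, cv::SOLVEPNP_ITERATIVE);
}
```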
/hardware/qcom/msm8994/kernel-headers/media/ |
msm_fd.h |
   39:  __u32 pose;   [member in struct msm_fd_face_data]
/hardware/qcom/msm8996/kernel-headers/media/ |
msm_fd.h |
   40:  __u32 pose;   [member in struct msm_fd_face_data]
/hardware/qcom/msm8x84/kernel-headers/media/ |
msm_fd.h |
   39:  __u32 pose;   [member in struct msm_fd_face_data]