diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 8e07bce7c4..cc509ca951 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -10,7 +10,7 @@ jobs: build-xenial: - runs-on: ubuntu-16.04 + runs-on: ubuntu-18.04 steps: - uses: actions/checkout@v2 @@ -19,10 +19,16 @@ jobs: run: git submodule update --init --depth 1 description/media - name: Build code for Ubuntu 16 - run: docker build . -f ./scripts/docker/astrobee_xenial.Dockerfile -t astrobee/astrobee:latest-xenial + run: docker build . -f ./scripts/docker/astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t astrobee/astrobee:latest-ubuntu16.04 - name: Test code - run: docker build . -f ./scripts/docker/test_astrobee_xenial.Dockerfile -t test + run: docker build . -f ./scripts/docker/test_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + -t astrobee/astrobee:test-ubuntu16.04 build-bionic: @@ -35,10 +41,16 @@ jobs: run: git submodule update --init --depth 1 description/media - name: Build code for Ubuntu 18 - run: docker build . -f ./scripts/docker/astrobee_bionic.Dockerfile -t astrobee/astrobee:latest-bionic + run: docker build . -f ./scripts/docker/astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON=3 + -t astrobee/astrobee:latest-ubuntu18.04 - name: Test code - run: docker build . -f ./scripts/docker/test_astrobee_bionic.Dockerfile -t test + run: docker build . -f ./scripts/docker/test_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + -t astrobee/astrobee:test-ubuntu18.04 build-focal: @@ -51,7 +63,13 @@ jobs: run: git submodule update --init --depth 1 description/media - name: Build code for Ubuntu 20 - run: docker build . -f ./scripts/docker/astrobee_focal.Dockerfile -t astrobee/astrobee:latest-focal + run: docker build . 
-f ./scripts/docker/astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + -t astrobee/astrobee:latest-ubuntu20.04 - name: Test code - run: docker build . -f ./scripts/docker/test_astrobee_focal.Dockerfile -t test + run: docker build . -f ./scripts/docker/test_astrobee.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + -t astrobee/astrobee:test-ubuntu20.04 diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 89d721681b..9606331444 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -5,10 +5,27 @@ name: Check for lint errors on: ['push', 'pull_request'] jobs: - lint_check: + lint_check_cpp: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Check repo for lint errors run: ./scripts/git/cpplint_repo.py . + + lint_check_python: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Install linters + run: | + pip install black + pip install isort + + - name: Run black + run: | + black . --diff --extend-exclude cmake --check + - name: Run isort + run: | + isort . --diff --extend-skip cmake --profile black --check-only diff --git a/CMakeLists.txt b/CMakeLists.txt index 70401e7a59..7a4ce49830 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -18,7 +18,7 @@ cmake_minimum_required(VERSION 3.0) project(Astrobee) -set(ASTROBEE_VERSION 0.15.1) +set(ASTROBEE_VERSION 0.15.2) # Define our options option(USE_CCACHE diff --git a/README.md b/README.md index c408a72052..5f55dc4d68 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ due to NASA legal requirements. Thank you for your understanding. 
[Extensive documentation is auto-generated from the contents of this repository.](https://nasa.github.io/astrobee/documentation.html) -["A Brief Guide to Astrobee’s Flight Software"](https://github.com/albee/a-brief-guide-to-astrobee/raw/master/a_brief_guide_to_astrobee_v1.0.pdf) is a good tutorial, with a particular emphasis on the advanced topic of modifying Astrobee's flight software to enable Guidance, Navigation, & Control (GN&C) research. (Note that most guest science can be implemented as an app that uses the [Astrobee Command API](https://nasa.github.io/astrobee/html/command_dictionary.html) without modifying the flight software.) +["A Brief Guide to Astrobee’s Flight Software"](https://github.com/albee/a-brief-guide-to-astrobee/raw/master/a_brief_guide_to_astrobee_latest.pdf) is a good tutorial, with a particular emphasis on the advanced topic of modifying Astrobee's flight software to enable Guidance, Navigation, & Control (GN&C) research. (Note that most guest science can be implemented as an app that uses the [Astrobee Command API](https://nasa.github.io/astrobee/html/command_dictionary.html) without modifying the flight software.) For more information, read [Astrobee-related publications](https://www.nasa.gov/content/research-publications-0). 
Learning about the Astrobee [platform](https://www.nasa.gov/sites/default/files/atoms/files/bualat_spaceops_2018_paper.pdf), diff --git a/RELEASE.md b/RELEASE.md index 3e73f61d77..ceec6c1aaa 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,5 +1,13 @@ # Releases +## Release 0.15.2 + + * added python linters black and isort in the CI pipeline + * added perching arm reconnect service + * added nav and dock cam bayer image output option + * added code to report time for llp, mlp, hlp + * added end of motion check + ## Release 0.15.1 * Separated config files for DDS communications to avoid conflict diff --git a/astrobee.doxyfile b/astrobee.doxyfile index 94b4990f9e..4d830446a7 100644 --- a/astrobee.doxyfile +++ b/astrobee.doxyfile @@ -39,7 +39,7 @@ PROJECT_NAME = "NASA Astrobee Robot Software" # control system is used. -PROJECT_NUMBER = 0.15.1 +PROJECT_NUMBER = 0.15.2 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a @@ -2392,7 +2392,7 @@ DOT_PATH = # command). # This tag requires that the tag HAVE_DOT is set to YES. -DOTFILE_DIRS = behaviors mobility +DOTFILE_DIRS = behaviors mobility gnc # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the \mscfile diff --git a/astrobee/config/cameras.config b/astrobee/config/cameras.config index 3e360272f8..05ac92d952 100644 --- a/astrobee/config/cameras.config +++ b/astrobee/config/cameras.config @@ -17,6 +17,13 @@ require "context" +-- bayer_enable: Set to true to enable publishing raw Bayer images (can +-- be converted to RGB). May incur significant I/O overhead. + +-- bayer_throttle_ratio: Set to n to publish 1 raw Bayer image for +-- every n images grabbed. With n = 1, every image is +-- published. Larger n reduces I/O overhead. 
+ nav_cam = { width = 1280, height = 960, @@ -28,7 +35,9 @@ nav_cam = { gain = robot_camera_calibrations.nav_cam.gain, exposure = robot_camera_calibrations.nav_cam.exposure, calibration_gain = 105, - calibration_exposure = 30 + calibration_exposure = 30, + bayer_enable = false, + bayer_throttle_ratio = 3 }; dock_cam = { @@ -42,7 +51,9 @@ dock_cam = { gain= robot_camera_calibrations.dock_cam.gain, exposure= robot_camera_calibrations.dock_cam.exposure, calibration_gain = 105, - calibration_exposure = 30 + calibration_exposure = 30, + bayer_enable = false, + bayer_throttle_ratio = 3 }; -- The haz and perch cam params below are ignored for the time being. diff --git a/astrobee/config/communications/dds_ros_bridge.config b/astrobee/config/communications/dds_ros_bridge.config index ad2aa479d7..69b0f69c61 100644 --- a/astrobee/config/communications/dds_ros_bridge.config +++ b/astrobee/config/communications/dds_ros_bridge.config @@ -178,6 +178,10 @@ use_RTRT = true pub_topic_RTRT = "" +-- ros_zones_rapid_compressed_file => RZRCF -- +use_RZRCF = true +pub_topic_RZRCF = "-current_zones" + comm_status_rate = 1 cpu_state_rate = 0 disk_state_rate = 1 diff --git a/astrobee/config/management/data_bagger.config b/astrobee/config/management/data_bagger.config index f68b3e38c0..e8d52e25b3 100644 --- a/astrobee/config/management/data_bagger.config +++ b/astrobee/config/management/data_bagger.config @@ -50,7 +50,10 @@ default_topics = {{topic="gnc/ctl/traj", downlink="immediate", frequency=-1}, {topic="mob/state", downlink="immediate", frequency=-1}, {topic="command", downlink="immediate", frequency=-1}, {topic="mgt/ack", downlink="immediate", frequency=-1}, - {topic="mgt/sys_monitor/time_diff", downlink="immediate", frequency=-1}, + {topic="mgt/sys_monitor/time_sync", downlink="immediate", frequency=-1}, + {topic="gs/gs_manager/ack", downlink="immediate", frequency=-1}, + {topic="gs/gs_manager/config", downlink="immediate", frequency=-1}, + {topic="gs/gs_manager/state", 
downlink="immediate", frequency=-1}, {topic="rosout", downlink="immediate", frequency=-1}, {topic="robot_name", downlink="immediate", frequency=-1}} diff --git a/astrobee/config/mobility/choreographer.config b/astrobee/config/mobility/choreographer.config index 68e66158dc..c83d965af5 100644 --- a/astrobee/config/mobility/choreographer.config +++ b/astrobee/config/mobility/choreographer.config @@ -230,7 +230,7 @@ parameters = { description = "Allow new zones to overwrite the zone file" },{ id = "tolerance_max_time", reconfigurable = false, type = "double", - default = 1.0, unit = "seconds", + default = 3.0, unit = "seconds", description = "Maximum time a tolerance is acceptable" } } diff --git a/astrobee/config/mobility/mapper.config b/astrobee/config/mobility/mapper.config index c2e13cc38b..02e194d72a 100644 --- a/astrobee/config/mobility/mapper.config +++ b/astrobee/config/mobility/mapper.config @@ -75,14 +75,23 @@ parameters = { unit = "seconds", description = "How long the octomap remembers the fading memory map. It remembers forever when this variable is <= 0." },{ - id = "inflate_radius", + id = "robot_radius", reconfigurable = true, type = "double", - default = 0.25, + default = 0.28, + min = 0.0, + max = 1.0, + description = "Radius of the robot considered in the planner (default is 0.16 * sqrt(3))", + unit = "m" + },{ + id = "collision_distance", + reconfigurable = true, + type = "double", + default = 0.02, min = 0.01, max = 1, unit = "meters", - description = "How much the octomap is inflated." + description = "Minimum distance margin to maintain away from obstacles." 
},{ id = "cam_fov", reconfigurable = true, diff --git a/astrobee/config/mobility/planner_qp.config b/astrobee/config/mobility/planner_qp.config index f01933dc4b..6899861617 100644 --- a/astrobee/config/mobility/planner_qp.config +++ b/astrobee/config/mobility/planner_qp.config @@ -35,15 +35,6 @@ parameters = { max = 100, description = "Rate at which control state commands are created in plan", unit = "Hz" - }, { - id = "robot_radius", - reconfigurable = true, - type = "double", - default = 0.28, - min = 0.0, - max = 1.0, - description = "Radius of the robot considered in the planner (default is 0.16 * sqrt(3))", - unit = "m" - }, { id = "two_d", reconfigurable = true, @@ -92,5 +83,14 @@ parameters = { max = 10.0, description = "loop time for trajectory optimization replay", unit = "seconds" + }, { + id = "planner_distance", + reconfigurable = true, + type = "double", + default = 0.08, + min = 0.0, + max = 10.0, + description = "extra planner keepout distance, min is map tolerance", + unit = "m" } } diff --git a/astrobee/config/worlds/granite.config b/astrobee/config/worlds/granite.config index 65895a872c..c5f43bf03a 100755 --- a/astrobee/config/worlds/granite.config +++ b/astrobee/config/worlds/granite.config @@ -110,6 +110,7 @@ world_flight_modes = { speed = 0; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) @@ -151,6 +152,7 @@ world_flight_modes = { speed = 2; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.2; -- Position (20 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.3491; -- Attitude (20 degrees) @@ -192,6 +194,7 @@ world_flight_modes = { speed = 3; -- Tolerances + tolerance_pos_endpoint = 0.1; -- Endpoint position (10 cm) tolerance_pos = 0.2; -- Position (20 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.3490; -- 
Attitude (20 degrees) @@ -233,6 +236,7 @@ world_flight_modes = { speed = 1; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) @@ -273,6 +277,7 @@ world_flight_modes = { speed = 3; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) diff --git a/astrobee/config/worlds/iss.config b/astrobee/config/worlds/iss.config index 75c05ff4d6..38a9dbf6d9 100755 --- a/astrobee/config/worlds/iss.config +++ b/astrobee/config/worlds/iss.config @@ -120,6 +120,7 @@ world_flight_modes = { speed = 0; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) @@ -160,7 +161,8 @@ world_flight_modes = { speed = 2; -- Tolerances - tolerance_pos = 0.3; -- Position (30 cm) + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) + tolerance_pos = 0.25; -- Position (25 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.3491; -- Attitude (20 degrees) tolerance_omega = 0; -- Omega (disabled) @@ -200,6 +202,7 @@ world_flight_modes = { speed = 3; -- Tolerances + tolerance_pos_endpoint = 0.1; -- Endpoint position (5 cm) tolerance_pos = 0.2; -- Position (20 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.3490; -- Attitude (20 degrees) @@ -240,6 +243,7 @@ world_flight_modes = { speed = 1; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) @@ -280,6 +284,7 @@ world_flight_modes = { speed = 3; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 
cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) diff --git a/astrobee/config/worlds/mgtf.config b/astrobee/config/worlds/mgtf.config index 14536ad9d7..18307f5569 100644 --- a/astrobee/config/worlds/mgtf.config +++ b/astrobee/config/worlds/mgtf.config @@ -103,6 +103,7 @@ world_flight_modes = { speed_gain = 0; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) @@ -143,6 +144,7 @@ world_flight_modes = { speed = 2; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) @@ -183,6 +185,7 @@ world_flight_modes = { speed = 3; -- Tolerances + tolerance_pos_endpoint = 0.1; -- Endpoint position (5 cm) tolerance_pos = 0.2; -- Position (20 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.3490; -- Attitude (20 degrees) @@ -223,6 +226,7 @@ world_flight_modes = { speed = 1; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) @@ -263,6 +267,7 @@ world_flight_modes = { speed = 2; -- Tolerances + tolerance_pos_endpoint = 0.05; -- Endpoint position (5 cm) tolerance_pos = 0.1; -- Position (10 cm) tolerance_vel = 0; -- Velocity (disabled) tolerance_att = 0.1745; -- Attitude (10 degrees) diff --git a/behaviors/perch/src/perch_nodelet.cc b/behaviors/perch/src/perch_nodelet.cc index 1eb97d75e9..104d042935 100644 --- a/behaviors/perch/src/perch_nodelet.cc +++ b/behaviors/perch/src/perch_nodelet.cc @@ -201,7 +201,7 @@ class PerchNodelet : public ff_util::FreeFlyerNodelet { // [11] - If we successfully stopped, we switch to perch 
localization (future work) fsm_.Add(STATE::PERCHING_WAITING_FOR_SPIN_DOWN, MOTION_SUCCESS, [this](FSM::Event const& event) -> FSM::State { - Switch(LOCALIZATION_PERCH); + Switch(LOCALIZATION_MAPPED_LANDMARKS); return STATE::PERCHING_SWITCHING_TO_PL_LOC; }); // [12] - With all steps done, we conclude we are perched. @@ -825,7 +825,6 @@ class PerchNodelet : public ff_util::FreeFlyerNodelet { // Move to the approach pose. case APPROACH_POSE: msg.header.frame_id = platform_name_ + "handrail/approach"; - ROS_ERROR_STREAM("transform " << msg.header.frame_id); goal.states.push_back(msg); break; // Move to the recovery pose. This option is currently used to move to the diff --git a/communications/dds_ros_bridge/CMakeLists.txt b/communications/dds_ros_bridge/CMakeLists.txt index a853b347ff..4e19586103 100644 --- a/communications/dds_ros_bridge/CMakeLists.txt +++ b/communications/dds_ros_bridge/CMakeLists.txt @@ -23,15 +23,16 @@ catkin_package( set(DEPS ff_msgs - ) +) set(LIBS + ${Boost_IOSTREAMS_LIBRARY} config_reader ff_nodelet Qt4::QtXml rapidIo rapidExtAstrobee - ) +) set(INCLUDES ${CMAKE_CURRENT_SOURCE_DIR}/include diff --git a/communications/dds_ros_bridge/include/dds_ros_bridge/dds_ros_bridge.h b/communications/dds_ros_bridge/include/dds_ros_bridge/dds_ros_bridge.h index fb814c7483..1675d64eaf 100644 --- a/communications/dds_ros_bridge/include/dds_ros_bridge/dds_ros_bridge.h +++ b/communications/dds_ros_bridge/include/dds_ros_bridge/dds_ros_bridge.h @@ -61,6 +61,7 @@ #include "dds_ros_bridge/ros_string_rapid_text_message.h" #include "dds_ros_bridge/ros_sub_rapid_pub.h" #include "dds_ros_bridge/ros_telemetry_rapid_telemetry.h" +#include "dds_ros_bridge/ros_zones_rapid_compressed_file.h" #include "ff_msgs/SetRate.h" @@ -174,6 +175,9 @@ class DdsRosBridge : public ff_util::FreeFlyerNodelet { bool BuildStringToTextMessage(const std::string& name); bool BuildTelemetryToRapid(const std::string& sub_topic, const std::string& name); + bool 
BuildZonesToRapidCompressedFile(const std::string& sub_topic, + const std::string& zones_srv, + const std::string& name); /** * Build Rapid subscribers to Ros publishers diff --git a/communications/dds_ros_bridge/include/dds_ros_bridge/ros_zones_rapid_compressed_file.h b/communications/dds_ros_bridge/include/dds_ros_bridge/ros_zones_rapid_compressed_file.h new file mode 100644 index 0000000000..4cc7c52efa --- /dev/null +++ b/communications/dds_ros_bridge/include/dds_ros_bridge/ros_zones_rapid_compressed_file.h @@ -0,0 +1,73 @@ +/* Copyright (c) 2017, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The Astrobee platform is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +#ifndef DDS_ROS_BRIDGE_ROS_ZONES_RAPID_COMPRESSED_FILE_H_ +#define DDS_ROS_BRIDGE_ROS_ZONES_RAPID_COMPRESSED_FILE_H_ + +#include +#include +#include +#include +#include +#include + +#include "boost/iostreams/copy.hpp" +#include "boost/iostreams/device/back_inserter.hpp" +#include "boost/iostreams/device/array.hpp" +#include "boost/iostreams/filter/zlib.hpp" +#include "boost/iostreams/filtering_stream.hpp" + +#include "dds_ros_bridge/ros_sub_rapid_pub.h" +#include "dds_ros_bridge/util.h" + +#include "ff_msgs/GetZones.h" +#include "ff_msgs/Zone.h" + +#include "visualization_msgs/MarkerArray.h" + +#include "knDds/DdsTypedSupplier.h" + +#include "rapidUtil/RapidHelper.h" + +#include "AstrobeeConstants.h" +#include "CompressedFileSupport.h" + +namespace ff { + +class RosZonesToRapidCompressedFile : public RosSubRapidPub { + public: + RosZonesToRapidCompressedFile(const std::string& subscribe_topic, + const std::string& get_zones_srv, + const std::string& pub_topic, + const ros::NodeHandle &nh, + const unsigned int queue_size = 10); + + void Callback(visualization_msgs::MarkerArray::ConstPtr const& zones); + + private: + using Supplier = kn::DdsTypedSupplier; + using SupplierPtr = std::unique_ptr; + + SupplierPtr file_supplier_; + + ros::ServiceClient get_zones_client_; +}; + +} // end namespace ff + +#endif // DDS_ROS_BRIDGE_ROS_ZONES_RAPID_COMPRESSED_FILE_H_ diff --git a/communications/dds_ros_bridge/src/dds_ros_bridge.cc b/communications/dds_ros_bridge/src/dds_ros_bridge.cc index 45788de2d9..0ed11d99d1 100644 --- a/communications/dds_ros_bridge/src/dds_ros_bridge.cc +++ b/communications/dds_ros_bridge/src/dds_ros_bridge.cc @@ -658,6 +658,29 @@ bool DdsRosBridge::BuildTelemetryToRapid(const std::string& sub_topic, return true; } +bool DdsRosBridge::BuildZonesToRapidCompressedFile(const std::string& sub_topic, + const std::string& zones_srv, + const std::string& name) { + std::string pub_topic; + bool use; + + if (ReadTopicInfo(name, "pub", pub_topic, use)) { + 
if (use) { + ff::RosSubRapidPubPtr zones_to_compressed_file( + new ff::RosZonesToRapidCompressedFile(sub_topic, + zones_srv, + pub_topic, + nh_)); + components_++; + ros_sub_rapid_pubs_[name] = zones_to_compressed_file; + } + } else { + return false; + } + + return true; +} + bool DdsRosBridge::BuildCommandToCommand(const std::string& pub_topic, const std::string& name) { std::string sub_topic; @@ -1362,6 +1385,13 @@ bool DdsRosBridge::ReadParams() { return false; } + // ros_zones_rapid_compressed_file => RZRCF + if (!BuildZonesToRapidCompressedFile(TOPIC_MOBILITY_ZONES, + SERVICE_MOBILITY_GET_ZONES, + "RZRCF")) { + return false; + } + // Read and set the publishing rates if (!ReadRateParams()) { exit(EXIT_FAILURE); diff --git a/communications/dds_ros_bridge/src/ros_zones_rapid_compressed_file.cc b/communications/dds_ros_bridge/src/ros_zones_rapid_compressed_file.cc new file mode 100644 index 0000000000..fece24d4e2 --- /dev/null +++ b/communications/dds_ros_bridge/src/ros_zones_rapid_compressed_file.cc @@ -0,0 +1,180 @@ +/* Copyright (c) 2017, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The Astrobee platform is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +#include "dds_ros_bridge/ros_zones_rapid_compressed_file.h" + +ff::RosZonesToRapidCompressedFile::RosZonesToRapidCompressedFile( + const std::string& subscribe_topic, + const std::string& get_zones_srv, + const std::string& pub_topic, + const ros::NodeHandle &nh, + const unsigned int queue_size) + : RosSubRapidPub(subscribe_topic, pub_topic, nh, queue_size) { + file_supplier_.reset( + new ff::RosZonesToRapidCompressedFile::Supplier( + rapid::ext::astrobee::COMPRESSED_FILE_TOPIC + pub_topic, + "", "AstrobeeCurrentCompressedPlanProfile", "")); + + sub_ = nh_.subscribe(subscribe_topic, + queue_size, + &RosZonesToRapidCompressedFile::Callback, + this); + + get_zones_client_ = nh_.serviceClient(get_zones_srv); + + rapid::RapidHelper::initHeader(file_supplier_->event().hdr); +} + +void ff::RosZonesToRapidCompressedFile::Callback( + visualization_msgs::MarkerArray::ConstPtr const& zones) { + // The zones marker array seems to only be used for visualization and the data + // in the message isn't in the right format to send to the ground. However the + // message tells the bridge that the zones were loaded/changed and that it + // needs to query the choreographer for the zones. + ff_msgs::GetZones get_zones_srv; + + // Check to make sure the get zones service is running. If it isn't, all we + // can do is output the error. + if (!get_zones_client_.exists()) { + ROS_ERROR("Get zones service isn't running! Choreographer may have died!"); + return; + } + + // Check to make sure the service call worked + if (!get_zones_client_.call(get_zones_srv)) { + ROS_ERROR("Get zones service call returned false."); + return; + } + + // Sort zones first. The zones file that comes from GDS can contain multiple + // sets of keepins and keepouts and each keepin and keepout can contain + // multiple boxes. Thus we need to find all the keepins and keepouts that have + // the same name and we need to sort the boxes by their index. 
Since this code + * will run very infrequently, I'm okay with the sort taking time. + std::map> sorted_zones; + std::vector::iterator zone_it, zone_begin, zone_end, sort_it; + + zone_begin = get_zones_srv.response.zones.begin(); + zone_end = get_zones_srv.response.zones.end(); + for (zone_it = zone_begin; zone_it != zone_end; zone_it++) { + // Check to see if the zone is already in the map + if (sorted_zones.count(zone_it->name) > 0) { + sort_it = sorted_zones[zone_it->name].begin(); + // Use box index to insert the zone into the correct location in the + // vector + sorted_zones[zone_it->name].insert((sort_it + zone_it->index), *zone_it); + } else { + sorted_zones[zone_it->name].push_back(*zone_it); + } + } + + // Convert zones to json string/file + std::string zones_file_content = "{\n"; + + // Put timestamp in file content + // Convert timestamp from seconds to milliseconds + uint64_t time_milli = (uint64_t)get_zones_srv.response.timestamp.sec * 1000; + time_milli += (uint64_t)get_zones_srv.response.timestamp.nsec / 1000000; + zones_file_content += "\t\"timestamp\" : "; + zones_file_content += std::to_string(time_milli); + zones_file_content += ",\n"; + zones_file_content += "\t\"zones\" : ["; + + std::map>::iterator map_it; + for (map_it = sorted_zones.begin(); map_it != sorted_zones.end(); map_it++) { + zones_file_content += " {\n\t\t\"sequence\": ["; + for (sort_it = map_it->second.begin(); + sort_it != map_it->second.end(); + sort_it++) { + zones_file_content += " [ "; + zones_file_content += std::to_string(sort_it->min.x) + ", "; + zones_file_content += std::to_string(sort_it->min.y) + ", "; + zones_file_content += std::to_string(sort_it->min.z) + ", "; + zones_file_content += std::to_string(sort_it->max.x) + ", "; + zones_file_content += std::to_string(sort_it->max.y) + ", "; + zones_file_content += std::to_string(sort_it->max.z); + zones_file_content += " ],"; + } + + // Remove comma + zones_file_content.pop_back(); + zones_file_content += " ], 
\n\t\t\"safe\" : "; + // The type and name will be the same for all zones so we can just use the + // first one. Also the type clutter is never used in the GDS zones so I + // don't think it is being used but we will mark it as a keepout just in + // case it is + if (map_it->second.begin()->type == ff_msgs::Zone::KEEPOUT || + map_it->second.begin()->type == ff_msgs::Zone::CLUTTER) { + zones_file_content += "false,\n"; + } else if (map_it->second.begin()->type == ff_msgs::Zone::KEEPIN) { + zones_file_content += "true,\n"; + } else { + ROS_ERROR("DDS ROS bridge: Unknown zone type in zone to compressed file"); + return; + } + + // Add zone name + zones_file_content += "\t\t\"name\" : \"" + map_it->second.begin()->name; + zones_file_content += "\"\n\t},"; + } + + // Remove comma + zones_file_content.pop_back(); + zones_file_content += " ]\n}"; + + // Fill rapid compressed file message + rapid::ext::astrobee::CompressedFile &msg = file_supplier_->event(); + msg.hdr.timeStamp = util::RosTime2RapidTime(get_zones_srv.response.timestamp); + + // Leave the id blank. This is only used when GDS sends a compressed file to + // Astrobee. 
+ + // Set compression type + msg.compressionType = rapid::ext::astrobee::COMPRESSION_TYPE_DEFLATE; + + // Set data + // First compress the data + boost::iostreams::filtering_ostream out; + boost::iostreams::basic_array_source source(zones_file_content.c_str(), + zones_file_content.size()); + + std::vector data; + // This is the max size the compressed file can be in the rapid message + uintmax_t max_cf_size = 128 * 1024; + data.reserve(max_cf_size); + + out.push(boost::iostreams::zlib_compressor()); + out.push(boost::iostreams::back_inserter(data)); + + boost::iostreams::copy(source, out); + + if (data.size() >= max_cf_size) { + ROS_ERROR_STREAM("DDS ROS Bridge: The compressed file for the zones is " << + "too big to send to the ground."); + return; + } + + // Next set the data in the rapid message + msg.compressedFile.ensure_length(data.size(), data.size()); + unsigned char* buf = msg.compressedFile.get_contiguous_buffer(); + for (unsigned int i = 0; i < data.size(); i++) { + buf[i] = data[i]; + } + + file_supplier_->sendEvent(); +} diff --git a/communications/ff_msgs/action/Motion.action b/communications/ff_msgs/action/Motion.action index 2810474325..80879bc088 100644 --- a/communications/ff_msgs/action/Motion.action +++ b/communications/ff_msgs/action/Motion.action @@ -45,47 +45,48 @@ string reference_frame # Motion result int32 response # Motion action response -int32 ALREADY_THERE = 2 # MOVE: We are already at the location -int32 SUCCESS = 1 # ALL: Motion succeeded -int32 PREEMPTED = 0 # ALL: Motion preempted by thirdparty -int32 PLAN_FAILED = -1 # MOVE/EXEC: Plan/bootstrap failed -int32 VALIDATE_FAILED = -2 # MOVE/EXEC: No comms with mapper -int32 PMC_FAILED = -3 # MOVE/EXEC: PMC failed -int32 CONTROL_FAILED = -4 # ALL: Control failed -int32 OBSTACLE_DETECTED = -5 # ALL: Obstacle / replan disabled -int32 REPLAN_NOT_ENOUGH_TIME = -6 # MOVE/EXEC: Not enough time to replan -int32 REPLAN_FAILED = -7 # MOVE/EXEC: Replanning failed -int32 REVALIDATE_FAILED = -8 # 
MOVE/EXEC: Revalidating failed -int32 NOT_IN_WAITING_MODE = -9 # ALL: Internal failure -int32 INVALID_FLIGHT_MODE = -10 # ALL: No flight mode specified -int32 UNEXPECTED_EMPTY_SEGMENT = -11 # EXEC: Segment empty -int32 COULD_NOT_RESAMPLE = -12 # EXEC: Could not resample segment -int32 UNEXPECTED_EMPTY_STATES = -13 # MOVE: State vector empty -int32 INVALID_COMMAND = -14 # Command rejected -int32 CANNOT_QUERY_ROBOT_POSE = -15 # TF2 failed to find the current pose -int32 NOT_ON_FIRST_POSE = -16 # EXEC: Not on first pose of exec -int32 BAD_DESIRED_VELOCITY = -17 # Requested vel too high -int32 BAD_DESIRED_ACCELERATION = -18 # Requested accel too high -int32 BAD_DESIRED_OMEGA = -19 # Requested omega too high -int32 BAD_DESIRED_ALPHA = -20 # Requested alpha too high -int32 BAD_DESIRED_RATE = -21 # Requested rate too low -int32 TOLERANCE_VIOLATION_POSITION = -22 # Position tolerance violated -int32 TOLERANCE_VIOLATION_ATTITUDE = -23 # Attitude tolerance violated -int32 TOLERANCE_VIOLATION_VELOCITY = -24 # Velocity tolerance violated -int32 TOLERANCE_VIOLATION_OMEGA = -25 # Omega tolerance violated -int32 VIOLATES_RESAMPLING = -26 # Validation: could not resample@10Hz -int32 VIOLATES_KEEP_OUT = -27 # Validation: Keep out violation -int32 VIOLATES_KEEP_IN = -28 # Validation: Keep in violation -int32 VIOLATES_MINIMUM_FREQUENCY = -29 # Validation: Sample frequency too low -int32 VIOLATES_STATIONARY_ENDPOINT = -30 # Validation: Last setpoint not static -int32 VIOLATES_FIRST_IN_PAST = -31 # Validation: First timestamp in past -int32 VIOLATES_MINIMUM_SETPOINTS = -32 # Validation: Not enough setpoints -int32 VIOLATES_HARD_LIMIT_VEL = -33 # Validation: Velocity too high -int32 VIOLATES_HARD_LIMIT_ACCEL = -34 # Validation: Acceleration too high -int32 VIOLATES_HARD_LIMIT_OMEGA = -35 # Validation: Omega too high -int32 VIOLATES_HARD_LIMIT_ALPHA = -36 # Validation: Alpha too high -int32 CANCELLED = -37 # ALL: Motion cancelled by callee -int32 INVALID_REFERENCE_FRAME = -38 # ALL: 
Unknown reference frame +int32 ALREADY_THERE = 2 # MOVE: We are already at the location +int32 SUCCESS = 1 # ALL: Motion succeeded +int32 PREEMPTED = 0 # ALL: Motion preempted by thirdparty +int32 PLAN_FAILED = -1 # MOVE/EXEC: Plan/bootstrap failed +int32 VALIDATE_FAILED = -2 # MOVE/EXEC: No comms with mapper +int32 PMC_FAILED = -3 # MOVE/EXEC: PMC failed +int32 CONTROL_FAILED = -4 # ALL: Control failed +int32 OBSTACLE_DETECTED = -5 # ALL: Obstacle / replan disabled +int32 REPLAN_NOT_ENOUGH_TIME = -6 # MOVE/EXEC: Not enough time to replan +int32 REPLAN_FAILED = -7 # MOVE/EXEC: Replanning failed +int32 REVALIDATE_FAILED = -8 # MOVE/EXEC: Revalidating failed +int32 NOT_IN_WAITING_MODE = -9 # ALL: Internal failure +int32 INVALID_FLIGHT_MODE = -10 # ALL: No flight mode specified +int32 UNEXPECTED_EMPTY_SEGMENT = -11 # EXEC: Segment empty +int32 COULD_NOT_RESAMPLE = -12 # EXEC: Could not resample segment +int32 UNEXPECTED_EMPTY_STATES = -13 # MOVE: State vector empty +int32 INVALID_COMMAND = -14 # Command rejected +int32 CANNOT_QUERY_ROBOT_POSE = -15 # TF2 failed to find the current pose +int32 NOT_ON_FIRST_POSE = -16 # EXEC: Not on first pose of exec +int32 BAD_DESIRED_VELOCITY = -17 # Requested vel too high +int32 BAD_DESIRED_ACCELERATION = -18 # Requested accel too high +int32 BAD_DESIRED_OMEGA = -19 # Requested omega too high +int32 BAD_DESIRED_ALPHA = -20 # Requested alpha too high +int32 BAD_DESIRED_RATE = -21 # Requested rate too low +int32 TOLERANCE_VIOLATION_POSITION_ENDPOINT = -22 # Position tolerance violated +int32 TOLERANCE_VIOLATION_POSITION = -23 # Position tolerance violated +int32 TOLERANCE_VIOLATION_ATTITUDE = -24 # Attitude tolerance violated +int32 TOLERANCE_VIOLATION_VELOCITY = -25 # Velocity tolerance violated +int32 TOLERANCE_VIOLATION_OMEGA = -26 # Omega tolerance violated +int32 VIOLATES_RESAMPLING = -27 # Validation: could not resample@10Hz +int32 VIOLATES_KEEP_OUT = -28 # Validation: Keep out violation +int32 VIOLATES_KEEP_IN = -29 # 
Validation: Keep in violation +int32 VIOLATES_MINIMUM_FREQUENCY = -30 # Validation: Sample frequency too low +int32 VIOLATES_STATIONARY_ENDPOINT = -31 # Validation: Last setpoint not static +int32 VIOLATES_FIRST_IN_PAST = -32 # Validation: First timestamp in past +int32 VIOLATES_MINIMUM_SETPOINTS = -33 # Validation: Not enough setpoints +int32 VIOLATES_HARD_LIMIT_VEL = -34 # Validation: Velocity too high +int32 VIOLATES_HARD_LIMIT_ACCEL = -35 # Validation: Acceleration too high +int32 VIOLATES_HARD_LIMIT_OMEGA = -36 # Validation: Omega too high +int32 VIOLATES_HARD_LIMIT_ALPHA = -37 # Validation: Alpha too high +int32 CANCELLED = -38 # ALL: Motion cancelled by callee +int32 INVALID_REFERENCE_FRAME = -39 # ALL: Unknown reference frame # Human readable FSM result for debugging string fsm_result diff --git a/communications/ff_msgs/msg/FlightMode.msg b/communications/ff_msgs/msg/FlightMode.msg index 44dd6d466c..f7f1ba78b0 100644 --- a/communications/ff_msgs/msg/FlightMode.msg +++ b/communications/ff_msgs/msg/FlightMode.msg @@ -26,6 +26,7 @@ bool control_enabled # Is control enabled? 
float32 collision_radius # Collision radius in meters # Tolerances (all in SI units) +float32 tolerance_pos_endpoint # Endpoint position tolerance in m float32 tolerance_pos # Position tolerance in m float32 tolerance_vel # Velocity tolerance in m/s float32 tolerance_att # Attitude tolerance in rads diff --git a/communications/ff_msgs/msg/TimeDiffStamped.msg b/communications/ff_msgs/msg/TimeSyncStamped.msg similarity index 84% rename from communications/ff_msgs/msg/TimeDiffStamped.msg rename to communications/ff_msgs/msg/TimeSyncStamped.msg index c5cb2d845c..39a6a6592f 100644 --- a/communications/ff_msgs/msg/TimeDiffStamped.msg +++ b/communications/ff_msgs/msg/TimeSyncStamped.msg @@ -20,5 +20,11 @@ # Header with timestamp std_msgs/Header header -# Time diff in seconds between llp and mlp -float32 time_diff_sec +# Processor that the heartbeat came from +string remote_processor + +# Current time on the mlp +time mlp_time + +# Time in the incoming heartbeat +time remote_time diff --git a/communications/ff_msgs/srv/GetFloat.srv b/communications/ff_msgs/srv/GetFloat.srv new file mode 100644 index 0000000000..7da14c297f --- /dev/null +++ b/communications/ff_msgs/srv/GetFloat.srv @@ -0,0 +1,22 @@ +# Copyright (c) 2017, United States Government, as represented by the +# Administrator of the National Aeronautics and Space Administration. +# +# All rights reserved. +# +# The Astrobee platform is licensed under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the +# License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# This message sends some float64 data, which can be a server parameter. + +--- +float64 data # float data +bool success # whether succeeded diff --git a/communications/ff_msgs/srv/GetOccupancyMap.srv b/communications/ff_msgs/srv/GetOccupancyMap.srv new file mode 100644 index 0000000000..db5c23721b --- /dev/null +++ b/communications/ff_msgs/srv/GetOccupancyMap.srv @@ -0,0 +1,26 @@ +# Copyright (c) 2017, United States Government, as represented by the +# Administrator of the National Aeronautics and Space Administration. +# +# All rights reserved. +# +# The Astrobee platform is licensed under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the +# License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# This message allows the MOBILITY::MAPPER zones to be retrieved + +--- +time timestamp # When these zones were updated + +int8[] map # Occupancy map +geometry_msgs/Vector3 origin # Map origin +geometry_msgs/Vector3 dim # Map dimentions +float32 resolution # Map resolution diff --git a/debian/changelog b/debian/changelog index 7f0ed059ea..4b4b4606fd 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,13 @@ +astrobee (0.15.2) testing; urgency=medium + + * added python linters black and isort in the CI pipeline + * added perching arm reconnect service + * added nav and dock cam bayer image output option + * added code to report time for llp, mlp, hlp + * added end of motion check + + -- Astrobee Flight Software Thu, 30 Sep 2021 15:18:04 -0700 + astrobee (0.15.1) testing; urgency=medium * Separated config files for DDS communications to avoid conflict diff --git a/description/description/urdf/model_eps.urdf.xacro b/description/description/urdf/model_eps.urdf.xacro index 37466a825b..8a7c17b7e1 100644 --- a/description/description/urdf/model_eps.urdf.xacro +++ b/description/description/urdf/model_eps.urdf.xacro @@ -21,9 +21,10 @@ > /${ns}/ - 10.0 - 0.05 - 5.0 + 10.0 + 0.04 + 0.05 + 5.0 true true false diff --git a/doc/diagrams/Makefile b/doc/diagrams/Makefile index 6287e75271..3aad479c5a 100644 --- a/doc/diagrams/Makefile +++ b/doc/diagrams/Makefile @@ -31,7 +31,7 @@ dot_src = $(wildcard *.dot) dot_svg = $(dot_src:%.dot=$(svg_dir)/%.svg) dot_png = $(dot_src:%.dot=$(png_dir)/%.png) -plantuml_ver = 1.2019.1 +plantuml_ver = 1.2019.5 plantuml_zip = plantuml-jar-asl-$(plantuml_ver).zip plantuml_url = https://sourceforge.net/projects/plantuml/files/$(plantuml_ver)/ diff --git a/doc/general_documentation/INSTALL.md b/doc/general_documentation/INSTALL.md index eeff28f354..72094b1858 100644 --- a/doc/general_documentation/INSTALL.md +++ b/doc/general_documentation/INSTALL.md @@ -10,9 +10,9 @@ on a host machine, and make sure that you can checkout and build code. 
*Note: You will need 4 GBs of RAM to compile the software. If you don't have that much RAM available, please use swap space.* -*Note: Preferably install Ubuntu 16.04. At this time we do not officially support -any other operating system or Ubuntu version. Experimental instructions steps for -Ubuntu 18 installation are included* +*Note: Preferably install Ubuntu 16.04. These instructions are also valid for +Ubuntu 18.04 and 20.04, however at this time we do not officially support +any other operating system or Ubuntu version.* *Note: Please ensure you install the 64-bit version of Ubuntu. We do not support running Astrobee Robot Software on 32-bit systems.* diff --git a/doc/scripts/action2dox.py b/doc/scripts/action2dox.py index 6a0565275f..5c4e22092a 100755 --- a/doc/scripts/action2dox.py +++ b/doc/scripts/action2dox.py @@ -1,15 +1,15 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -19,27 +19,28 @@ import os import sys from os.path import basename + from common import * # What to search for -token = '/action/' +token = "/action/" # The full path to the file -path = sys.argv[1]; -idx_dot = path.rfind('.'); -idx_msg = path.find(token); +path = sys.argv[1] +idx_dot = path.rfind(".") +idx_msg = path.find(token) package = basename(path[:idx_msg]) -message = basename(path[idx_msg+len(token):idx_dot]); +message = basename(path[idx_msg + len(token) : idx_dot]) # Load the message template -template = load_file('./doc/scripts/templates/action.template') -header = load_file('./doc/scripts/templates/license.template') +template = load_file("./doc/scripts/templates/action.template") +header = load_file("./doc/scripts/templates/license.template") # Load the raw data describing the message chunks = split_into_chunks(path) if len(chunks) != 3: - sys.stderr.write("File '%s' does not have three chunks" % (filename)) - sys.exit(1) + sys.stderr.write("File '%s' does not have three chunks" % (filename)) + sys.exit(1) # Remove the license as well as any whitespace padding chunks[0] = chunks[0].replace(header, "").rstrip().strip() @@ -47,29 +48,31 @@ # Find the description desc = "" goal = "" -state = 0 # 0: padding, 1: header, 2:data +state = 0 # 0: padding, 1: header, 2:data for line in chunks[0].splitlines(): - rec = line.replace("#","").rstrip().strip() - if len(rec) < 2: - state += 1; - if len(rec) > 0: - if state == 1: - desc += line.replace("#","").rstrip().strip() + " " - else: - goal += line + "\n" + rec = line.replace("#", "").rstrip().strip() + if len(rec) < 2: + state += 1 + if len(rec) > 0: + if state == 1: + desc += line.replace("#", "").rstrip().strip() + " " + else: + goal += line + "\n" desc = desc.rstrip().strip() goal = goal.rstrip().strip() -#This is the data that will be injected into the template -data = {'file': path, - 'extension': 'action', - 'type': 'Action', - 'package': package, - 'message': message, - 'description': desc, - 
'goal': extract_message(goal), - 'response': extract_message(chunks[1]), - 'feedback': extract_message(chunks[2]) } +# This is the data that will be injected into the template +data = { + "file": path, + "extension": "action", + "type": "Action", + "package": package, + "message": message, + "description": desc, + "goal": extract_message(goal), + "response": extract_message(chunks[1]), + "feedback": extract_message(chunks[2]), +} # print description -print template % data \ No newline at end of file +print((template % data)) diff --git a/doc/scripts/common.py b/doc/scripts/common.py index 2ac5cf2338..9f1eeb2aa2 100644 --- a/doc/scripts/common.py +++ b/doc/scripts/common.py @@ -1,15 +1,15 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -18,77 +18,83 @@ import os import sys - from os.path import basename + def typerlink(typ): - tok = typ.replace("[","") - tok = tok.replace("]","") - tok = tok.split("/") - if len(tok) == 1: + tok = typ.replace("[", "") + tok = tok.replace("]", "") + tok = tok.split("/") + if len(tok) == 1: + return typ + package = tok[0].replace("_", "__") + message = tok[1].replace("_", "__") + if tok[0].strip() in ("ff_msgs", "ff_hw_msgs", "vpp_msgs"): + return '%s' % (package, message, typ) return typ - package = tok[0].replace("_","__") - message = tok[1].replace("_","__") - if tok[0].strip() in ("ff_msgs","ff_hw_msgs","vpp_msgs"): - return '%s' % (package,message,typ) - return typ + # Load and return a message template def split_into_chunks(filename): - chunks = [] - buff = "" - for line in open(filename): - if line.startswith('---'): - chunks.append(buff) - buff = "" - else: - buff += line - chunks.append(buff) - return chunks + chunks = [] + buff = "" + for line in open(filename): + if line.startswith("---"): + chunks.append(buff) + buff = "" + else: + buff += line + chunks.append(buff) + return chunks + def extract_message(str): - variables = [] - constants = [] - desc = "" - for line in str.splitlines(): - rec = line.strip() - if len(rec) == 0: continue - if rec[0] == "#": - desc += line.replace("#","").strip() + " " - else: - idx = rec.find("#") - if idx >= 0: - desc = rec[idx+1:].strip() - rec = rec[:idx] - lst = rec.split("=") - tok = (" ".join(lst[0].split())).split() - dtype = tok[0].strip() - dname = tok[1].strip() - if len(lst) > 1: - dname += " = " + lst[1].strip(); - constants.append("| %s | **%s** | %s |" % (typerlink(dtype),dname,desc)) - desc = "" - else: - variables.append("| %s | `%s` | %s |" % (typerlink(dtype),dname,desc)) - desc = "" + variables = [] + constants = [] + desc = "" + for line in str.splitlines(): + rec = line.strip() + if len(rec) == 0: + continue + if rec[0] == "#": + desc += line.replace("#", "").strip() + " " + else: + idx = 
rec.find("#") + if idx >= 0: + desc = rec[idx + 1 :].strip() + rec = rec[:idx] + lst = rec.split("=") + tok = (" ".join(lst[0].split())).split() + dtype = tok[0].strip() + dname = tok[1].strip() + if len(lst) > 1: + dname += " = " + lst[1].strip() + constants.append( + "| %s | **%s** | %s |" % (typerlink(dtype), dname, desc) + ) + desc = "" + else: + variables.append("| %s | `%s` | %s |" % (typerlink(dtype), dname, desc)) + desc = "" + + buff = "" + for line in variables: + buff += line + "\n" + for line in constants: + buff += line + "\n" + if len(buff) == 0: + buff = "| | | |" + return buff - buff = "" - for line in variables: - buff += line +"\n" - for line in constants: - buff += line +"\n" - if len(buff) == 0: - buff = "| | | |" - return buff # Load and return a message template def load_file(filename): - if not os.path.isfile(filename): - sys.stderr.write("Cannot locate file '%s'\n" % (filename)) - sys.exit(1) - with open(filename, 'r') as f: - content = f.read() - if not content: - sys.stderr.write("File '%s' is empty\n" % (filename)) - sys.exit(1) - return content + if not os.path.isfile(filename): + sys.stderr.write("Cannot locate file '%s'\n" % (filename)) + sys.exit(1) + with open(filename, "r") as f: + content = f.read() + if not content: + sys.stderr.write("File '%s' is empty\n" % (filename)) + sys.exit(1) + return content diff --git a/doc/scripts/csv2dox.py b/doc/scripts/csv2dox.py index 7fdc103d6c..208bedc721 100755 --- a/doc/scripts/csv2dox.py +++ b/doc/scripts/csv2dox.py @@ -1,47 +1,56 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +import csv import os import sys -import csv + from common import * + def hyperlink(url, name): - return '%s' % (url,name) + return '%s' % (url, name) + # Grab the CSV content content = "" header = True -with open(sys.argv[1], 'rb') as csvfile: - csvreader = csv.reader(csvfile, delimiter=',', quotechar='"') - for row in csvreader: - if header: - header = False - else: - content += '| ' + hyperlink(row[2], row[1]) + ' | ' + row[3] + ' | ' + row[4] + ' |\n' +with open(sys.argv[1], "rb") as csvfile: + csvreader = csv.reader(csvfile, delimiter=",", quotechar='"') + for row in csvreader: + if header: + header = False + else: + content += ( + "| " + + hyperlink(row[2], row[1]) + + " | " + + row[3] + + " | " + + row[4] + + " |\n" + ) # This is the data that will be injected into the template -data = { - 'content': content -} +data = {"content": content} # Load the message template -template = load_file('./doc/scripts/templates/csv.template') +template = load_file("./doc/scripts/templates/csv.template") # Populate the template -print template % data \ No newline at end of file +print((template % data)) diff --git a/doc/scripts/deliv2csv.py b/doc/scripts/deliv2csv.py index c621f7895b..f458349f41 100644 --- a/doc/scripts/deliv2csv.py +++ b/doc/scripts/deliv2csv.py @@ -1,11 +1,12 @@ -import ruamel.yaml as yaml from sys import stdout -stream = file('deliverables.yaml', 'r') +import ruamel.yaml as yaml + +stream = file("deliverables.yaml", "r") deliverables = yaml.round_trip_load(stream) -ref_keys = deliverables[0].values()[0].keys() +ref_keys = 
list(list(deliverables[0].values())[0].keys()) stdout.write("devliverable") for k in ref_keys: @@ -13,13 +14,13 @@ stdout.write("\n") for d in deliverables: - stdout.write(d.keys()[0]) + stdout.write(list(d.keys())[0]) for k in ref_keys: s = "" try: - v = d.values()[0][k] - s = str(d.values()[0][k]) + v = list(d.values())[0][k] + s = str(list(d.values())[0][k]) except: pass - stdout.write("\t"+s) + stdout.write("\t" + s) stdout.write("\n") diff --git a/doc/scripts/msg2dox.py b/doc/scripts/msg2dox.py index 75fdc9f4ac..cb1cc9783a 100755 --- a/doc/scripts/msg2dox.py +++ b/doc/scripts/msg2dox.py @@ -1,15 +1,15 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -19,27 +19,28 @@ import os import sys from os.path import basename + from common import * # What to search for -token = '/msg/' +token = "/msg/" # The full path to the file -path = sys.argv[1]; -idx_dot = path.rfind('.'); -idx_msg = path.find(token); +path = sys.argv[1] +idx_dot = path.rfind(".") +idx_msg = path.find(token) package = basename(path[:idx_msg]) -message = basename(path[idx_msg+len(token):idx_dot]); +message = basename(path[idx_msg + len(token) : idx_dot]) # Load the message template -template = load_file('./doc/scripts/templates/msg.template') -header = load_file('./doc/scripts/templates/license.template') +template = load_file("./doc/scripts/templates/msg.template") +header = load_file("./doc/scripts/templates/license.template") # Load the raw data describing the message chunks = split_into_chunks(path) if len(chunks) != 1: - sys.stderr.write("File '%s' does not have one chunk" % (filename)) - sys.exit(1) + sys.stderr.write("File '%s' does not have one chunk" % (filename)) + sys.exit(1) # Remove the license as well as any whitespace padding chunks[0] = chunks[0].replace(header, "").rstrip().strip() @@ -47,26 +48,28 @@ # Find the description desc = "" data = "" -state = 0 # 0: padding, 1: header, 2:data +state = 0 # 0: padding, 1: header, 2:data for line in chunks[0].splitlines(): - rec = line.replace("#","").rstrip().strip() - if len(rec) < 2: - state += 1; - if len(rec) > 0: - if state == 1: - desc += line.replace("#","").rstrip().strip() + " " - else: - data += line + "\n" + rec = line.replace("#", "").rstrip().strip() + if len(rec) < 2: + state += 1 + if len(rec) > 0: + if state == 1: + desc += line.replace("#", "").rstrip().strip() + " " + else: + data += line + "\n" desc = desc.rstrip().strip() data = data.rstrip().strip() # This is the data that will be injected into the template -data = {'file': path, - 'extension': 'msg', - 'type': 'Message', - 'package': package, - 'message': message, - 'description': desc, - 'content': 
extract_message(data) } +data = { + "file": path, + "extension": "msg", + "type": "Message", + "package": package, + "message": message, + "description": desc, + "content": extract_message(data), +} -print template % data \ No newline at end of file +print((template % data)) diff --git a/doc/scripts/puml2dox.py b/doc/scripts/puml2dox.py index fea1aa27df..62f302fc8e 100755 --- a/doc/scripts/puml2dox.py +++ b/doc/scripts/puml2dox.py @@ -21,42 +21,43 @@ import os import re import sys + from common import * -doc_dir = os.path.normpath(os.path.join( - os.path.abspath(os.path.dirname(sys.argv[0])), '../' - )) +doc_dir = os.path.normpath( + os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), "../") +) puml_path = os.path.relpath(sys.argv[1], os.getcwd()) name_no_ext = os.path.splitext(os.path.basename(puml_path))[0] -plantuml_jar = doc_dir+'/diagrams/plantuml/plantuml.jar' +plantuml_jar = doc_dir + "/diagrams/plantuml/plantuml.jar" -template = load_file(doc_dir+'/scripts/templates/diagram.template') +template = load_file(doc_dir + "/scripts/templates/diagram.template") # # Extract diagram width # -max_width=900 -svg = open(doc_dir+'/diagrams/gen/svg/'+name_no_ext+'.svg', "r") -first=svg.readline() -pattern=re.compile('viewBox="0 0 ([0-9]+) ([0-9]+)"') -result=re.search(pattern, first) +max_width = 900 +svg = open(doc_dir + "/diagrams/gen/svg/" + name_no_ext + ".svg", "r") +first = svg.readline() +pattern = re.compile('viewBox="0 0 ([0-9]+) ([0-9]+)"') +result = re.search(pattern, first) if not result: - print "width not found!" 
+ print("width not found!") width = max_width else: width = int(result.group(1)) if width > max_width: - scaled_width=max_width + scaled_width = max_width else: - scaled_width=width + scaled_width = width # # Generate the PNG image from the PUML diagram # -output_dir = os.path.join(doc_dir+'/html') +output_dir = os.path.join(doc_dir + "/html") # plantuml '-output' option is always relative to the source file, # so we need to compute the generation directory relative to it! relative_gen_dir = os.path.relpath(output_dir, os.path.dirname(puml_path)) @@ -70,15 +71,15 @@ # Generate a page that includes the diagram image # # html_relative_path = os.path.join('./', name_no_ext+'.png') -html_relative_path = os.path.join('../diagrams/gen/svg', name_no_ext+'.svg') -package = puml_path.replace(os.getcwd(), '').split(os.sep)[0] +html_relative_path = os.path.join("../diagrams/gen/svg", name_no_ext + ".svg") +package = puml_path.replace(os.getcwd(), "").split(os.sep)[0] data = { - 'package' : package, - 'diagram_name' : name_no_ext, - 'diagram_path' : html_relative_path, - 'width' : scaled_width + "package": package, + "diagram_name": name_no_ext, + "diagram_path": html_relative_path, + "width": scaled_width, } -print(template % data) +print((template % data)) diff --git a/doc/scripts/simplify_cmake_depsgraph.py b/doc/scripts/simplify_cmake_depsgraph.py index 86cc6d99fc..ae3b6fd7e4 100755 --- a/doc/scripts/simplify_cmake_depsgraph.py +++ b/doc/scripts/simplify_cmake_depsgraph.py @@ -4,15 +4,15 @@ # Simplify a cmake generated dependency graph. # # Cmake has the capability to generate executable/librairies dependency graph. -# However, the generated graphs are unreadable because every library file is +# However, the generated graphs are unreadable because every library file is # represented as a unique node. This script groups the libraries files by # package name. 
# The 'groups' variable list the patterns used to re-group the various nodes # from a same package into a single node. The groups list is specifically -# crafted for ARS: it mostly address the ROS and Gazebo libs, plus some +# crafted for ARS: it mostly address the ROS and Gazebo libs, plus some # key other dependencies. # -# Usage: +# Usage: # 1. Generate the dependency graphs with something like # cd $BUILD_PATH # cmake --graphviz=deps/ars . @@ -24,10 +24,10 @@ # -import sys import re +import sys -groups = [ +groups = [ ("nodeROS", "ROS Libraries", "/opt/ros/.+"), ("nodeGazebo", "gazebo", "/usr/lib/.+/libgazebo[_a-z0-9]*.so"), ("nodeBoost", "boost", "/usr/lib/.+/libboost.+\.so"), @@ -37,45 +37,47 @@ ("nodeGflags", "gflags", "/usr/lib/.+/libgflag.*\.so"), ("nodeGlog", "glog", "/usr/lib/.+/libglog.*\.so"), ("nodeLinux", "Linux System Libraries", "/usr/lib/.*.so") -# Not sure if libPocoFoundation should be listed individually -# ("nodeLinux", "Linux System Libraries", "/usr/lib/libPoco.*.so"), -# ("nodeLinux", "Linux System Libraries", "/usr/lib/.+linux-gnu/.+\.so") - ] + # Not sure if libPocoFoundation should be listed individually + # ("nodeLinux", "Linux System Libraries", "/usr/lib/libPoco.*.so"), + # ("nodeLinux", "Linux System Libraries", "/usr/lib/.+linux-gnu/.+\.so") +] nodes = list() + def process_dot(file): global nodes lines = file.readlines() - + # Identify groups of libraries # outer loop is groups: this way the order of the group list is respected # and it allows to glob larger pattern after more specific patterns # have already been processed for g in groups: for i, l in enumerate(lines): - pattern=re.compile('\s"(node[0-9]+)"\s\[\slabel="('+g[2]+')"\s.+') - result=re.search(pattern, l) + pattern = re.compile('\s"(node[0-9]+)"\s\[\slabel="(' + g[2] + ')"\s.+') + result = re.search(pattern, l) if result: lines.pop(i) lines.insert(i, l.replace(result.group(2), g[1])) - nodes.append( (result.group(1), g[0]) ) + nodes.append((result.group(1), g[0])) # 
Replace nodes with common group node name for n in nodes: - lines = [ l.replace(n[0], n[1]) for l in lines ] - + lines = [l.replace(n[0], n[1]) for l in lines] + # Add strict to avoid multiple edges - lines[0] = "strict "+lines[0] - + lines[0] = "strict " + lines[0] + # Output the new file for l in lines: print(l, end=" ") - + + if len(sys.argv) < 2: print("provide input file as first arg") exit f = open(sys.argv[1], "r") -process_dot(f) -#print(nodes) +process_dot(f) +# print(nodes) diff --git a/doc/scripts/srv2dox.py b/doc/scripts/srv2dox.py index fff0ed5848..bac14b3b68 100755 --- a/doc/scripts/srv2dox.py +++ b/doc/scripts/srv2dox.py @@ -1,15 +1,15 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the @@ -19,27 +19,28 @@ import os import sys from os.path import basename + from common import * # What to search for -token = '/srv/' +token = "/srv/" # The full path to the file -path = sys.argv[1]; -idx_dot = path.rfind('.'); -idx_msg = path.find(token); +path = sys.argv[1] +idx_dot = path.rfind(".") +idx_msg = path.find(token) package = basename(path[:idx_msg]) -message = basename(path[idx_msg+len(token):idx_dot]); +message = basename(path[idx_msg + len(token) : idx_dot]) # Load the message template -template = load_file('./doc/scripts/templates/srv.template') -header = load_file('./doc/scripts/templates/license.template') +template = load_file("./doc/scripts/templates/srv.template") +header = load_file("./doc/scripts/templates/license.template") # Load the raw data describing the message chunks = split_into_chunks(path) if len(chunks) != 2: - sys.stderr.write("File '%s' does not have two chunks" % (filename)) - sys.exit(1) + sys.stderr.write("File '%s' does not have two chunks" % (filename)) + sys.exit(1) # Remove the license as well as any whitespace padding chunks[0] = chunks[0].replace(header, "").rstrip().strip() @@ -47,28 +48,30 @@ # Find the description desc = "" data = "" -state = 0 # 0: padding, 1: header, 2:data +state = 0 # 0: padding, 1: header, 2:data for line in chunks[0].splitlines(): - rec = line.replace("#","").rstrip().strip() - if len(rec) < 2: - state += 1; - if len(rec) > 0: - if state == 1: - desc += line.replace("#","").rstrip().strip() + " " - else: - data += line + "\n" + rec = line.replace("#", "").rstrip().strip() + if len(rec) < 2: + state += 1 + if len(rec) > 0: + if state == 1: + desc += line.replace("#", "").rstrip().strip() + " " + else: + data += line + "\n" desc = desc.rstrip().strip() data = data.rstrip().strip() # This is the data that will be injected into the template -data = {'file': path, - 'extension': 'srv', - 'type': 'Service', - 'package': package, - 'message': message, - 'description': desc, - 'request': 
extract_message(data), - 'response': extract_message(chunks[1]) } +data = { + "file": path, + "extension": "srv", + "type": "Service", + "package": package, + "message": message, + "description": desc, + "request": extract_message(data), + "response": extract_message(chunks[1]), +} -#print description -print template % data \ No newline at end of file +# print description +print((template % data)) diff --git a/gnc/ctl/statemachine.dot b/gnc/ctl/ctl_fsm.dot similarity index 83% rename from gnc/ctl/statemachine.dot rename to gnc/ctl/ctl_fsm.dot index 830868a87e..a5c5f9b68f 100644 --- a/gnc/ctl/statemachine.dot +++ b/gnc/ctl/ctl_fsm.dot @@ -1,4 +1,4 @@ -# dot -Tpdf statemachine.dot -o statemachine.pdf +# dot -Tpdf ctl_fsm.dot -o ctl_fsm.pdf digraph G { graph [label="CTL\n", labelloc=t, fontsize=50]; size="11.7,8.3!"; @@ -15,8 +15,8 @@ digraph G { # Actions WAITING -> NOMINAL [label="[0]\nGOAL_NOMINAL\nControl(NOMINAL, Segment)", color=black]; - NOMINAL -> WAITING - [label="[1]\nGOAL_COMPLETE\nControl(STOP)\nResult(SUCCESS)", color=black]; + NOMINAL -> STOPPING + [label="[1]\nGOAL_COMPLETE\nControl(STOP)", color=black]; NOMINAL -> WAITING [label="[2]\nGOAL_CANCEL\nControl(STOP)\nResult(CANCELLED)", color=black]; WAITING -> STOPPING diff --git a/gnc/ctl/readme.md b/gnc/ctl/readme.md index 333f50016a..27d0308bb0 100644 --- a/gnc/ctl/readme.md +++ b/gnc/ctl/readme.md @@ -1,7 +1,9 @@ -\page ctl GNC Control Wrapper +\page ctl Control (CTL) -The control subsystem takes the pose from the EKF, as well as control commands, to determine -the forces and torques that should be applied to the robot for smooth control. +The control subsystem takes the pose from the EKF, as well as control commands, to +determine the forces and torques that should be applied to the robot for smooth control. +It executes the given segment based on a trajectory given by the choreographer; the fault +checking on the execution is done in the choreographer based on the feedback. 
# Inputs @@ -16,3 +18,27 @@ the forces and torques that should be applied to the robot for smooth control. * `gnc/ctl/segment`: The current segment the control subsystem is traversing. * `gnc/ctl/progress`: The progress in executing the current segment. +# GNC Control Wrapper behavior + +\dotfile ctl_fsm "GNC Control Wrapper finite state machine" width=10cm + +* WAITING: No setpoints sent to the internal Matlab controller. +* NOMINAL: Waits until the start time for deferred executions, commands the setpoints to the internal Matlab in nominal mode until the desired trajectory is finished. +* STOPPING: Commands internal Matlab into stopping mode. When the velocity and angular velocity fall under a certain stop threshould, it succeeds. + + +# Autocode Matlab Internal behavior + +The internal Matlab behavior has 4 modes: +* Idle: No control is present. +* Stopping: Position error is calculated based on the delayed current position and the current position. Velocity and acceleration commands are zero. +* Nominal: It follows the trajectory given for position, velocity and acceleraion. +* Stopped: Position/Attitude error is zero. Control Velocity and acceleration goals are zero. + + +# Enable/Disable Control + +In the case a new controller is to be tested, it is possible to enable/disble the current +controller using the service `gnc/ctl/enable`. `true` will enable the control and `false` +will disable it. If the requested state is the one already active, then the service does +not return success. \ No newline at end of file diff --git a/gnc/readme.md b/gnc/readme.md index 9fb3258687..4349e90c35 100644 --- a/gnc/readme.md +++ b/gnc/readme.md @@ -3,26 +3,25 @@ The Guidance, Navigation & Control subsystem is responsible for tracking the robot's pose and controlling the robot's motion. -# GNC Subsystems +## GNC Subsystems GNC consists of four main subsystems, that run in a chain: -\subpage ekf -1. `EKF`: The extended Kalman filter integrates IMU measurements, as well as +1. 
\subpage ekf : The extended Kalman filter integrates IMU measurements, as well as visual measurements from the localization subsystem, to track the robot's pose. -\subpage ctl -2. `CTL`: The control subsystem takes the robot's current state from the EKF, + +2. \subpage ctl : The control subsystem takes the robot's current state from the EKF, and determines the forces and torques needed to execute the commands sent from the mobility subsystem. -\subpage fam -3. `FAM`: The force allocation module takes the force and torque commands + +3. \subpage fam : The force allocation module takes the force and torque commands from the control subsystem, and determines the PMC speeds and nozzle positions to best execute these forces and torques. -\subpage -4. `SIM`: The simulator simulates the inputs to the EKF, and simulates the + +4. \subpage simwrapper : The simulator simulates the inputs to the EKF, and simulates the robot's motion based on the outputs of the FAM. -# GNC Code Organization +## GNC Code Organization Unlike the rest of flight software, the GNC systems are written mainly in Simulink. Simulink drag and drop box diagrams are then converted to @@ -32,11 +31,10 @@ automatically generated C code. The structure of the GNC code is: [Sim Wrapper](@ref simwrapper) is the model side of the GN&C ROS Wrapper. These folders contain ROS nodelets that mainly convert inputs and outputs between ROS messages and Simulink. -\subpage gncautocode -2. [GNC Autocode](@ref gncautocode): This folder contains a thin C++ wrapper + +2. \subpage gncautocode : This folder contains a thin C++ wrapper around the auto-generated C functions, which are all compiled in this package into a library. -\subpage matlab -\subpage simwrapper -3. [Matlab](@ref matlab): This folder contains the Matlab / Simulink code. This + +3. \subpage matlab : This folder contains the Matlab / Simulink code. This is where all the core functionality is located. 
diff --git a/gnc/sim_wrapper/readme.md b/gnc/sim_wrapper/readme.md index e6fd22d582..ee37b40240 100644 --- a/gnc/sim_wrapper/readme.md +++ b/gnc/sim_wrapper/readme.md @@ -7,20 +7,19 @@ and the robot's motion. # Required Inputs -* `/ctl/fam` -* `/pmc_actuator/command` +* `hw/pmc/command` # Simulated Outputs -* `/imu/data` -* `/ctl/vpp_state` -* `/pmc_actuator/telemetry` +* `hw/imu` +* `hw/pmc/telemetry` * `/clock` -* `/localization/mapped_landmarks/registration` -* `/localization/mapped_landmarks/features` -* `/localization/optical_flow/features` -* `/localization/optical_flow/registration` -* `/localization/handrail/features` -* `/localization/handrail/registration` -* `/ground_truth` +* `hw/cam_nav` +* `loc/ml/registration` +* `loc/ml/features` +* `loc/of/features` +* `loc/of/registration` +* `loc/hr/features` +* `loc/hr/registration` +* `loc/truth/pose` diff --git a/hardware/eps_driver/tools/eps_aux.py b/hardware/eps_driver/tools/eps_aux.py index bd1552373b..eeee8d9913 100755 --- a/hardware/eps_driver/tools/eps_aux.py +++ b/hardware/eps_driver/tools/eps_aux.py @@ -1,22 +1,24 @@ #!/usr/bin/python import rospy - from ff_hw_msgs.msg import EpsHousekeeping + def callback(data): - aux = float('nan') - system = float('nan') + aux = float("nan") + system = float("nan") for c in data.values: - if c.name.find("AUX_PWR_I") >= 0 : + if c.name.find("AUX_PWR_I") >= 0: aux = c.value - if c.name.find("SYSTEM_I") >= 0 : + if c.name.find("SYSTEM_I") >= 0: system = c.value - print("system= %.3fA | aux= %.3fA " % (system, aux)) - + print(("system= %.3fA | aux= %.3fA " % (system, aux))) + + def listener(): - rospy.init_node('pmc_consumption', anonymous=True) + rospy.init_node("pmc_consumption", anonymous=True) rospy.Subscriber("/hw/eps/housekeeping", EpsHousekeeping, callback) rospy.spin() - -if __name__ == '__main__': + + +if __name__ == "__main__": listener() diff --git a/hardware/is_camera/include/is_camera/camera.h b/hardware/is_camera/include/is_camera/camera.h index 
e61331d10e..362e051ec6 100644 --- a/hardware/is_camera/include/is_camera/camera.h +++ b/hardware/is_camera/include/is_camera/camera.h @@ -46,18 +46,22 @@ struct V4LStruct; // Nodelet class class CameraNodelet : public ff_util::FreeFlyerNodelet { public: - static constexpr size_t kImageMsgBuffer = 15; // 17.6 Mb of buffer space. This - // number is so large because - // this sets the life time for - // each image message sent - // out. Eventually we'll write - // over the pointer we've - // handed out. 15 frames means - // .. an image will stay valid - // for 1.0 seconds. - // Localization can process at - // 2 Hz, so it only needs an - // image for 0.5 seconds. + // The size of the image message ring buffer for publishing + // grayscale images. 15 images = 17.6 MB of buffer space. This + // number is so large because it sets the lifetime for each image + // message sent out. Eventually we'll write over the pointer we've + // handed out. At 15 Hz, 15 frames means an image will stay valid + // for 1.0 seconds. Localization can process at 2 Hz, so it only + // needs an image for 0.5 seconds. + static constexpr size_t kImageMsgBuffer = 15; + + // The size of the image message ring buffer for publishing raw + // Bayer format images. The same comments apply about message + // lifetime, but in this case we're expecting the subscriber + // callback to at most de-Bayer the image before passing it on, so + // the buffer doesn't need to be so long. 
+ static constexpr size_t kBayerImageMsgBufferLength = 5; + static constexpr size_t kImageWidth = 1280; static constexpr size_t kImageHeight = 960; @@ -71,22 +75,36 @@ class CameraNodelet : public ff_util::FreeFlyerNodelet { private: void PublishLoop(); bool EnableService(ff_msgs::SetBool::Request& req, ff_msgs::SetBool::Response& res); // NOLINT + size_t getNumBayerSubscribers(); sensor_msgs::ImagePtr img_msg_buffer_[kImageMsgBuffer]; + sensor_msgs::ImagePtr bayer_img_msg_buffer_[kBayerImageMsgBufferLength]; size_t img_msg_buffer_idx_; + size_t bayer_img_msg_buffer_idx_; std::thread thread_; std::atomic thread_running_; ros::Publisher pub_; + ros::Publisher bayer_pub_; std::shared_ptr v4l_; config_reader::ConfigReader config_; ros::Timer config_timer_; - std::string output_topic_; std::string camera_device_; std::string camera_topic_; + std::string bayer_camera_topic_; std::string config_name_; int camera_gain_, camera_exposure_; bool calibration_mode_; + + // bayer_enable: Set to true to enable publishing raw Bayer image + // (can be converted to RGB). May incur significant I/O overhead. + bool bayer_enable_; + + // bayer_throttle_ratio: Set to n to publish 1 raw Bayer image for + // every n images grabbed. With n = 1, every image is + // published. Larger n reduces I/O overhead. 
+ unsigned int bayer_throttle_ratio_; + size_t bayer_throttle_ratio_counter_; }; } // end namespace is_camera diff --git a/hardware/is_camera/src/camera.cc b/hardware/is_camera/src/camera.cc index 1882467938..3930972312 100644 --- a/hardware/is_camera/src/camera.cc +++ b/hardware/is_camera/src/camera.cc @@ -162,7 +162,12 @@ namespace is_camera { }; CameraNodelet::CameraNodelet() : ff_util::FreeFlyerNodelet(), - img_msg_buffer_idx_(0), thread_running_(false), camera_topic_(""), calibration_mode_(false) + img_msg_buffer_idx_(0), + bayer_img_msg_buffer_idx_(0), + thread_running_(false), + camera_topic_(""), + calibration_mode_(false), + bayer_throttle_ratio_counter_(0) {} CameraNodelet::~CameraNodelet() { @@ -196,6 +201,10 @@ namespace is_camera { config_.CheckFilesUpdated(std::bind(&CameraNodelet::ReadParams, this));}, false, true); pub_ = nh->advertise(camera_topic_, 1); + if (bayer_enable_) { + bayer_camera_topic_ = camera_topic_ + "_bayer"; + bayer_pub_ = nh->advertise(bayer_camera_topic_, 1); + } // Allocate space for our output msg buffer for (size_t i = 0; i < kImageMsgBuffer; i++) { @@ -207,11 +216,37 @@ namespace is_camera { img_msg_buffer_[i]->data.resize(kImageWidth * kImageHeight); } + if (bayer_enable_) { + // Allocate space for our Bayer output msg buffer + for (size_t i = 0; i < kBayerImageMsgBufferLength; i++) { + bayer_img_msg_buffer_[i].reset(new sensor_msgs::Image()); + bayer_img_msg_buffer_[i]->width = kImageWidth; + bayer_img_msg_buffer_[i]->height = kImageHeight; + + // The OpenCV specification of the IS camera Bayer encoding + // appears to be "BayerGR" if our previous cvtColor() call + // is correct. That corresponds to a ROS specification of + // enc::BAYER_GBRG8 [1]. 
+ // [1] https://github.com/ros-perception/image_pipeline/blob/71d537a50bf4e7769af513f4a3d3df0d188cfa05/image_proc/src/nodelets/debayer.cpp#L223 + bayer_img_msg_buffer_[i]->encoding = "bayer_gbrg8"; + bayer_img_msg_buffer_[i]->step = kImageWidth; + bayer_img_msg_buffer_[i]->data.resize(kImageWidth * kImageHeight); + } + } + v4l_.reset(new V4LStruct(camera_device_, camera_gain_, camera_exposure_)); thread_running_ = true; thread_ = std::thread(&CameraNodelet::PublishLoop, this); } + size_t CameraNodelet::getNumBayerSubscribers(void) { + if (bayer_enable_) { + return bayer_pub_.getNumSubscribers(); + } else { + return 0; + } + } + void CameraNodelet::ReadParams(void) { if (!config_.ReadFiles()) { ROS_ERROR("Failed to read config files."); @@ -251,6 +286,18 @@ namespace is_camera { } } + if (!camera.GetBool("bayer_enable", &bayer_enable_)) { + FF_FATAL("Bayer enable not specified."); + exit(EXIT_FAILURE); + } + + if (bayer_enable_) { + if (!camera.GetUInt("bayer_throttle_ratio", &bayer_throttle_ratio_)) { + FF_FATAL("Bayer throttle ratio not specified."); + exit(EXIT_FAILURE); + } + } + if (thread_running_) { v4l_->SetParameters(camera_gain_, camera_exposure_); } @@ -261,7 +308,7 @@ namespace is_camera { while (thread_running_) { if (!camera_running) { - while ((pub_.getNumSubscribers() == 0) && thread_running_) + while ((pub_.getNumSubscribers() + getNumBayerSubscribers() == 0) && thread_running_) usleep(100000); if (!thread_running_) break; @@ -299,34 +346,63 @@ namespace is_camera { int last_buf = v4l_->buf.index; v4l_->buf.index = (last_buf + 1) % v4l_->req.count; - if (pub_.getNumSubscribers() != 0) + if (pub_.getNumSubscribers() + getNumBayerSubscribers() > 0) xioctl(v4l_->fd, VIDIOC_QBUF, &v4l_->buf); else camera_running = false; ros::Time timestamp = ros::Time::now(); - // Select our output msg buffer - img_msg_buffer_idx_ = (img_msg_buffer_idx_ + 1) % kImageMsgBuffer; - if (!failed) { - // Wrap the buffer with cv::Mat so we can manipulate it. 
- cv::Mat wrapped(v4l_->fmt.fmt.pix.height, - v4l_->fmt.fmt.pix.width, - cv::DataType::type, - v4l_->buffers[last_buf].start, - v4l_->fmt.fmt.pix.width); // does not copy - cv::Mat owrapped(kImageHeight, kImageWidth, - cv::DataType::type, - &(img_msg_buffer_[img_msg_buffer_idx_]->data[0]), - kImageWidth); - cv::cvtColor(wrapped, owrapped, - CV_BayerGR2GRAY); - - // Attach the time - img_msg_buffer_[img_msg_buffer_idx_]->header = std_msgs::Header(); - img_msg_buffer_[img_msg_buffer_idx_]->header.stamp = timestamp; - - pub_.publish(img_msg_buffer_[img_msg_buffer_idx_]); + if (pub_.getNumSubscribers() > 0) { + sensor_msgs::ImagePtr& out_image = img_msg_buffer_[img_msg_buffer_idx_]; + + // Use OpenCV to convert raw Bayer format to grayscale (writing the result + // into the pre-allocated slot in the image message ring buffer). + cv::Mat wrapped(v4l_->fmt.fmt.pix.height, + v4l_->fmt.fmt.pix.width, + cv::DataType::type, + v4l_->buffers[last_buf].start, + v4l_->fmt.fmt.pix.width); // does not copy + cv::Mat owrapped(kImageHeight, kImageWidth, + cv::DataType::type, + &(out_image->data[0]), + kImageWidth); + cv::cvtColor(wrapped, owrapped, + CV_BayerGR2GRAY); + + // Attach the time + out_image->header = std_msgs::Header(); + out_image->header.stamp = timestamp; + + pub_.publish(out_image); + + // Increment index in ring buffer + img_msg_buffer_idx_ = (img_msg_buffer_idx_ + 1) % kImageMsgBuffer; + } + + if (getNumBayerSubscribers() > 0) { + bayer_throttle_ratio_counter_ = (bayer_throttle_ratio_counter_ + 1) + % bayer_throttle_ratio_; + + if (bayer_throttle_ratio_counter_ == 0) { + sensor_msgs::ImagePtr& out_image = bayer_img_msg_buffer_[bayer_img_msg_buffer_idx_]; + + // Copy the raw Bayer format data into the pre-allocated + // slot in the image message ring buffer. 
+ memcpy(&(out_image->data[0]), + v4l_->buffers[last_buf].start, + kImageWidth * kImageHeight); + + // Attach the time + out_image->header = std_msgs::Header(); + out_image->header.stamp = timestamp; + + bayer_pub_.publish(out_image); + + // Increment index in ring buffer + bayer_img_msg_buffer_idx_ = (bayer_img_msg_buffer_idx_ + 1) % kBayerImageMsgBufferLength; + } + } } ros::spinOnce(); diff --git a/hardware/perching_arm/readme.md b/hardware/perching_arm/readme.md index 5de2733c33..62bddc5d18 100644 --- a/hardware/perching_arm/readme.md +++ b/hardware/perching_arm/readme.md @@ -1,3 +1,8 @@ \page perching_arm Perching arm -To be written... \ No newline at end of file +## Re-initialize service + +In case we forget to power on the arm bay before starting fsw, it is +possible to re-initialize the perching arm, trying to re-connect with: + + rosservice call /hw/arm/enable_arm "enabled: true" \ No newline at end of file diff --git a/hardware/perching_arm/src/perching_arm_node.cc b/hardware/perching_arm/src/perching_arm_node.cc index 97f3a3dedd..ddc4140408 100644 --- a/hardware/perching_arm/src/perching_arm_node.cc +++ b/hardware/perching_arm/src/perching_arm_node.cc @@ -50,6 +50,7 @@ class PerchingArmNode : public ff_util::FreeFlyerNodelet { // Called on flight software stack initialization - every NODELET_FATAIL // call below should be converted to an initialization fault... 
void Initialize(ros::NodeHandle *nh) { + nh_ = *nh; // Read the configuration config_reader::ConfigReader config_params; config_params.AddFile("hw/perching_arm.config"); @@ -60,6 +61,15 @@ class PerchingArmNode : public ff_util::FreeFlyerNodelet { if (!config_params.GetTable("perching_arm", &devices)) NODELET_FATAL("Could get perching_arm item in config file"); + // Reconnect to the arm service + if (!initialized_) { + srv_a_ = + nh->advertiseService(SERVICE_HARDWARE_PERCHING_ARM_ENABLE, + &PerchingArmNode::EnableArmCallback, + this); + initialized_ = true; + } + // Iterate over all devices for (int i = 0; i < devices.GetSize(); i++) { config_reader::ConfigReader::Table device_info; @@ -99,6 +109,7 @@ class PerchingArmNode : public ff_util::FreeFlyerNodelet { NODELET_WARN("Could not initialize the arm. It is attached?"); return; } + arm_connected_ = true; // Grab config parameters for the matched device config_reader::ConfigReader::Table config_list; @@ -347,6 +358,23 @@ class PerchingArmNode : public ff_util::FreeFlyerNodelet { } } + // This service re-initializes the perching arm if the arm was + // not powered on during startup + bool EnableArmCallback(ff_hw_msgs::SetEnabled::Request &req, + ff_hw_msgs::SetEnabled::Response &res) { + if (req.enabled && !arm_connected_) { + Initialize(&nh_); + res.success = arm_connected_; + } else if (!req.enabled && arm_connected_) { + res.success = false; + NODELET_WARN("It is not possible to disable the arm"); + } else { + res.success = true; + NODELET_WARN("Already satisfies request"); + } + return true; + } + // Set the distal velocity bool SetDistVelCallback(ff_hw_msgs::SetJointMaxVelocity::Request &req, ff_hw_msgs::SetJointMaxVelocity::Response &res) { @@ -415,11 +443,15 @@ class PerchingArmNode : public ff_util::FreeFlyerNodelet { } private: + ros::NodeHandle nh_; + bool initialized_ = false; + bool arm_connected_ = false; PerchingArm arm_; // Arm interface library ros::Subscriber sub_; // Joint state subscriber 
ros::Publisher pub_; // Joint state publisher + ros::ServiceServer srv_a_; // Enable the arm ros::ServiceServer srv_p_; // Set max pan velocity - ros::ServiceServer srv_t_; // Set max tilt velcoity + ros::ServiceServer srv_t_; // Set max tilt velocity ros::ServiceServer srv_ps_; // Enable/Disable the proximal joint servo ros::ServiceServer srv_ds_; // Enable/Disable the distal joint servo ros::ServiceServer srv_gs_; // Enable/Disable the gripper joint servo diff --git a/hardware/pmc_actuator/tools/pmc_currents.py b/hardware/pmc_actuator/tools/pmc_currents.py index ea44339774..6f364b411a 100755 --- a/hardware/pmc_actuator/tools/pmc_currents.py +++ b/hardware/pmc_actuator/tools/pmc_currents.py @@ -1,25 +1,27 @@ #!/usr/bin/python import rospy - from ff_hw_msgs.msg import EpsHousekeeping + def callback(data): - pmc_1 = float('nan') - pmc_2 = float('nan') - system = float('nan') + pmc_1 = float("nan") + pmc_2 = float("nan") + system = float("nan") for c in data.values: - if c.name.find("MOTOR1_I") >= 0 : + if c.name.find("MOTOR1_I") >= 0: pmc_1 = c.value - if c.name.find("MOTOR2_I") >= 0 : + if c.name.find("MOTOR2_I") >= 0: pmc_2 = c.value - if c.name.find("SYSTEM_I") >= 0 : + if c.name.find("SYSTEM_I") >= 0: system = c.value - print("system= %.3fA | pmc_1= %.3fA | pmc_2=%.3fA" % (system, pmc_1, pmc_2)) - + print(("system= %.3fA | pmc_1= %.3fA | pmc_2=%.3fA" % (system, pmc_1, pmc_2))) + + def listener(): - rospy.init_node('pmc_consumption', anonymous=True) + rospy.init_node("pmc_consumption", anonymous=True) rospy.Subscriber("/hw/eps/housekeeping", EpsHousekeeping, callback) rospy.spin() - -if __name__ == '__main__': + + +if __name__ == "__main__": listener() diff --git a/hardware/pmc_actuator/tools/pmc_status.py b/hardware/pmc_actuator/tools/pmc_status.py index e898852d3b..e7d05bac9c 100755 --- a/hardware/pmc_actuator/tools/pmc_status.py +++ b/hardware/pmc_actuator/tools/pmc_status.py @@ -1,8 +1,8 @@ #!/usr/bin/python import rospy - from ff_hw_msgs.msg import 
PmcTelemetry + def callback(data): # print("got new data!") index = 0 @@ -11,17 +11,32 @@ def callback(data): state = p.status_1 >> 2 & 0b11 bad_crc = p.status_1 >> 6 & 0b1 m_disabled = p.status_1 >> 5 & 0b1 - + speedcam_line = p.status_2 >> 1 & 0b1 - print("PMC[%d]: cmd_id=%03d m_speed=%03d m_cur=%02d mode=%d, state=%d bad_crc=%d m_dis=%d sc_line=%d" % - (index, p.command_id, p.motor_speed, p.motor_current, - mode, state, bad_crc, m_disabled, speedcam_line)) + print( + ( + "PMC[%d]: cmd_id=%03d m_speed=%03d m_cur=%02d mode=%d, state=%d bad_crc=%d m_dis=%d sc_line=%d" + % ( + index, + p.command_id, + p.motor_speed, + p.motor_current, + mode, + state, + bad_crc, + m_disabled, + speedcam_line, + ) + ) + ) index = index + 1 - + + def listener(): - rospy.init_node('pmc_status', anonymous=True) + rospy.init_node("pmc_status", anonymous=True) rospy.Subscriber("/hw/pmc/telemetry", PmcTelemetry, callback) rospy.spin() - -if __name__ == '__main__': + + +if __name__ == "__main__": listener() diff --git a/localization/marker_tracking/ros/tools/features_counter.py b/localization/marker_tracking/ros/tools/features_counter.py index 7113451b1d..566d8ee02b 100755 --- a/localization/marker_tracking/ros/tools/features_counter.py +++ b/localization/marker_tracking/ros/tools/features_counter.py @@ -1,32 +1,44 @@ #!/usr/bin/python import sys -import rospy +import rospy from ff_msgs.msg import VisualLandmarks use_csv_format = False + def callback(data): - count=len(data.landmarks) + count = len(data.landmarks) if use_csv_format: ts = data.header.stamp if count == 0: pose_str = "nan, nan, nan" else: - pose_str = "{:.3f}, {:.3f}, {:.3f}".format(data.pose.position.x, - data.pose.position.y, data.pose.position.z) + pose_str = "{:.3f}, {:.3f}, {:.3f}".format( + data.pose.position.x, data.pose.position.y, data.pose.position.z + ) # print("%d.%s" % (ts.secs, format(ts.nsecs, '09d')) ) - print("%s, %d, %s" % - (format(float(ts.secs)+float(ts.nsecs)/1E9, '.3f'), count, pose_str)) + print( + ( + 
"%s, %d, %s" + % ( + format(float(ts.secs) + float(ts.nsecs) / 1e9, ".3f"), + count, + pose_str, + ) + ) + ) else: print(count) + def listener(type): - rospy.init_node('features_counter', anonymous=True) - rospy.Subscriber("/loc/"+type+"/features", VisualLandmarks, callback) + rospy.init_node("features_counter", anonymous=True) + rospy.Subscriber("/loc/" + type + "/features", VisualLandmarks, callback) rospy.spin() - -if __name__ == '__main__': + + +if __name__ == "__main__": if len(sys.argv) < 2: print("Usage: features_counter.py [csv_format]") print(" = 'ml' or 'ar'") @@ -35,10 +47,10 @@ def listener(type): else: feature = sys.argv[1] if len(sys.argv) > 2: - if sys.argv[2] == ',' or sys.argv[2] == 'csv': + if sys.argv[2] == "," or sys.argv[2] == "csv": use_csv_format = True if use_csv_format: - print("seconds, " + feature + "_count, x, y, z") + print(("seconds, " + feature + "_count, x, y, z")) else: - print("counting " + feature + " features:" ) + print(("counting " + feature + " features:")) listener(feature) diff --git a/localization/sparse_mapping/scripts/camera_trade_study.py b/localization/sparse_mapping/scripts/camera_trade_study.py index 554a6248fc..477aae4912 100644 --- a/localization/sparse_mapping/scripts/camera_trade_study.py +++ b/localization/sparse_mapping/scripts/camera_trade_study.py @@ -1,6 +1,8 @@ import subprocess -print '# fov xres yres featureerror maperror observations disterror distdev angleerror angledev' +print( + "# fov xres yres featureerror maperror observations disterror distdev angleerror angledev" +) resolutions = [(320, 240), (640, 480), (1280, 720)] num_observations = [10, 20, 50] @@ -8,16 +10,47 @@ feature_error = [0, 1, 2, 5] fovs = [90, 120, 150, 170] for ferr in feature_error: - for merr in map_error: - for numobs in num_observations: - for (xres, yres) in resolutions: - for fov_x in fovs: - output = subprocess.check_output(["./bin/evaluate_camera", "--map_error", str(merr), "--feature_error", str(ferr), - 
"--num_observations", str(numobs), "--fov_x", str(fov_x), "--xres", str(xres), "--yres", str(yres)]).split('\n') - parts1 = output[0].split() - disterr = float(parts1[2]) - distdev = float(parts1[4]) - parts2 = output[1].split() - angerr = float(parts2[2]) - angdev = float(parts2[4]) - print '%g, %d, %d, %g, %g, %d, %g, %g, %g, %g' % (fov_x, xres, yres, ferr, merr, numobs, disterr, distdev, angerr, angdev) + for merr in map_error: + for numobs in num_observations: + for (xres, yres) in resolutions: + for fov_x in fovs: + output = subprocess.check_output( + [ + "./bin/evaluate_camera", + "--map_error", + str(merr), + "--feature_error", + str(ferr), + "--num_observations", + str(numobs), + "--fov_x", + str(fov_x), + "--xres", + str(xres), + "--yres", + str(yres), + ] + ).split("\n") + parts1 = output[0].split() + disterr = float(parts1[2]) + distdev = float(parts1[4]) + parts2 = output[1].split() + angerr = float(parts2[2]) + angdev = float(parts2[4]) + print( + ( + "%g, %d, %d, %g, %g, %d, %g, %g, %g, %g" + % ( + fov_x, + xres, + yres, + ferr, + merr, + numobs, + disterr, + distdev, + angerr, + angdev, + ) + ) + ) diff --git a/localization/sparse_mapping/tools/grow_map.py b/localization/sparse_mapping/tools/grow_map.py index 080ab64b36..399953ed91 100644 --- a/localization/sparse_mapping/tools/grow_map.py +++ b/localization/sparse_mapping/tools/grow_map.py @@ -1,15 +1,15 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -19,50 +19,60 @@ # Create a small map out of a big map that has the same predictive # power. That is tested by localizing the images from the big map # against the created small map. See the documentation for more info. - -import sys, os, re, subprocess, argparse, random, shutil + +import argparse +import os +import random +import re +import shutil +import subprocess +import sys + def check_bot(): - '''It is very crucial that the bot is set.''' - bot = 'ASTROBEE_ROBOT' + """It is very crucial that the bot is set.""" + bot = "ASTROBEE_ROBOT" if bot in os.environ: - print(bot + "=" + os.environ[bot]) + print((bot + "=" + os.environ[bot])) else: - raise Exception('Must set ' + bot) + raise Exception("Must set " + bot) + + +def run_cmd(cmd, logfile=""): -def run_cmd(cmd, logfile = ""): - - print(" ".join(cmd)) + print((" ".join(cmd))) if logfile != "": - f = open(logfile, 'w') - + f = open(logfile, "w") + stdout = "" stderr = "" - popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr = subprocess.STDOUT, - universal_newlines=True) + popen = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True + ) for stdout_line in iter(popen.stdout.readline, ""): if logfile != "": f.write(stdout_line) - + stdout += stdout_line - + popen.stdout.close() return_code = popen.wait() if return_code: raise subprocess.CalledProcessError(return_code, cmd) - + if return_code != 0: - print("Failed to run command.\nOutput: ", stdout, "\nError: ", stderr) + print(("Failed to run command.\nOutput: ", stdout, "\nError: ", stderr)) return (return_code, stdout, stderr) + def write_to_file(images, image_file): - with open(image_file, 
'w+') as f: + with open(image_file, "w+") as f: for image in images: f.write(image + "\n") + def mkdir_p(path): try: os.makedirs(path) @@ -70,74 +80,93 @@ def mkdir_p(path): if os.path.isdir(path): pass else: - raise Exception('Could not make directory ' + path + ' as a file with this name exists.') + raise Exception( + "Could not make directory " + path + " as a file with this name exists." + ) + def read_image_list(filename): images = [] - with open(filename, 'r') as f: + with open(filename, "r") as f: for image in f.read().splitlines(): images.append(image.rstrip()) - + if len(images) == 0: - print("No images were read from: " + filename) + print(("No images were read from: " + filename)) sys.exit(1) return images + def list_images_in_map(mapfile): - - cmd = ['build_map', '-info', '-output_map', mapfile] + + cmd = ["build_map", "-info", "-output_map", mapfile] (returncode, stdout, stderr) = run_cmd(cmd) if returncode != 0: - print("Failed to run: " + " ".join(cmd)) + print(("Failed to run: " + " ".join(cmd))) images = [] - for line in (stdout + "\n" + stderr).split('\n'): - + for line in (stdout + "\n" + stderr).split("\n"): + # Print warning messages to stdout m = re.match("^.*?\s([^\s]*?jpg)", line) if m: images.append(m.group(1)) - + return images + def extract_submap(input_map, output_map, image_file, work_dir): - #image_file = work_dir + "/image_list.txt" - #with open(image_file, 'w+') as f: + # image_file = work_dir + "/image_list.txt" + # with open(image_file, 'w+') as f: # for image in images: # f.write(image + "\n") - - print("Extracting submap from " + input_map + " to " + output_map) - cmd = ['extract_submap', '-input_map', input_map, '-output_map', output_map, - '-skip_bundle_adjustment'] - cmd += ['-image_list', image_file] - + print(("Extracting submap from " + input_map + " to " + output_map)) + cmd = [ + "extract_submap", + "-input_map", + input_map, + "-output_map", + output_map, + "-skip_bundle_adjustment", + ] + + cmd += ["-image_list", 
image_file] + run_cmd(cmd) + def extract_submap_with_vocab_db(input_map, output_map, image_file): - print("Extracting submap from " + input_map + " to " + output_map) - cmd = ['extract_submap', '-input_map', input_map, '-output_map', output_map, - '-skip_bundle_adjustment'] + print(("Extracting submap from " + input_map + " to " + output_map)) + cmd = [ + "extract_submap", + "-input_map", + input_map, + "-output_map", + output_map, + "-skip_bundle_adjustment", + ] + + cmd += ["-image_list", image_file] - cmd += ['-image_list', image_file] - run_cmd(cmd) - print("Building vocab db and pruning " + output_map) - cmd = ['build_map', '-output_map', output_map, '-vocab_db'] + print(("Building vocab db and pruning " + output_map)) + cmd = ["build_map", "-output_map", output_map, "-vocab_db"] run_cmd(cmd) + def parse_localization_log_str(log_str): - '''From: + """From: Errors for mydir/009151.jpg: 1e+06 m extract the image name and the error value. - ''' - + """ + images = [] errors = [] for line in log_str.split("\n"): @@ -151,15 +180,16 @@ def parse_localization_log_str(log_str): return (images, errors) + def parse_localization_log_file(log_file): - with open(log_file, 'r') as f: + with open(log_file, "r") as f: all_data = f.read() return parse_localization_log_str(all_data) + def parse_images_with_bad_localization(log_str, localization_error): - '''Return the images for which error value is > localization_error. 
- ''' + """Return the images for which error value is > localization_error.""" bad_images_set = set() bad_images = [] @@ -173,35 +203,57 @@ def parse_images_with_bad_localization(log_str, localization_error): bad_images_set.add(image) bad_images.append(image) errors.append(error) - + return (bad_images_set, bad_images, errors) + def find_hard_to_localize_images(args, ref_map, source_map, out_file): - + # See if the remaining images are enough to localize images in the full map - cmd = ['localize_cams', '-reference_map', ref_map, '-source_map', source_map, - '-min_brisk_threshold', str(args.min_brisk_threshold), - '-default_brisk_threshold', str(args.default_brisk_threshold), - '-max_brisk_threshold', str(args.max_brisk_threshold), - '-early_break_landmarks', str(args.early_break_landmarks), - '-num_similar', str(args.num_similar), - '-ransac_inlier_tolerance', str(args.ransac_inlier_tolerance), - '-num_ransac_iterations', str(args.num_ransac_iterations)] + cmd = [ + "localize_cams", + "-reference_map", + ref_map, + "-source_map", + source_map, + "-min_brisk_threshold", + str(args.min_brisk_threshold), + "-default_brisk_threshold", + str(args.default_brisk_threshold), + "-max_brisk_threshold", + str(args.max_brisk_threshold), + "-early_break_landmarks", + str(args.early_break_landmarks), + "-num_similar", + str(args.num_similar), + "-ransac_inlier_tolerance", + str(args.ransac_inlier_tolerance), + "-num_ransac_iterations", + str(args.num_ransac_iterations), + ] if args.histogram_equalization: - cmd.append('-histogram_equalization') - - full_log = out_file + ".log"; - print("Writing the log of localization to: " + full_log) - - (returncode, stdout, stderr) = run_cmd(cmd, full_log) - - (bad_images_set, bad_images, errors) = \ - parse_images_with_bad_localization(stdout + "\n" + stderr, - args.localization_error) - print("Number of images with localization error > " + str(args.localization_error) + \ - " with reference map: " + ref_map + " is: " + str(len(bad_images))) 
- - print("Writing these images to: " + out_file) + cmd.append("-histogram_equalization") + + full_log = out_file + ".log" + print(("Writing the log of localization to: " + full_log)) + + (returncode, stdout, stderr) = run_cmd(cmd, full_log) + + (bad_images_set, bad_images, errors) = parse_images_with_bad_localization( + stdout + "\n" + stderr, args.localization_error + ) + print( + ( + "Number of images with localization error > " + + str(args.localization_error) + + " with reference map: " + + ref_map + + " is: " + + str(len(bad_images)) + ) + ) + + print(("Writing these images to: " + out_file)) with open(out_file, "w+") as f: f.write("# image and localization error:\n") for count in range(len(bad_images)): @@ -209,8 +261,9 @@ def find_hard_to_localize_images(args, ref_map, source_map, out_file): return bad_images + def check_subset(list1, list2): - '''Check that all images in the first list are also present in the second.''' + """Check that all images in the first list are also present in the second.""" set2 = set() for val in list2: @@ -218,13 +271,14 @@ def check_subset(list1, list2): for val in list1: if val not in set2: - print("Missing image " + str(val)) + print(("Missing image " + str(val))) return False return True + def in_notin(list1, list2): - '''Find entries in the first list that are not in the second list.''' + """Find entries in the first list that are not in the second list.""" set2 = set() for val in list2: @@ -234,60 +288,129 @@ def in_notin(list1, list2): for val in list1: if val not in set2: out_list.append(val) - + return out_list - + + def parse_args(): - parser = argparse.ArgumentParser(description='Remove images from a map that appear reduntant.') - parser.add_argument("-small_map", type=str, required = True, - help="Input registered, pruned, BRISK map with vocab db.") - parser.add_argument("-big_map", type=str, required = True, - help="Input registered, unpruned, BRISK map without vocab db.") - parser.add_argument('-output_map', type = 
str, required = True, - help='Output registered, pruned, BRISK map with vocab db.') - parser.add_argument("-min_brisk_threshold", type=int, required = False, default = 20, - help="Min BRISK threshold.") - parser.add_argument("-default_brisk_threshold", type=int, required = False, default = 90, - help="Default BRISK threshold.") - parser.add_argument("-max_brisk_threshold", type=int, required = False, default = 110, - help="Max BRISK threshold.") - parser.add_argument("-early_break_landmarks", type=int, required = False, default = 100, - help = "Break early when we have this many landmarks during localization.") - parser.add_argument('-histogram_equalization', dest='histogram_equalization', - action='store_true', required = False) - parser.add_argument("-num_similar", type=int, required = False, default = 20, - help = "Use in localization this many images which " + \ - "are most similar to the image to localize.") - parser.add_argument("-num_ransac_iterations", type=int, required = False, default = 100, - help = "Use in localization this many ransac iterations.") - parser.add_argument("-ransac_inlier_tolerance", type=int, required = False, default = 5, - help = "Use in localization this inlier tolerance.") - parser.add_argument("-localization_error", type=float, required = False, default = 0.02, - help="An image that has localization error bigger than this, " + \ - "in meters, is considered hard to localize.") - parser.add_argument("-big_localization_error", type=float, required = False, default = 0.05, - help="Image whose localization error went over this threshold as " + - "result of reducing the map should be flagged.") - parser.add_argument("-work_dir", type=str, required = True, - help="Store all results in this directory.") - - parser.add_argument('image_lists', nargs='*') - + parser = argparse.ArgumentParser( + description="Remove images from a map that appear reduntant." 
+ ) + parser.add_argument( + "-small_map", + type=str, + required=True, + help="Input registered, pruned, BRISK map with vocab db.", + ) + parser.add_argument( + "-big_map", + type=str, + required=True, + help="Input registered, unpruned, BRISK map without vocab db.", + ) + parser.add_argument( + "-output_map", + type=str, + required=True, + help="Output registered, pruned, BRISK map with vocab db.", + ) + parser.add_argument( + "-min_brisk_threshold", + type=int, + required=False, + default=20, + help="Min BRISK threshold.", + ) + parser.add_argument( + "-default_brisk_threshold", + type=int, + required=False, + default=90, + help="Default BRISK threshold.", + ) + parser.add_argument( + "-max_brisk_threshold", + type=int, + required=False, + default=110, + help="Max BRISK threshold.", + ) + parser.add_argument( + "-early_break_landmarks", + type=int, + required=False, + default=100, + help="Break early when we have this many landmarks during localization.", + ) + parser.add_argument( + "-histogram_equalization", + dest="histogram_equalization", + action="store_true", + required=False, + ) + parser.add_argument( + "-num_similar", + type=int, + required=False, + default=20, + help="Use in localization this many images which " + + "are most similar to the image to localize.", + ) + parser.add_argument( + "-num_ransac_iterations", + type=int, + required=False, + default=100, + help="Use in localization this many ransac iterations.", + ) + parser.add_argument( + "-ransac_inlier_tolerance", + type=int, + required=False, + default=5, + help="Use in localization this inlier tolerance.", + ) + parser.add_argument( + "-localization_error", + type=float, + required=False, + default=0.02, + help="An image that has localization error bigger than this, " + + "in meters, is considered hard to localize.", + ) + parser.add_argument( + "-big_localization_error", + type=float, + required=False, + default=0.05, + help="Image whose localization error went over this threshold as " + + 
"result of reducing the map should be flagged.", + ) + parser.add_argument( + "-work_dir", + type=str, + required=True, + help="Store all results in this directory.", + ) + + parser.add_argument("image_lists", nargs="*") + args = parser.parse_args() return args + def add_neighbors_of_bad_images(all_images, bad_images): - '''Given a list of images in all_images, and a subset of them + """Given a list of images in all_images, and a subset of them in bad_images, create a list of images that has all the images in bad_images, and for each such image also has the image before it and the image after it, as they show in all_images. The reason we want to add the neighbors for each bad image is that we will put all these in a map, and it is not good for localization that in a map an image shows up isolated. - ''' - + """ + all_images_dict = {} for image_count in range(len(all_images)): all_images_dict[all_images[image_count]] = image_count @@ -299,34 +422,34 @@ def add_neighbors_of_bad_images(all_images, bad_images): image_count = all_images_dict[bad_image] # Add the bad image - #print("add bad image " + bad_image) + # print("add bad image " + bad_image) images_to_add_dict[image_count] = bad_image # Add its neighbors if image_count - 1 >= 0: - #print("add neighbor " + all_images[image_count - 1]) + # print("add neighbor " + all_images[image_count - 1]) images_to_add_dict[image_count - 1] = all_images[image_count - 1] if image_count + 1 < len(all_images): - #print("add neighbor " + all_images[image_count + 1]) + # print("add neighbor " + all_images[image_count + 1]) images_to_add_dict[image_count + 1] = all_images[image_count + 1] images_to_add = [] for image_count in images_to_add_dict: - #print("---add ", images_to_add_dict[image_count]) + # print("---add ", images_to_add_dict[image_count]) images_to_add.append(images_to_add_dict[image_count]) return images_to_add + def grow_map(args, curr_map_images, curr_map, source_map): - '''Images in source map that cannot be localized 
against curr_map are added to it. + """Images in source map that cannot be localized against curr_map are added to it. Addition is happening by assemblng all needed images and carving out the submap of args.big_map having them. - ''' - + """ + log_file = args.work_dir + "/bad_localization_list.txt" - bad_images = find_hard_to_localize_images(args, curr_map, source_map, - log_file) + bad_images = find_hard_to_localize_images(args, curr_map, source_map, log_file) # Let the neigbhobs of bad (not localizable) images as also be bad source_map_images = list_images_in_map(source_map) @@ -341,34 +464,38 @@ def grow_map(args, curr_map_images, curr_map, source_map): curr_map_images = sorted(set(curr_map_images + all_bad_images)) curr_map_list = args.work_dir + "/curr_map_list.txt" - print("The grown map will use the images in: " + curr_map_list) + print(("The grown map will use the images in: " + curr_map_list)) write_to_file(curr_map_images, curr_map_list) curr_map = args.work_dir + "/grown_map.map" - print("Growing the map and building its vocab db. Writing it to: " + curr_map) + print(("Growing the map and building its vocab db. 
Writing it to: " + curr_map)) extract_submap_with_vocab_db(args.big_map, curr_map, curr_map_list) else: print("No new images will be added at this stage.") - + return [curr_map_images, curr_map] - + + def main(): args = parse_args() - print("Using histogram_equalization: " + str(args.histogram_equalization)) + print(("Using histogram_equalization: " + str(args.histogram_equalization))) if not os.path.exists(args.small_map): raise Exception("The input small map does not exist.") if not os.path.exists(args.big_map): raise Exception("The input big map does not exist.") - if args.small_map == args.big_map or args.small_map == args.output_map or \ - args.big_map == args.output_map: + if ( + args.small_map == args.big_map + or args.small_map == args.output_map + or args.big_map == args.output_map + ): raise Exception("Some of the specified maps have the same names.") - + check_bot() - + # TODO(oalexan1): Must ensure both maps are registered and that the second one # is larger than the first one! 
@@ -377,7 +504,7 @@ def main(): # These will be updated at each iteration curr_map = args.small_map curr_map_images = list_images_in_map(curr_map) - + big_map_images = list_images_in_map(args.big_map) for list_iter in range(len(args.image_lists)): @@ -388,30 +515,49 @@ def main(): # Sanity check missing_images = in_notin(curr_map_images, big_map_images) if len(missing_images) > 0: - print("The following images are in " + args.big_map + " but not in " + curr_map) + print( + ( + "The following images are in " + + args.big_map + + " but not in " + + curr_map + ) + ) print(missing_images) print("This violates the assumptions of this tool.") sys.exit(1) - - print("See which images to add to map from: " + candidate_list) + + print(("See which images to add to map from: " + candidate_list)) # Add those images for which localization against curr_map fails source_map = args.work_dir + "/" + "inc_map.map" extract_submap(args.big_map, source_map, candidate_list, args.work_dir) - [curr_map_images, curr_map] = grow_map(args, curr_map_images, curr_map, source_map) + [curr_map_images, curr_map] = grow_map( + args, curr_map_images, curr_map, source_map + ) # One last verification. Images in args.big_map that cannot be localized # against the map grown so far must be added to it. - [curr_map_images, curr_map] = grow_map(args, curr_map_images, curr_map, args.big_map) + [curr_map_images, curr_map] = grow_map( + args, curr_map_images, curr_map, args.big_map + ) if curr_map == args.small_map: - print("The map did not grow. Create " + args.output_map + " as a copy of " + args.small_map) + print( + ( + "The map did not grow. 
Create " + + args.output_map + + " as a copy of " + + args.small_map + ) + ) shutil.copy(args.small_map, args.output_map) else: - print("Moving " + curr_map + " to " + args.output_map) + print(("Moving " + curr_map + " to " + args.output_map)) shutil.move(curr_map, args.output_map) - + + if __name__ == "__main__": main() diff --git a/localization/sparse_mapping/tools/parse_total_station.py b/localization/sparse_mapping/tools/parse_total_station.py index c64eba0c50..5644937094 100644 --- a/localization/sparse_mapping/tools/parse_total_station.py +++ b/localization/sparse_mapping/tools/parse_total_station.py @@ -1,22 +1,24 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import os, sys, re +import os +import re +import sys """Parse a measurements file as output by the Total Station, and convert it into a format to be read by build_map when doing @@ -24,33 +26,32 @@ """ if len(sys.argv) < 2: - print("Usage: " + sys.argv[0] + ' input.txt output.txt') + print(("Usage: " + sys.argv[0] + " input.txt output.txt")) sys.exit(1) -fileIn = sys.argv[1] +fileIn = sys.argv[1] fileOut = sys.argv[2] with open(fileIn) as f: - f_out = open(fileOut, 'w') + f_out = open(fileOut, "w") lines = f.readlines() f_out.write("# x y z id\n") for line in lines: # Match: *110001+0000000000111111 81...0+0000000000000942 82...0+0000000000001960 83...0-0000000000000312 - m = re.match("^\*.*?\+0*(.*?)\s+8\d\.\.\.0([\+\-].*?)\s+8\d\.\.\.0([\+\-].*?)\s+8\d\.\.\.0([\+\-].*?)\s+", line) + m = re.match( + "^\*.*?\+0*(.*?)\s+8\d\.\.\.0([\+\-].*?)\s+8\d\.\.\.0([\+\-].*?)\s+8\d\.\.\.0([\+\-].*?)\s+", + line, + ) if not m: continue # Strip extra zeros and convert from mm to meters pt_id = m.group(1) - x = int(m.group(2))/1000.0 - y = int(m.group(3))/1000.0 - z = int(m.group(4))/1000.0 + x = int(m.group(2)) / 1000.0 + y = int(m.group(3)) / 1000.0 + z = int(m.group(4)) / 1000.0 # The id field is just a comment now, it is not needed f_out.write(str(x) + " " + str(y) + " " + str(z) + " # " + pt_id + "\n") - - - - diff --git a/localization/sparse_mapping/tools/reduce_map.py b/localization/sparse_mapping/tools/reduce_map.py index 5486d32e05..307ec7ea01 100644 --- a/localization/sparse_mapping/tools/reduce_map.py +++ b/localization/sparse_mapping/tools/reduce_map.py @@ -1,15 +1,15 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -19,37 +19,44 @@ # Create a small map out of a big map that has the same predictive # power. That is tested by localizing the images from the big map # against the created small map. See the documentation for more info. - -import sys, os, re, subprocess, argparse, random -def run_cmd(cmd, logfile = ""): +import argparse +import os +import random +import re +import subprocess +import sys - print(" ".join(cmd)) + +def run_cmd(cmd, logfile=""): + + print((" ".join(cmd))) if logfile != "": - f = open(logfile, 'w') - + f = open(logfile, "w") + stdout = "" stderr = "" - popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr = subprocess.STDOUT, - universal_newlines=True) + popen = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True + ) for stdout_line in iter(popen.stdout.readline, ""): if logfile != "": f.write(stdout_line) - + stdout += stdout_line - + popen.stdout.close() return_code = popen.wait() if return_code: raise subprocess.CalledProcessError(return_code, cmd) - + if return_code != 0: - print("Failed to run command.\nOutput: ", stdout, "\nError: ", stderr) + print(("Failed to run command.\nOutput: ", stdout, "\nError: ", stderr)) return (return_code, stdout, stderr) + def mkdir_p(path): try: os.makedirs(path) @@ -57,64 +64,76 @@ def mkdir_p(path): if os.path.isdir(path): pass else: - raise Exception('Could not make directory ' + path + ' as a file with this name exists.') + raise Exception( + "Could not make directory " + path + " as a file with this name exists." 
+ ) + def read_image_list(filename): images = [] - with open(filename, 'r') as f: + with open(filename, "r") as f: for image in f.read().splitlines(): images.append(image.rstrip()) - + if len(images) == 0: - print("No images were read from: " + filename) + print(("No images were read from: " + filename)) sys.exit(1) return images + def list_images_in_map(mapfile): - - cmd = ['build_map', '-info', '-output_map', mapfile] + + cmd = ["build_map", "-info", "-output_map", mapfile] (returncode, stdout, stderr) = run_cmd(cmd) if returncode != 0: - print("Failed to run: " + " ".join(cmd)) + print(("Failed to run: " + " ".join(cmd))) images = [] - for line in (stdout + "\n" + stderr).split('\n'): - + for line in (stdout + "\n" + stderr).split("\n"): + # Print warning messages to stdout m = re.match("^.*?\s([^\s]*?jpg)", line) if m: images.append(m.group(1)) - + return images + def extract_submap_with_vocab_db(input_map, output_map, images, work_dir): - image_file = work_dir + "/image_list.txt"; - with open(image_file, 'w+') as f: + image_file = work_dir + "/image_list.txt" + with open(image_file, "w+") as f: for image in images: f.write(image + "\n") - - print("Extracting submap from " + input_map + " to " + output_map) - cmd = ['extract_submap', '-input_map', input_map, '-output_map', output_map, - '-skip_bundle_adjustment'] - cmd +=['-image_list', image_file] - + print(("Extracting submap from " + input_map + " to " + output_map)) + cmd = [ + "extract_submap", + "-input_map", + input_map, + "-output_map", + output_map, + "-skip_bundle_adjustment", + ] + + cmd += ["-image_list", image_file] + run_cmd(cmd) - print("Building vocab db and pruning " + output_map) - cmd = ['build_map', '-output_map', output_map, '-vocab_db'] + print(("Building vocab db and pruning " + output_map)) + cmd = ["build_map", "-output_map", output_map, "-vocab_db"] run_cmd(cmd) + def parse_localization_log_str(log_str): - '''From: + """From: Errors for mydir/009151.jpg: 1e+06 m extract the image name 
and the error value. - ''' - + """ + images = [] errors = [] for line in log_str.split("\n"): @@ -128,15 +147,16 @@ def parse_localization_log_str(log_str): return (images, errors) + def parse_localization_log_file(log_file): - with open(log_file, 'r') as f: + with open(log_file, "r") as f: all_data = f.read() return parse_localization_log_str(all_data) + def parse_images_with_bad_localization(log_str, localization_error): - '''Return the images for which error value is > localization_error. - ''' + """Return the images for which error value is > localization_error.""" bad_images_set = set() bad_images = [] @@ -150,35 +170,57 @@ def parse_images_with_bad_localization(log_str, localization_error): bad_images_set.add(image) bad_images.append(image) errors.append(error) - + return (bad_images_set, bad_images, errors) + def find_hard_to_localize_images(args, ref_map, source_map, out_file): - + # See if the remaining images are enough to localize images in the full map - cmd = ['localize_cams', '-reference_map', ref_map, '-source_map', source_map, - '-min_brisk_threshold', str(args.min_brisk_threshold), - '-default_brisk_threshold', str(args.default_brisk_threshold), - '-max_brisk_threshold', str(args.max_brisk_threshold), - '-early_break_landmarks', str(args.early_break_landmarks), - '-num_similar', str(args.num_similar), - '-ransac_inlier_tolerance', str(args.ransac_inlier_tolerance), - '-num_ransac_iterations', str(args.num_ransac_iterations)] + cmd = [ + "localize_cams", + "-reference_map", + ref_map, + "-source_map", + source_map, + "-min_brisk_threshold", + str(args.min_brisk_threshold), + "-default_brisk_threshold", + str(args.default_brisk_threshold), + "-max_brisk_threshold", + str(args.max_brisk_threshold), + "-early_break_landmarks", + str(args.early_break_landmarks), + "-num_similar", + str(args.num_similar), + "-ransac_inlier_tolerance", + str(args.ransac_inlier_tolerance), + "-num_ransac_iterations", + str(args.num_ransac_iterations), + ] if 
args.histogram_equalization: - cmd.append('-histogram_equalization') - - full_log = out_file + ".log"; - print("Writing the log of localization to: " + full_log) - - (returncode, stdout, stderr) = run_cmd(cmd, full_log) - - (bad_images_set, bad_images, errors) = \ - parse_images_with_bad_localization(stdout + "\n" + stderr, - args.localization_error) - print("Number of images with localization error > " + str(args.localization_error) + \ - " with reference map: " + ref_map + " is: " + str(len(bad_images))) - - print("Writing these images to: " + out_file) + cmd.append("-histogram_equalization") + + full_log = out_file + ".log" + print(("Writing the log of localization to: " + full_log)) + + (returncode, stdout, stderr) = run_cmd(cmd, full_log) + + (bad_images_set, bad_images, errors) = parse_images_with_bad_localization( + stdout + "\n" + stderr, args.localization_error + ) + print( + ( + "Number of images with localization error > " + + str(args.localization_error) + + " with reference map: " + + ref_map + + " is: " + + str(len(bad_images)) + ) + ) + + print(("Writing these images to: " + out_file)) with open(out_file, "w+") as f: f.write("# image and localization error:\n") for count in range(len(bad_images)): @@ -186,8 +228,9 @@ def find_hard_to_localize_images(args, ref_map, source_map, out_file): return bad_images_set + def check_subset(list1, list2): - '''Check that all images in the first list are also present in the second.''' + """Check that all images in the first list are also present in the second.""" set2 = set() for val in list2: @@ -195,13 +238,14 @@ def check_subset(list1, list2): for val in list1: if val not in set2: - print("Missing image " + str(val)) + print(("Missing image " + str(val))) return False return True + def in_notin(list1, list2): - '''Find entries in the first list that are not in the second list.''' + """Find entries in the first list that are not in the second list.""" set2 = set() for val in list2: @@ -211,131 +255,229 @@ def 
in_notin(list1, list2): for val in list1: if val not in set2: out_list.append(val) - + return out_list - + + def parse_args(): - parser = argparse.ArgumentParser(description='Remove images from a map that appear reduntant.') - parser.add_argument("-input_map", type=str, required = True, - help="Input registered, unpruned, BRISK map with vocab db.") - #parser.add_argument('-output_map', type = str, required = True, + parser = argparse.ArgumentParser( + description="Remove images from a map that appear reduntant." + ) + parser.add_argument( + "-input_map", + type=str, + required=True, + help="Input registered, unpruned, BRISK map with vocab db.", + ) + # parser.add_argument('-output_map', type = str, required = True, # help='Output registered, pruned, BRISK map with vocab db.') - parser.add_argument("-min_brisk_threshold", type=int, required = False, default = 20, - help="Min BRISK threshold.") - parser.add_argument("-default_brisk_threshold", type=int, required = False, default = 90, - help="Default BRISK threshold.") - parser.add_argument("-max_brisk_threshold", type=int, required = False, default = 110, - help="Max BRISK threshold.") - parser.add_argument("-early_break_landmarks", type=int, required = False, default = 100, - help = "Break early when we have this many landmarks during localization.") - parser.add_argument('-histogram_equalization', dest='histogram_equalization', - action='store_true', required = False) - - parser.add_argument("-num_similar", type=int, required = False, default = 20, - help = "Use in localization this many images which " + \ - "are most similar to the image to localize.") - parser.add_argument("-num_ransac_iterations", type=int, required = False, default = 100, - help = "Use in localization this many ransac iterations.") - parser.add_argument("-ransac_inlier_tolerance", type=int, required = False, default = 5, - help = "Use in localization this inlier tolerance.") - parser.add_argument("-sample_rate", type=float, required = False, 
default = 0.25, - help="The fraction of images to try to remove from the map at once.") - parser.add_argument("-localization_error", type=float, required = False, default = 0.02, - help="An image that has localization error bigger than this, " + \ - "in meters, is considered hard to localize.") - parser.add_argument("-big_localization_error", type=float, required = False, default = 0.05, - help="Image whose localization error went over this threshold as " + - "result of reducing the map should be flagged.") - parser.add_argument("-attempts", type=int, required = False, default = 2, - help="How many times to try to reduce the map.") - parser.add_argument("-work_dir", type=str, required = True, - help="Store all results in this directory.") - parser.add_argument("-image_list", type=str, required = False, - help="Instead of taking out images of the map randomly, start with a submap with the images from this list.") - #parser.add_argument("-v", "-verbosity", type=int, - #help="increase output verbosity") + parser.add_argument( + "-min_brisk_threshold", + type=int, + required=False, + default=20, + help="Min BRISK threshold.", + ) + parser.add_argument( + "-default_brisk_threshold", + type=int, + required=False, + default=90, + help="Default BRISK threshold.", + ) + parser.add_argument( + "-max_brisk_threshold", + type=int, + required=False, + default=110, + help="Max BRISK threshold.", + ) + parser.add_argument( + "-early_break_landmarks", + type=int, + required=False, + default=100, + help="Break early when we have this many landmarks during localization.", + ) + parser.add_argument( + "-histogram_equalization", + dest="histogram_equalization", + action="store_true", + required=False, + ) + + parser.add_argument( + "-num_similar", + type=int, + required=False, + default=20, + help="Use in localization this many images which " + + "are most similar to the image to localize.", + ) + parser.add_argument( + "-num_ransac_iterations", + type=int, + required=False, + 
default=100, + help="Use in localization this many ransac iterations.", + ) + parser.add_argument( + "-ransac_inlier_tolerance", + type=int, + required=False, + default=5, + help="Use in localization this inlier tolerance.", + ) + parser.add_argument( + "-sample_rate", + type=float, + required=False, + default=0.25, + help="The fraction of images to try to remove from the map at once.", + ) + parser.add_argument( + "-localization_error", + type=float, + required=False, + default=0.02, + help="An image that has localization error bigger than this, " + + "in meters, is considered hard to localize.", + ) + parser.add_argument( + "-big_localization_error", + type=float, + required=False, + default=0.05, + help="Image whose localization error went over this threshold as " + + "result of reducing the map should be flagged.", + ) + parser.add_argument( + "-attempts", + type=int, + required=False, + default=2, + help="How many times to try to reduce the map.", + ) + parser.add_argument( + "-work_dir", + type=str, + required=True, + help="Store all results in this directory.", + ) + parser.add_argument( + "-image_list", + type=str, + required=False, + help="Instead of taking out images of the map randomly, start with a submap with the images from this list.", + ) + # parser.add_argument("-v", "-verbosity", type=int, + # help="increase output verbosity") args = parser.parse_args() return args + def main(): args = parse_args() - + mkdir_p(args.work_dir) - print("Using histogram_equalization: " + str(args.histogram_equalization)) + print(("Using histogram_equalization: " + str(args.histogram_equalization))) input_list = [] if args.image_list is not None: - print("Initalizing the reduced map with images from " + args.image_list) + print(("Initalizing the reduced map with images from " + args.image_list)) args.attempts = 1 print("Using just one attempt, hence only trying to add to the input list.") - input_list = read_image_list(args.image_list) + input_list = 
read_image_list(args.image_list) else: - print("Using " + str(args.attempts) + " at reducing the map.") + print(("Using " + str(args.attempts) + " at reducing the map.")) prev_map = args.input_map for outer_iter in range(args.attempts): print("\n\n-------------------------------------------") - print("Attempting to remove images. Iteration: " + str(outer_iter) + ".") - + print(("Attempting to remove images. Iteration: " + str(outer_iter) + ".")) + images = list_images_in_map(prev_map) - print("Current iteration map has " + str(len(images)) + " images.") + print(("Current iteration map has " + str(len(images)) + " images.")) if args.image_list is None: - exclude_images = random.sample(images, int(len(images)*args.sample_rate)) - print("Randomly excluding " + str(len(exclude_images)) + - " images from the map.") + exclude_images = random.sample(images, int(len(images) * args.sample_rate)) + print( + ( + "Randomly excluding " + + str(len(exclude_images)) + + " images from the map." + ) + ) else: if not check_subset(input_list, images): - print("The images in " + args.image_list + - " must be all in the input map: " + args.input_map) + print( + ( + "The images in " + + args.image_list + + " must be all in the input map: " + + args.input_map + ) + ) sys.exit(1) exclude_images = in_notin(images, input_list) - print("Start by excluding " + str(len(exclude_images)) + - " images from the map.") - - #for image in images: + print( + ( + "Start by excluding " + + str(len(exclude_images)) + + " images from the map." + ) + ) + + # for image in images: # print("image is ", image) # - #print("subimages: ", exclude_images) - #print("size is ", len(exclude_images)) + # print("subimages: ", exclude_images) + # print("size is ", len(exclude_images)) reduced_log_file = "" submap = "" include_images = [] - + for inner_iter in range(10): - print("\nAdding images back. Iteration: " + str(inner_iter) + ".") + print(("\nAdding images back. 
Iteration: " + str(inner_iter) + ".")) submap = args.work_dir + "/" + "submap_iter" + str(outer_iter) + ".map" include_images = in_notin(images, exclude_images) - - extract_submap_with_vocab_db(args.input_map, submap, include_images, - args.work_dir) - - reduced_log_file = args.work_dir + \ - "/bad_localization_with_reduced_map_outer_iter" + \ - str(outer_iter) + "_inner_iter_" + str(inner_iter) + ".txt" - bad_images_set = find_hard_to_localize_images(args, submap, args.input_map, - reduced_log_file) + + extract_submap_with_vocab_db( + args.input_map, submap, include_images, args.work_dir + ) + + reduced_log_file = ( + args.work_dir + + "/bad_localization_with_reduced_map_outer_iter" + + str(outer_iter) + + "_inner_iter_" + + str(inner_iter) + + ".txt" + ) + bad_images_set = find_hard_to_localize_images( + args, submap, args.input_map, reduced_log_file + ) # Put back the images that we can't take out as they have bad localization error new_exclude_images = [] - num_added_back = 0 + num_added_back = 0 for image in exclude_images: if image in bad_images_set: num_added_back += 1 else: new_exclude_images.append(image) exclude_images = new_exclude_images[:] - print("Added back to the map " + str(num_added_back) + " image(s).") + print(("Added back to the map " + str(num_added_back) + " image(s).")) if num_added_back == 0: print("No more images to add back to the map. 
Stopping.") @@ -344,18 +486,30 @@ def main(): # Do some stats (reduced_images, reduced_errors) = parse_localization_log_file(reduced_log_file) - print("Images for which localization error is larger than " + \ - str(args.localization_error) + " are:") + print( + ( + "Images for which localization error is larger than " + + str(args.localization_error) + + " are:" + ) + ) for local_it in range(len(reduced_images)): if reduced_errors[local_it] > args.localization_error: - print(reduced_images[local_it] + " " + str(reduced_errors[local_it])) + print((reduced_images[local_it] + " " + str(reduced_errors[local_it]))) + + print( + ( + "The reduced map for attempt " + + str(outer_iter) + + " is saved to " + + submap + ) + ) + print(("It has " + str(len(include_images)) + " images.")) - print("The reduced map for attempt " + str(outer_iter) + \ - " is saved to " + submap) - print("It has " + str(len(include_images)) + " images.") - prev_map = submap - + + if __name__ == "__main__": main() diff --git a/localization/sparse_mapping/tools/view_control_points.py b/localization/sparse_mapping/tools/view_control_points.py index 65250fabd2..2d8f58fbaf 100644 --- a/localization/sparse_mapping/tools/view_control_points.py +++ b/localization/sparse_mapping/tools/view_control_points.py @@ -1,26 +1,30 @@ #!/usr/bin/env python # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. -import sys, os, re, subprocess +import os +import re +import subprocess +import sys """Parse a file containing registration points for the ISS, and publish them in RViz. """ + def gen_marker(marker_id, marker_type, Point, Color, scale, text): """Place a marker of a given type and color at a given location""" @@ -61,23 +65,35 @@ def gen_marker(marker_id, marker_type, Point, Color, scale, text): text: "%s" mesh_resource: '' mesh_use_embedded_materials: False -""" % \ - (marker_id, marker_type, Point[0], Point[1], Point[2], - scale, scale, scale, Color[0], Color[1], Color[2], text); - - return marker; +""" % ( + marker_id, + marker_type, + Point[0], + Point[1], + Point[2], + scale, + scale, + scale, + Color[0], + Color[1], + Color[2], + text, + ) + + return marker + def write_marker_file(fileIn, fileOut): - print("Reading: " + fileIn) + print(("Reading: " + fileIn)) with open(fileIn) as f: - f_out = open(fileOut, 'w') - print("Writing: " + fileOut) + f_out = open(fileOut, "w") + print(("Writing: " + fileOut)) lines = f.readlines() f_out.write("markers: \n") - + marker_id = 1 for line in lines: @@ -93,45 +109,50 @@ def write_marker_file(fileIn, fileOut): vals.append(v) if len(vals) < 4: - print("Skipping invalid line: '" + line + "'") + print(("Skipping invalid line: '" + line + "'")) continue - + Point = [float(vals[0]), float(vals[1]), float(vals[2])] marker_text = vals[4] - + # Plot a point as a sphere marker_id += 1 - marker_type = 2 # plot a small sphere + marker_type = 2 # plot a small sphere marker_scale = 0.01 - marker_color = [1.0, 0.0, 0.0] # red - marker = gen_marker(marker_id, marker_type, Point, marker_color, - marker_scale, marker_text) + marker_color = [1.0, 0.0, 0.0] # red + marker = gen_marker( + marker_id, marker_type, Point, marker_color, marker_scale, marker_text + ) f_out.write(marker) # Plot its text label marker_id += 1 - marker_type = 9 # plot 
a text label - text_scale = 0.1 - text_color = [1.0, 1.0, 1.0] # white - marker = gen_marker(marker_id, marker_type, Point, text_color, - text_scale, marker_text) + marker_type = 9 # plot a text label + text_scale = 0.1 + text_color = [1.0, 1.0, 1.0] # white + marker = gen_marker( + marker_id, marker_type, Point, text_color, text_scale, marker_text + ) f_out.write(marker) + # The main program if len(sys.argv) < 1: - print("Usage: " + sys.argv[0] + ' registration.txt') + print(("Usage: " + sys.argv[0] + " registration.txt")) sys.exit(1) - -fileIn = sys.argv[1] + +fileIn = sys.argv[1] fileOut = fileIn + ".out" write_marker_file(fileIn, fileOut) # Publish -cmd = 'rostopic pub --once /loc/registration visualization_msgs/MarkerArray -f ' + fileOut -print("Running: " + cmd) +cmd = ( + "rostopic pub --once /loc/registration visualization_msgs/MarkerArray -f " + fileOut +) +print(("Running: " + cmd)) subprocess.call(cmd.split(" ")) -print("Removing: " + fileOut) +print(("Removing: " + fileOut)) os.remove(fileOut) diff --git a/management/executive/include/executive/executive.h b/management/executive/include/executive/executive.h index 44d301c790..243f348683 100644 --- a/management/executive/include/executive/executive.h +++ b/management/executive/include/executive/executive.h @@ -52,6 +52,7 @@ #include #include #include +#include #include #include #include @@ -181,9 +182,7 @@ class Executive : public ff_util::FreeFlyerNodelet { bool CheckStoppedOrDrifting(std::string const& cmd_id, std::string const& cmd_name); bool ConfigureLed(ff_hw_msgs::ConfigureSystemLeds& led_srv); - bool ConfigureMobility(std::string const& cmd_id); - bool ConfigureMobility(bool move_to_start, - std::string& err_msg); + bool ConfigureMobility(bool move_to_start, std::string& err_msg); bool FailCommandIfMoving(ff_msgs::CommandStampedPtr const& cmd); bool LoadUnloadNodelet(ff_msgs::CommandStampedPtr const& cmd); ros::Time MsToSec(std::string timestamp); @@ -310,6 +309,7 @@ class Executive : public 
ff_util::FreeFlyerNodelet { ros::ServiceClient set_data_client_, enable_recording_client_; ros::ServiceClient eps_terminate_client_; ros::ServiceClient unload_load_nodelet_client_; + ros::ServiceClient set_collision_distance_client_; ros::Subscriber cmd_sub_, dock_state_sub_, fault_state_sub_, gs_ack_sub_; ros::Subscriber heartbeat_sub_, motion_sub_, plan_sub_, zones_sub_, data_sub_; diff --git a/management/executive/src/executive.cc b/management/executive/src/executive.cc index 2b4b8bb3cd..43a8735de1 100644 --- a/management/executive/src/executive.cc +++ b/management/executive/src/executive.cc @@ -1171,21 +1171,15 @@ bool Executive::ConfigureLed(ff_hw_msgs::ConfigureSystemLeds& led_srv) { return true; } -// Functions used to set variables that are used to configure mobility before a +// Function used to set variables that are used to configure mobility before a // move or execute -bool Executive::ConfigureMobility(std::string const& cmd_id) { - bool successful = true; - - // Initialize config clients if they haven't been initialized +bool Executive::ConfigureMobility(bool move_to_start, std::string& err_msg) { + // Initialize choreographer config client if it hasn't been initialized if (!choreographer_cfg_) { choreographer_cfg_ = std::make_shared(&nh_, NODE_CHOREOGRAPHER); } - if (!mapper_cfg_) { - mapper_cfg_ = std::make_shared(&nh_, NODE_MAPPER); - } - // Set values for configuring, these values will persist until changed // Choreographer choreographer_cfg_->Set("desired_vel", @@ -1207,93 +1201,40 @@ bool Executive::ConfigureMobility(std::string const& cmd_id) { choreographer_cfg_->Set("planner", agent_state_.planner); // This function is not used for the first segment of a plan so always disable // move to start - choreographer_cfg_->Set("enable_bootstrapping", false); + choreographer_cfg_->Set("enable_bootstrapping", move_to_start); choreographer_cfg_->Set("enable_replanning", agent_state_.replanning_enabled); - // Mapper - mapper_cfg_->Set("inflate_radius", 
agent_state_.collision_distance); - - std::string err_msg = ""; - - // Reconfigure choreographer, mapper + // Reconfigure choreographer if (!choreographer_cfg_->Reconfigure()) { - successful = false; - err_msg = "Couldn't configure the mobilty::choreographer node! "; - } - - if (!mapper_cfg_->Reconfigure()) { - successful = false; - err_msg += "Couldn't configure the mobility::mapper node!"; - } - - // Ack error - if (!successful) { - NODELET_ERROR("%s", err_msg.c_str()); - state_->AckCmd(cmd_id, ff_msgs::AckCompletedStatus::EXEC_FAILED, err_msg); + err_msg = "Couldn't configure the choreographer!"; + return false; } - return successful; -} - -bool Executive::ConfigureMobility(bool move_to_start, - std::string& err_msg) { - bool successful = true; - - // TODO(Katie) Change when Ted changes the sequencer - - // Initialize config clients if they haven't been initialized - if (!choreographer_cfg_) { - choreographer_cfg_ = - std::make_shared(&nh_, NODE_CHOREOGRAPHER); - } + // Set the collision distance in the mapper + ff_msgs::SetFloat collision_distance_srv; + collision_distance_srv.request.data = agent_state_.collision_distance; - if (!mapper_cfg_) { - mapper_cfg_ = - std::make_shared(&nh_, NODE_MAPPER); + // Check to make sure the service is valid and running + // Don't use the check service exists function since we don't want to + // ack if we are executing a plan + if (!set_collision_distance_client_.exists()) { + err_msg = "Set collision distance service isn't running! 
"; + err_msg += "Node may have died!"; + return false; } - // Set values for configuring, these values will persist until changed - // Choreographer - choreographer_cfg_->Set("desired_vel", - agent_state_.target_linear_velocity); - choreographer_cfg_->Set("desired_accel", - agent_state_.target_linear_accel); - choreographer_cfg_->Set("desired_omega", - agent_state_.target_angular_velocity); - choreographer_cfg_->Set("desired_alpha", - agent_state_.target_angular_accel); - choreographer_cfg_->Set("enable_faceforward", - !agent_state_.holonomic_enabled); - choreographer_cfg_->Set("enable_collision_checking", - agent_state_.check_obstacles); - choreographer_cfg_->Set("enable_validation", agent_state_.check_zones); - choreographer_cfg_->Set("enable_timesync", false); - choreographer_cfg_->Set("enable_immediate", - agent_state_.immediate_enabled); - choreographer_cfg_->Set("planner", agent_state_.planner); - choreographer_cfg_->Set("enable_bootstrapping", move_to_start); - choreographer_cfg_->Set("enable_replanning", - agent_state_.replanning_enabled); - - // Mapper - mapper_cfg_->Set("inflate_radius", agent_state_.collision_distance); - - // Clear err_msg - err_msg = ""; - - // Reconfigure choreographer, planner, mapper - if (!choreographer_cfg_->Reconfigure()) { - successful = false; - err_msg = "Couldn't configure the mobilty::choreographer node! 
"; + if (!set_collision_distance_client_.call(collision_distance_srv)) { + err_msg = "Set collision distance service returned false."; + return false; } - if (!mapper_cfg_->Reconfigure()) { - successful = false; - err_msg += "Couldn't configure the mobility::mapper node!"; + if (!collision_distance_srv.response.success) { + err_msg = "Set collision distance service was not successful."; + return false; } - return successful; + return true; } // Used to check the mobility state for commands that can only be executed when @@ -1330,7 +1271,6 @@ bool Executive::FailCommandIfMoving(ff_msgs::CommandStampedPtr const& cmd) { return false; } - bool Executive::LoadUnloadNodelet(ff_msgs::CommandStampedPtr const& cmd) { bool load = true; std::string which = "Load"; @@ -3774,6 +3714,9 @@ void Executive::Initialize(ros::NodeHandle *nh) { unload_load_nodelet_client_ = nh_.serviceClient( SERVICE_MANAGEMENT_SYS_MONITOR_UNLOAD_LOAD_NODELET); + set_collision_distance_client_ = nh_.serviceClient( + SERVICE_MOBILITY_SET_COLLISION_DISTANCE); + // initialize configure clients later, when initialized here, the service is // invalid when we try to use it. 
Must have something to do with startup order // of executive, choreographer, planner, or mapper diff --git a/management/executive/src/op_state_ready.cc b/management/executive/src/op_state_ready.cc index 5932d13538..4e2ee588cf 100644 --- a/management/executive/src/op_state_ready.cc +++ b/management/executive/src/op_state_ready.cc @@ -122,7 +122,10 @@ OpState* OpStateReady::HandleCmd(ff_msgs::CommandStampedPtr const& cmd) { return this; } - if (!exec_->ConfigureMobility(cmd->cmd_id)) { + if (!exec_->ConfigureMobility(false, err_msg)) { + AckCmd(cmd->cmd_id, + ff_msgs::AckCompletedStatus::EXEC_FAILED, + err_msg); return this; } diff --git a/management/executive/src/op_state_teleop.cc b/management/executive/src/op_state_teleop.cc index 5e5a066b4b..0b0773e1da 100644 --- a/management/executive/src/op_state_teleop.cc +++ b/management/executive/src/op_state_teleop.cc @@ -93,7 +93,10 @@ OpState* OpStateTeleop::HandleCmd(ff_msgs::CommandStampedPtr const& cmd) { return this; } - if (!exec_->ConfigureMobility(cmd->cmd_id)) { + if (!exec_->ConfigureMobility(false, err_msg)) { + AckCmd(cmd->cmd_id, + ff_msgs::AckCompletedStatus::EXEC_FAILED, + err_msg); return this; } @@ -137,6 +140,8 @@ OpState* OpStateTeleop::HandleResult( std::string const& result_response, std::string const& cmd_id, Action const& action) { + std::string err_msg; + if (state == ff_util::FreeFlyerActionState::Enum::SUCCESS) { // If the action was reacquire, we have more work to do if (action == REACQUIRE) { @@ -155,7 +160,10 @@ OpState* OpStateTeleop::HandleResult( return OpStateRepo::Instance()->ready()->StartupState(); } - if (!exec_->ConfigureMobility(move_cmd_->cmd_id)) { + if (!exec_->ConfigureMobility(false, err_msg)) { + AckCmd(move_cmd_->cmd_id, + ff_msgs::AckCompletedStatus::EXEC_FAILED, + err_msg); move_cmd_ = NULL; return OpStateRepo::Instance()->ready()->StartupState(); } diff --git a/management/sys_monitor/include/sys_monitor/sys_monitor.h b/management/sys_monitor/include/sys_monitor/sys_monitor.h 
index ec53f6d249..f6ade332ac 100644 --- a/management/sys_monitor/include/sys_monitor/sys_monitor.h +++ b/management/sys_monitor/include/sys_monitor/sys_monitor.h @@ -33,7 +33,7 @@ #include #include #include -#include +#include #include #include #include @@ -195,7 +195,7 @@ class SysMonitor : public ff_util::FreeFlyerNodelet { ros::NodeHandle nh_; ros::Publisher pub_cmd_, pub_heartbeat_; ros::Publisher pub_fault_config_, pub_fault_state_; - ros::Publisher pub_time_diff_; + ros::Publisher pub_time_sync_; ros::Timer reload_params_timer_, startup_timer_, reload_nodelet_timer_; ros::Timer heartbeat_timer_; ros::ServiceServer unload_load_nodelet_service_; @@ -213,6 +213,7 @@ class SysMonitor : public ff_util::FreeFlyerNodelet { config_reader::ConfigReader config_params_; bool time_diff_fault_triggered_; + bool log_time_llp_, log_time_hlp_; int pub_queue_size_, sub_queue_size_; int num_current_blocking_fault_; unsigned int startup_time_, reload_nodelet_timeout_, heartbeat_pub_rate_; diff --git a/management/sys_monitor/src/sys_monitor.cc b/management/sys_monitor/src/sys_monitor.cc index 00d9e14e04..cd94453b39 100644 --- a/management/sys_monitor/src/sys_monitor.cc +++ b/management/sys_monitor/src/sys_monitor.cc @@ -23,6 +23,8 @@ SysMonitor::SysMonitor() : ff_util::FreeFlyerNodelet(NODE_SYS_MONITOR, false), time_diff_node_("imu_aug"), time_diff_fault_triggered_(false), + log_time_llp_(true), + log_time_hlp_(true), pub_queue_size_(10), sub_queue_size_(100), time_drift_thres_sec_(0.25) { @@ -163,35 +165,33 @@ void SysMonitor::HeartbeatCallback(ff_msgs::HeartbeatConstPtr const& hb) { uint i = 0, j = 0, tmp_id; bool fault_found = true; - // Check to see if node heartbeat is set up in watchdogs - if (watch_dogs_.count(hb->node) > 0) { - WatchdogPtr wd = watch_dogs_.at(hb->node); - wd->ResetTimer(); - // Check if the manager in the heartbeat matches the manager in the config. - // If not, replace the manager with what is in the heartbeat since that is - // more accurate. 
- if (wd->nodelet_manager() != hb->nodelet_manager) { - wd->nodelet_manager(hb->nodelet_manager); - } - - // Check to see if node restarted publishing its heartbeat - if (wd->hb_fault_occurring()) { - wd->hb_fault_occurring(false); - RemoveFault(wd->fault_id()); - } + // Report time if the heartbeat came from the imu aug or guest science manager + // node. + if (hb->node == time_diff_node_ || hb->node == "guest_science_manager") { + ff_msgs::TimeSyncStamped time_sync_msg; + ros::Time time_now = ros::Time::now(); - // Check time drift, use time in imu_aug heartbeat + // Check time drift from llp, use time in imu_aug heartbeat if (hb->node == time_diff_node_) { - float time_diff_sec = (ros::Time::now() - hb->header.stamp).toSec(); - PublishTimeDiff(time_diff_sec); - // Check if time difference is great than threshold. If it is, trigger - // fault + time_sync_msg.remote_processor = "LLP"; + + // Output time difference to the ros log for the first imu aug heartbeat + if (log_time_llp_) { + NODELET_INFO_STREAM("LLP time is " << hb->header.stamp << + " and the MLP time is " << time_now << "."); + log_time_llp_ = false; + } + + // Check for time drift. If time difference is great than the threshold, + // trigger fault + // This fault only applies to clock skew between the MLP and the LLP since + // this skew causes navigation failures. 
+ float time_diff_sec = (time_now - hb->header.stamp).toSec(); if (abs(time_diff_sec) > time_drift_thres_sec_) { if (!time_diff_fault_triggered_) { std::string key = ff_util::fault_keys[ff_util::TIME_DIFF_TOO_HIGH]; unsigned int id = faults_[key]; - AddFault(id, ("Time diff is: " + - std::to_string(abs(time_diff_sec)))); + AddFault(id, ("Time diff is: " + std::to_string(abs(time_diff_sec)))); PublishFaultResponse(id); time_diff_fault_triggered_ = true; } @@ -203,6 +203,38 @@ void SysMonitor::HeartbeatCallback(ff_msgs::HeartbeatConstPtr const& hb) { time_diff_fault_triggered_ = false; } } + } else { + time_sync_msg.remote_processor = "HLP"; + // Output time difference to the ros log for the first guest science + // manager heartbeat + if (log_time_hlp_) { + NODELET_INFO_STREAM("HLP time is " << hb->header.stamp << + " and the MLP time is "<< time_now << "."); + log_time_hlp_ = false; + } + } + + time_sync_msg.header.stamp = ros::Time::now(); + time_sync_msg.mlp_time = time_now; + time_sync_msg.remote_time = hb->header.stamp; + pub_time_sync_.publish(time_sync_msg); + } + + // Check to see if node heartbeat is set up in watchdogs + if (watch_dogs_.count(hb->node) > 0) { + WatchdogPtr wd = watch_dogs_.at(hb->node); + wd->ResetTimer(); + // Check if the manager in the heartbeat matches the manager in the config. + // If not, replace the manager with what is in the heartbeat since that is + // more accurate. 
+ if (wd->nodelet_manager() != hb->nodelet_manager) { + wd->nodelet_manager(hb->nodelet_manager); + } + + // Check to see if node restarted publishing its heartbeat + if (wd->hb_fault_occurring()) { + wd->hb_fault_occurring(false); + RemoveFault(wd->fault_id()); } // Get last heartbeat for fault comparison @@ -215,10 +247,10 @@ void SysMonitor::HeartbeatCallback(ff_msgs::HeartbeatConstPtr const& hb) { for (i = 0; i < previous_hb->faults.size(); i++) { RemoveFault(previous_hb->faults[i].id); } + // Set previous hb to null so that we don't compare the faults with the + // last time the nodelet was run previous_hb = NULL; } - // Set previous hb to null so that we don't compare the faults with the last - // time the nodelet was run // Check to see if this is the first heartbeat from the node if (!previous_hb) { @@ -372,8 +404,8 @@ void SysMonitor::Initialize(ros::NodeHandle *nh) { pub_queue_size_, true); - pub_time_diff_ = nh_.advertise( - TOPIC_MANAGEMENT_SYS_MONITOR_TIME_DIFF, + pub_time_sync_ = nh_.advertise( + TOPIC_MANAGEMENT_SYS_MONITOR_TIME_SYNC, pub_queue_size_, false); @@ -394,6 +426,14 @@ void SysMonitor::Initialize(ros::NodeHandle *nh) { num_current_blocking_fault_ = 0; OutputFaultTables(); + + // Add guest science manager to the list of unwatched heartbeats. The guest + // science manager heartbeat is only used to report the time on the HLP. + // It doesn't make sense to monitor the guest science heartbeat and trigger a + // fault if it is missing because the guest science manager doesn't start up + // with the flight software, isn't run all the time, and cannot be un/loaded + // like the nodes that run on the MLP and LLP. 
+ unwatched_heartbeats_.push_back("guest_science_manager"); } // Function used for debugging purposes only @@ -517,13 +557,6 @@ void SysMonitor::PublishHeartbeat(bool initialization_fault) { pub_heartbeat_.publish(heartbeat_); } -void SysMonitor::PublishTimeDiff(float time_diff_sec) { - ff_msgs::TimeDiffStamped time_diff_msg; - time_diff_msg.header.stamp = ros::Time::now(); - time_diff_msg.time_diff_sec = time_diff_sec; - pub_time_diff_.publish(time_diff_msg); -} - void SysMonitor::StartupTimerCallback(ros::TimerEvent const& te) { ff_msgs::UnloadLoadNodelet::Request load_req; int load_result; diff --git a/mobility/choreographer/include/choreographer/planner.h b/mobility/choreographer/include/choreographer/planner.h index cec770ed78..283013a2fa 100644 --- a/mobility/choreographer/include/choreographer/planner.h +++ b/mobility/choreographer/include/choreographer/planner.h @@ -41,8 +41,12 @@ #include #include #include +#include #include +// Voxel map +#include + #include #include #include @@ -121,6 +125,23 @@ class PlannerImplementation : public ff_util::FreeFlyerNodelet { } return false; } + // Get the keepin and keepout zones + bool GetZonesMap(std::vector &map, Vec3f &origin, Vec3i &dim, double &map_res) { + ff_msgs::GetOccupancyMap srv; + if (client_z_m_.Call(srv)) { + map.resize(srv.response.map.size()); + map = srv.response.map; + origin[0] = srv.response.origin.x; + origin[1] = srv.response.origin.y; + origin[2] = srv.response.origin.z; + dim[0] = srv.response.dim.x; + dim[1] = srv.response.dim.y; + dim[2] = srv.response.dim.z; + map_res = srv.response.resolution; + return true; + } + return false; + } bool GetFreeMap(pcl::PointCloud *points, float *resolution) { ff_msgs::GetMap srv; if (client_f_.Call(srv)) { @@ -154,8 +175,12 @@ class PlannerImplementation : public ff_util::FreeFlyerNodelet { server_p_.Create(nh, topic); // Initialize the get zone call client_z_.SetConnectedCallback(std::bind(&PlannerImplementation::ConnectedCallback, this)); - 
client_z_.SetTimeoutCallback(std::bind(&PlannerImplementation::GetZoneTimeoutCallback, this)); + client_z_.SetTimeoutCallback(std::bind(&PlannerImplementation::GetZonesTimeoutCallback, this)); client_z_.Create(nh, SERVICE_MOBILITY_GET_ZONES); + // Initialize the get zone map call + client_z_m_.SetConnectedCallback(std::bind(&PlannerImplementation::ConnectedCallback, this)); + client_z_m_.SetTimeoutCallback(std::bind(&PlannerImplementation::GetZonesMapTimeoutCallback, this)); + client_z_m_.Create(nh, SERVICE_MOBILITY_GET_ZONES_MAP); // Initialize the register planner call client_r_.SetConnectedCallback(std::bind(&PlannerImplementation::ConnectedCallback, this)); client_r_.SetTimeoutCallback(std::bind(&PlannerImplementation::RegisterTimeoutCallback, this)); @@ -204,11 +229,12 @@ class PlannerImplementation : public ff_util::FreeFlyerNodelet { // Ensure all clients are connected void ConnectedCallback(void) { NODELET_DEBUG_STREAM("ConnectedCallback()"); - if (!client_z_.IsConnected()) return; // Zone - if (!client_r_.IsConnected()) return; // Register - if (!client_o_.IsConnected()) return; // Register - if (!client_f_.IsConnected()) return; // Register - if (state_ != INITIALIZING) return; // Don't initialize twice + if (!client_z_.IsConnected()) return; // Zones + if (!client_z_m_.IsConnected()) return; // Zones Map + if (!client_r_.IsConnected()) return; // Register + if (!client_o_.IsConnected()) return; // Register + if (!client_f_.IsConnected()) return; // Register + if (state_ != INITIALIZING) return; // Don't initialize twice // Register this planner NODELET_DEBUG_STREAM("Registering planner"); client_r_.Call(registration_); @@ -222,8 +248,13 @@ class PlannerImplementation : public ff_util::FreeFlyerNodelet { } // Timeout on a trajectory generation request - void GetZoneTimeoutCallback(void) { - return InitFault("Timeout connecting to the get zone service"); + void GetZonesTimeoutCallback(void) { + return InitFault("Timeout connecting to the get zones 
service"); + } + + // Timeout on a trajectory generation request + void GetZonesMapTimeoutCallback(void) { + return InitFault("Timeout connecting to the get zones map service"); } // Timeout on a free map request @@ -281,6 +312,7 @@ class PlannerImplementation : public ff_util::FreeFlyerNodelet { State state_; // Planner state ff_util::FreeFlyerActionServer server_p_; ff_util::FreeFlyerServiceClient client_z_; + ff_util::FreeFlyerServiceClient client_z_m_; ff_util::FreeFlyerServiceClient client_r_; ff_util::FreeFlyerServiceClient client_f_, client_o_; ff_msgs::RegisterPlanner registration_; // Registration info diff --git a/mobility/choreographer/include/choreographer/validator.h b/mobility/choreographer/include/choreographer/validator.h index 886536fe3d..d167e65092 100644 --- a/mobility/choreographer/include/choreographer/validator.h +++ b/mobility/choreographer/include/choreographer/validator.h @@ -34,9 +34,15 @@ #include #include #include +#include +#include + +// Voxel map +#include // STL includes #include +#include namespace choreographer { @@ -69,15 +75,20 @@ class Validator { // Markers for keep in / keep out zones void PublishMarkers(); - // Check if a point is inside a cuboid - bool PointInsideCuboid(geometry_msgs::Point const& x, - geometry_msgs::Vector3 const& cubemin, - geometry_msgs::Vector3 const& cubemax); + // Process zones when building the occupancy map + void ProcessZone(std::vector& map, int type, char cell_value, bool surface); + + // Build the occupancy map + bool GetZonesMap(); // Callback to get the keep in/out zones bool GetZonesCallback(ff_msgs::GetZones::Request& req, ff_msgs::GetZones::Response& res); + // Callback to get the keep in/out zones map + bool GetZonesMapCallback(ff_msgs::GetOccupancyMap::Request& req, + ff_msgs::GetOccupancyMap::Response& res); + // Callback to set the keep in/out zones bool SetZonesCallback(ff_msgs::SetZones::Request& req, ff_msgs::SetZones::Response& res); @@ -87,8 +98,19 @@ class Validator { bool 
overwrite_; // New zones overwrite ff_msgs::SetZones::Request zones_; // Zones ros::Publisher pub_zones_; // Zone publisher - ros::ServiceServer get_zones_srv_; // Set zone service - ros::ServiceServer set_zones_srv_; // Set zone service + ros::ServiceServer get_zones_srv_; // Get zones service + ros::ServiceServer get_zones_map_srv_; // Get zones map + ros::ServiceServer set_zones_srv_; // Set zones service + ros::ServiceClient get_resolution_; // Get the zones map resolution + ros::ServiceClient get_map_inflation_; // Get the zones map inflation + + double map_res_ = 0.08; + std::shared_ptr jps_map_util_; + + // Voxel map values + char val_occ_ = 100; // Assume occupied cell has value 100 + char val_free_ = 0; // Assume free cell has value 0 + char val_unknown_ = -1; // Assume unknown cell has value -1 }; } // namespace choreographer diff --git a/mobility/choreographer/readme.md b/mobility/choreographer/readme.md index 1eb82f7fa4..a0500b997f 100644 --- a/mobility/choreographer/readme.md +++ b/mobility/choreographer/readme.md @@ -46,4 +46,71 @@ A replanning approach is implemented to support obstacle avoidance using the qp- Internally, the choreographer is encoded as a finite state machine depicted below and implemented using the ff_util/FSM class. This class essentially captures a map of (state, event) -> lambda function relationships. The state of the system is moved forward by setting the initial state and then pushing a sequence of events to the FSM. -\dotfile choreographer_fsm "Choreographer finite state machine" \ No newline at end of file +\dotfile choreographer_fsm "Choreographer finite state machine" + + +## Trajectory checks + +To ensure that the robot is following the desired trajectory, the choreographer monitors the controller feedback and cancels the movement if the motion value is above the defined values for a certain amount of time. The parameters are tuned according to the flight mode chosen. 
A zero value for any of these parameters disables the check. + +| Parameter | Description | +|:---------------------------------|:----------- | +| ```tolerance_pos_endpoint``` | End of motion position tolerance. Checked when the trajectory finishes and the robot is stopped | +| ```tolerance_pos``` | Position tolerance checked throughout the entire trajectory | +| ```tolerance_vel``` | Velocity tolerance checked throughout the entire trajectory | +| ```tolerance_att``` | Attitude tolerance checked throughout the entire trajectory | +| ```tolerance_omega``` | Omega tolerance checked throughout the entire trajectory. | +| ```tolerance_time``` | Time for sync check | + +The planned trajectories are also checked against the keep-in and keep-out zones. + +The keep-in and keep-out zones describe safe and unsafe areas for flight +respectively. Each zone is a cuboid in 3-space, and is fully-defined by the +coordinates of two diagonally opposite vertices. The union of all keep-in zones +minus the union of all keep-out zones describes the free space in which safe +flight can occur. The default zones are provided as JSON formatted files with +suffix `.json` in the `astrobee/gds_config` directory. + +An example of a keep-in and keep-out zone JSON file might look like this: + + { + "timestamp" : "1475516840", + "zones": + [ + { + "name" : "keepout", + "safe" : false, + "sequence" : [ [ -1.0, -0.3, -3, -0.6, 1.0, 3.0 ], [ 0.5, -0.3, -3, 1.0, 1.0, 3.0 ] ] + }, + { + "name" : "keepin", + "safe" : true, + "sequence" : [ [ -1.5, -1.5, 0, 1.5, 1.5, 3.0 ] ] + } + ] + } + +Note that the "sequence" field takes an array of 6-vectors, and not just a +single 6-vector. Each element of this array represents a zone, with each vector +denoting the two coordinates that fully-define the cuboid. 
+ +At any point while running the robot, the operator can load and parse a JSON file: + + `rosrun executive zones_pub -compression none .json` + +Or manually through the `SetZones` service call on the ROS namespace `~/mob/set_zones`. +Please refer to the definition of \ref ff_msgs_SetZones for more information on +how to update zones using a ROS service call. + +The resulting data structure is serialized into a binary file. If the option `zone_overwrite` +in `mobility/choreographer.config` is activated (default), it will overwrite the current +`.bin` file containing the default set of zones at start-up. + +The \ref mapper node publishes +[visualization_msgs::MarkerArrays](http://docs.ros.org/kinetic/api/visualization_msgs/html/msg/MarkerArray.html) +on the namespace `~/mob/mapper/zones`. When listened to in rviz, these marker +arrays render keep-in and keep-out zones as semi-transparent green and red +cuboids. The two example zones should be rendered as shown below: + +![alt text](../images/mobility/zones.png "How the RVIZ user interface draws zones") + diff --git a/mobility/choreographer/src/choreographer_nodelet.cc b/mobility/choreographer/src/choreographer_nodelet.cc index 1822c31eb6..067e84910e 100644 --- a/mobility/choreographer/src/choreographer_nodelet.cc +++ b/mobility/choreographer/src/choreographer_nodelet.cc @@ -97,13 +97,14 @@ class ChoreographerNodelet : public ff_util::FreeFlyerNodelet { PMC_TIMEOUT = (1<<10), // PMC has timed out CONTROL_SUCCESS = (1<<11), // Control success CONTROL_FAILED = (1<<12), // Control failure - TOLERANCE_POS = (1<<13), // Position olerance failure - TOLERANCE_ATT = (1<<14), // Attitude tolerance failure - TOLERANCE_VEL = (1<<15), // Velocity tolerance failure - TOLERANCE_OMEGA = (1<<16), // Omega tolerance failure - OBSTACLE_DETECTED = (1<<17), // Hazard: obstacle - MANUAL_STATE_SET = (1<<18), // Setting the state manually with service - REPLAN_TIMEOUT = (1<<19) // Ready to replan again + TOLERANCE_POS_ENDPOINT = (1<<13), 
// Endpoint position tolerance failure + TOLERANCE_POS = (1<<14), // Position tolerance failure + TOLERANCE_ATT = (1<<15), // Attitude tolerance failure + TOLERANCE_VEL = (1<<16), // Velocity tolerance failure + TOLERANCE_OMEGA = (1<<17), // Omega tolerance failure + OBSTACLE_DETECTED = (1<<18), // Hazard: obstacle + MANUAL_STATE_SET = (1<<19), // Setting the state manually with service + REPLAN_TIMEOUT = (1<<20) // Ready to replan again }; // The various types of control @@ -315,9 +316,12 @@ class ChoreographerNodelet : public ff_util::FreeFlyerNodelet { }); // [12] fsm_.Add(STATE::CONTROLLING, - TOLERANCE_POS | TOLERANCE_ATT | TOLERANCE_VEL | TOLERANCE_OMEGA, + TOLERANCE_POS | TOLERANCE_ATT | TOLERANCE_VEL + | TOLERANCE_OMEGA | TOLERANCE_POS_ENDPOINT, [this](FSM::Event const& event) -> FSM::State { switch (event) { + case TOLERANCE_POS_ENDPOINT: + return Result(RESPONSE::TOLERANCE_VIOLATION_POSITION_ENDPOINT); case TOLERANCE_POS: return Result(RESPONSE::TOLERANCE_VIOLATION_POSITION); case TOLERANCE_ATT: @@ -749,6 +753,9 @@ class ChoreographerNodelet : public ff_util::FreeFlyerNodelet { result.fsm_result = "Replanning failed"; client_c_.CancelGoal(); break; + case RESPONSE::TOLERANCE_VIOLATION_POSITION_ENDPOINT: + result.fsm_result = "Endpoint position tolerance violated"; + break; case RESPONSE::TOLERANCE_VIOLATION_POSITION: result.fsm_result = "Position tolerance violated"; client_c_.CancelGoal(); @@ -1080,7 +1087,7 @@ class ChoreographerNodelet : public ff_util::FreeFlyerNodelet { return false; } - // Called when the sentinel forecasts an upcoming collision + // Called when there is a propulsion event void PmcStateCallback(ff_hw_msgs::PmcState::ConstPtr const& msg) { // If prepping, send a state update to prevent execute and move // actions from timing out on the client side @@ -1181,6 +1188,7 @@ class ChoreographerNodelet : public ff_util::FreeFlyerNodelet { switch (fsm_.GetState()) { // Perform tolerance checking while controling case STATE::CONTROLLING: + 
pos_error_ = feedback->error_position; if (flight_mode_.tolerance_pos > 0.0 && feedback->error_position > flight_mode_.tolerance_pos) { // If tolerance is present more that the allowable time @@ -1258,6 +1266,17 @@ class ChoreographerNodelet : public ff_util::FreeFlyerNodelet { // Control result void CResultCallback(ff_util::FreeFlyerActionState::Enum result_code, ff_msgs::ControlResultConstPtr const& result) { + // Check if it reached the endpoint + if (flight_mode_.tolerance_pos_endpoint > 0.0 && + pos_error_ > flight_mode_.tolerance_pos_endpoint) { + // If tolerance is present more than the allowable time + NODELET_DEBUG_STREAM("Endpoint position tolerance violated"); + NODELET_DEBUG_STREAM("- Value: " << pos_error_ + << ", Thresh: " + << flight_mode_.tolerance_pos_endpoint); + return fsm_.Update(TOLERANCE_POS_ENDPOINT); + } + switch (result_code) { case ff_util::FreeFlyerActionState::SUCCESS: segment_ = result->segment; @@ -1397,6 +1416,8 @@ class ChoreographerNodelet : public ff_util::FreeFlyerNodelet { geometry_msgs::PointStamped obstacle_; // Obstacle // Tolerance check Timers double tolerance_max_time_; + // Position error + double pos_error_; ros::Time tolerance_pos_timer, tolerance_att_timer, tolerance_vel_timer, tolerance_omega_timer; // Cached number of replan attempts diff --git a/mobility/choreographer/src/validator.cc b/mobility/choreographer/src/validator.cc index 1ce88711b6..f50f0475e7 100644 --- a/mobility/choreographer/src/validator.cc +++ b/mobility/choreographer/src/validator.cc @@ -22,17 +22,126 @@ namespace choreographer { -// Check if a point is inside a cuboid -bool Validator::PointInsideCuboid(geometry_msgs::Point const& x, - geometry_msgs::Vector3 const& cubemin, - geometry_msgs::Vector3 const& cubemax) { - if (x.x < std::min(cubemin.x, cubemax.x) || - x.y < std::min(cubemin.y, cubemax.y) || - x.z < std::min(cubemin.z, cubemax.z) || - x.x > std::max(cubemin.x, cubemax.x) || - x.y > std::max(cubemin.y, cubemax.y) || - x.z > 
std::max(cubemin.z, cubemax.z)) - return false; + +// Process zone + void Validator::ProcessZone(std::vector &map, int type, char cell_value, bool surface) { + Vec3f zmin, zmax; + for (auto &zone : zones_.zones) { + if (zone.type == type) { + zmin << std::min(zone.min.x, zone.max.x), + std::min(zone.min.y, zone.max.y), std::min(zone.min.z, zone.max.z); + zmax << std::max(zone.min.x, zone.max.x), + std::max(zone.min.y, zone.max.y), std::max(zone.min.z, zone.max.z); + Vec3f tmp = Vec3f::Zero(); + for (int i = 0; i < 3; i++) { + int j = (i + 1) % 3; + int k = (i + 2) % 3; + for (auto zx = zmin(j); zx <= zmax(j); zx += map_res_) { + for (auto zy = zmin(k); zy <= zmax(k); zy += map_res_) { + // Attribute the desired value to the surface around zone + if (surface) { + tmp(j) = zx; + tmp(k) = zy; + tmp(i) = zmin(i) - map_res_ * 1.001; + map[jps_map_util_->getIndex(jps_map_util_->floatToInt(tmp))] = + cell_value; + tmp(i) = zmax(i) + map_res_ * 1.001; + map[jps_map_util_->getIndex(jps_map_util_->floatToInt(tmp))] = + cell_value; + // Attribute the desired value to the volume of the zone + } else { + for (auto zz = zmin(2); zz <= zmax(2); zz += map_res_) { + tmp(0) = zx; + tmp(1) = zy; + tmp(2) = zz; + map[jps_map_util_->getIndex(jps_map_util_->floatToInt(tmp))] = + cell_value; + } + } + } + } + } + } + } + } + +// Get Zones Map +bool Validator::GetZonesMap() { + ff_msgs::GetFloat srv; + if (get_resolution_.call(srv)) { + map_res_ = srv.response.data; + } + + + // Check min and max of keepin zones to use as map boundaries + Vec3f min, max, zmin, zmax; + min << std::numeric_limits::max(), std::numeric_limits::max(), + std::numeric_limits::max(); + max << std::numeric_limits::min(), std::numeric_limits::min(), + std::numeric_limits::min(); + uint num_keepin = 0; + for (auto &zone : zones_.zones) { + if (zone.type == ff_msgs::Zone::KEEPIN) { + zmin << zone.min.x, zone.min.y, zone.min.z; + zmax << zone.max.x, zone.max.y, zone.max.z; + for (int i = 0; i < 3; i++) { + min(i) = 
std::min(min(i), zmin(i)); + min(i) = std::min(min(i), zmax(i)); + max(i) = std::max(max(i), zmin(i)); + max(i) = std::max(max(i), zmax(i)); + } + num_keepin++; + } + } + if (num_keepin == 0) { + ROS_ERROR("Zero keepin zones!! Plan failed"); + return false; + } + + // Based on zones boundaries and obstacle map resolution, specify dimensions + min = Vec3f(std::floor(min(0) / map_res_) * map_res_, + std::floor(min(1) / map_res_) * map_res_, + std::floor(min(2) / map_res_) * map_res_) - Vec3f::Ones() * map_res_ * 2.0; + max = Vec3f(std::ceil(max(0) / map_res_) * map_res_, + std::ceil(max(1) / map_res_) * map_res_, + std::ceil(max(2) / map_res_) * map_res_) + Vec3f::Ones() * map_res_ * 2.0; + + + Vec3f origin = min; + Vec3f dimf = (max - min) / map_res_; + Vec3i dim(std::ceil(dimf(0)), std::ceil(dimf(1)), std::ceil(dimf(2))); + int num_cell = dim(0) * dim(1) * dim(2); + + // Keepin/Keepout zones: + // To reduce computational load, only the contour of the keepin/keepout + // zones is defined as occupied, to minimize the number of points that + // are inflated + // 0) The voxel map starts with all voxels set to unknown + // Declare voxel map + std::vector map(num_cell, val_unknown_); + jps_map_util_.reset(new JPS::VoxelMapUtil()); + jps_map_util_->setMap(origin, dim, map, map_res_); + + // 1) Keepin Zones add contour as occupied + ProcessZone(map, ff_msgs::Zone::KEEPIN, val_occ_, true); + + // 2) We set the interior of the keepin zones to free, this corrects the + // case where keepin zones are connected and a contour was put between them + ProcessZone(map, ff_msgs::Zone::KEEPIN, val_free_, false); + + // 4) Add keepout zones + ProcessZone(map, ff_msgs::Zone::KEEPOUT, val_occ_, true); + + // set voxel map using keepin/keepout zones + jps_map_util_->setMap(origin, dim, map, map_res_); + + // 6) Inflate using the robot radius and the collision distance + double inflation = map_res_; + if (get_map_inflation_.call(srv)) { + inflation = srv.response.data; + } + 
jps_map_util_->dilate(inflation, inflation); // sets dilating radius + jps_map_util_->dilating(); // this dilates the entire map return true; } @@ -72,26 +181,13 @@ Validator::Response Validator::CheckSegment(ff_util::Segment const& msg, break; } // Now, check each setpoint in the segment against the zones - ff_util::Segment::const_iterator it; - for (it = seg.begin(); it != seg.end(); it++) { - std::vector::iterator jt; - // We must visit at least one keepin to be valid - bool point_exists_within_keepin = false; - for (jt = zones_.zones.begin(); jt != zones_.zones.end(); jt++) { - if (jt->type == ff_msgs::Zone::KEEPIN) - if (PointInsideCuboid(it->pose.position, jt->min, jt->max)) - point_exists_within_keepin = true; - if (jt->type == ff_msgs::Zone::KEEPOUT) { - if (PointInsideCuboid(it->pose.position, jt->min, jt->max)) { - ROS_DEBUG_STREAM("KEEPOUT violation at time" << it->when.toSec()); - ROS_DEBUG_STREAM(it->pose.position); - ROS_DEBUG_STREAM(zones_); - return VIOLATES_KEEP_OUT; - } - } - } - // Check that we are in a keepin - if (!point_exists_within_keepin) + ff_util::Segment::reverse_iterator it; + Vec3f tmp = Vec3f::Zero(); + for (it = seg.rbegin(); it != seg.rend(); it++) { + tmp << it->pose.position.x, it->pose.position.y, it->pose.position.z; + if (jps_map_util_->isOccupied(jps_map_util_->floatToInt(tmp))) + return VIOLATES_KEEP_OUT; + else if (jps_map_util_->isUnKnown(jps_map_util_->floatToInt(tmp))) return VIOLATES_KEEP_IN; } return SUCCESS; @@ -106,6 +202,13 @@ bool Validator::Init(ros::NodeHandle *nh, ff_util::ConfigServer & cfg) { &Validator::SetZonesCallback, this); get_zones_srv_ = nh->advertiseService(SERVICE_MOBILITY_GET_ZONES, &Validator::GetZonesCallback, this); + get_zones_map_srv_ = nh->advertiseService(SERVICE_MOBILITY_GET_ZONES_MAP, + &Validator::GetZonesMapCallback, this); + get_resolution_ = nh->serviceClient( + SERVICE_MOBILITY_GET_MAP_RESOLUTION); + get_map_inflation_ = nh->serviceClient( + SERVICE_MOBILITY_GET_MAP_INFLATION); + // 
Check if overwriting is allowed zone_file_ = cfg.Get("zone_file"); overwrite_ = cfg.Get("zone_overwrite"); @@ -116,6 +219,8 @@ bool Validator::Init(ros::NodeHandle *nh, ff_util::ConfigServer & cfg) { } else { PublishMarkers(); } + // Update map + GetZonesMap(); // Success return true; } @@ -182,6 +287,28 @@ bool Validator::GetZonesCallback( return true; } +// Callback to get the keep in/out zones +bool Validator::GetZonesMapCallback( + ff_msgs::GetOccupancyMap::Request &req, ff_msgs::GetOccupancyMap::Response &res) { + res.timestamp = zones_.timestamp; + std::vector map = jps_map_util_->getMap(); + res.map.resize(map.size()); + res.map = map; + + Vec3f origin = jps_map_util_->getOrigin(); + res.origin.x = origin[0]; + res.origin.y = origin[1]; + res.origin.z = origin[2]; + + Vec3i dim = jps_map_util_->getDim(); + res.dim.x = dim[0]; + res.dim.y = dim[1]; + res.dim.z = dim[2]; + + res.resolution = jps_map_util_->getRes(); + return true; +} + // Callback to set the keep in/out zones bool Validator::SetZonesCallback( ff_msgs::SetZones::Request &req, ff_msgs::SetZones::Response &res) { @@ -192,6 +319,8 @@ bool Validator::SetZonesCallback( ROS_WARN_STREAM("Cannot write zone file " << zone_file_); // Update visualization PublishMarkers(); + // Update map + GetZonesMap(); // Send result res.success = true; return true; diff --git a/mobility/mapper/include/mapper/mapper_nodelet.h b/mobility/mapper/include/mapper/mapper_nodelet.h index 33650b6f5f..ca567b9efb 100644 --- a/mobility/mapper/include/mapper/mapper_nodelet.h +++ b/mobility/mapper/include/mapper/mapper_nodelet.h @@ -53,6 +53,7 @@ // Service messages #include +#include // General messages #include @@ -120,16 +121,25 @@ class MapperNodelet : public ff_util::FreeFlyerNodelet { // Services (see services.cpp for implementation) ----------------- // Update resolution of the map - bool UpdateResolution(ff_msgs::SetFloat::Request &req, + bool SetResolution(ff_msgs::SetFloat::Request &req, ff_msgs::SetFloat::Response &res); 
+ // Update resolution of the map + bool GetResolution(ff_msgs::GetFloat::Request &req, + ff_msgs::GetFloat::Response &res); // Update map memory time - bool UpdateMemoryTime(ff_msgs::SetFloat::Request &req, + bool SetMemoryTime(ff_msgs::SetFloat::Request &req, ff_msgs::SetFloat::Response &res); + // Update map memory time + bool GetMemoryTime(ff_msgs::GetFloat::Request &req, + ff_msgs::GetFloat::Response &res); - // Update map inflation - bool MapInflation(ff_msgs::SetFloat::Request &req, + // Update collision distance + bool SetCollisionDistance(ff_msgs::SetFloat::Request &req, ff_msgs::SetFloat::Response &res); + // Update collision distance + bool GetMapInflation(ff_msgs::GetFloat::Request &req, + ff_msgs::GetFloat::Response &res); // Reset the map bool ResetMap(std_srvs::Trigger::Request &req, @@ -174,16 +184,13 @@ class MapperNodelet : public ff_util::FreeFlyerNodelet { ros::Subscriber haz_sub_, perch_sub_, segment_sub_, reset_sub_; // Octomap services - ros::ServiceServer resolution_srv_, memory_time_srv_; - ros::ServiceServer map_inflation_srv_, reset_map_srv_; + ros::ServiceServer set_resolution_srv_, set_memory_time_srv_, set_collision_distance_srv_; + ros::ServiceServer get_resolution_srv_, get_memory_time_srv_, get_collision_distance_srv_; + ros::ServiceServer reset_map_srv_; // Thread rates (hz) double tf_update_rate_, fading_memory_update_rate_; - // // Path planning services - // ros::ServiceServer RRT_srv, octoRRT_srv, PRM_srv, graph_srv, Astar_srv; - ros::ServiceServer newTraj_srv; - // Trajectory validation variables ----------------------------- State state_; // State of the mapper (structure defined in struct.h) ff_util::Segment segment_; // Segment diff --git a/mobility/mapper/include/mapper/octoclass.h b/mobility/mapper/include/mapper/octoclass.h index 89bec3dc05..74b43f87fd 100644 --- a/mobility/mapper/include/mapper/octoclass.h +++ b/mobility/mapper/include/mapper/octoclass.h @@ -46,13 +46,16 @@ class OctoClass{ OctoClass(); // Mapping 
methods - void SetMemory(const double memory); // Fading memory time + void SetMemoryTime(const double memory); // Fading memory time + double GetMemoryTime(); // Returns fading memory time void SetMaxRange(const double max_range); // Max range for mapping void SetMinRange(const double min_range); // Min range for mapping inline double GetResolution() const {return resolution_;} // Returns resolution - void SetResolution(const double resolution_in); // Resolution of the octomap - void ResetMap(); // Reset the octomap structure + void SetResolution(const double resolution_in); // Resolution of the octomap + double GetResolution(); // Returns resolution of the octomap + void ResetMap(); // Reset the octomap structure void SetMapInflation(const double inflate_radius); // Set the inflation radius + double GetMapInflation(); // Returns the inflation radius void SetCamFrustum(const double fov, const double aspect_ratio); void SetOccupancyThreshold(const double occupancy_threshold); diff --git a/mobility/mapper/readme.md b/mobility/mapper/readme.md index cec76afd9b..7e8627755a 100644 --- a/mobility/mapper/readme.md +++ b/mobility/mapper/readme.md @@ -13,64 +13,10 @@ yet implemented. The \ref mapper node can be divided in the following tasks: -* `Trajectory Validator` - Validates planned trajectories based on keep-in and - keep-out zones. * `Octomapper` - Maps Astrobee's surroundings using an octomap (octree-based map); * `Sentinel` - Checks for imminent collisions and notifies the controller if an imminent collision has been detected; -## Trajectory Validator - -The keep-in and keep-out zones describe safe and unsafe areas for flight -respectively. Each zone is a cuboid in 3-space, and is fully-defined by the -coordinates of two diagonally opposite vertices. The union of all keep-in zones -minus the union of all keep-out zones describes the free space in which safe -flight can occur. 
The default zones are provided as JSON formatted files with -suffix `.json` in the `astrobee/gds_config` directory. - -An example of a keep-in and keep-out zone JSON file might look like this: - - { - "timestamp" : "1475516840", - "zones": - [ - { - "name" : "keepout", - "safe" : false, - "sequence" : [ [ -1.0, -0.3, -3, -0.6, 1.0, 3.0 ], [ 0.5, -0.3, -3, 1.0, 1.0, 3.0 ] ] - }, - { - "name" : "keepin", - "safe" : true, - "sequence" : [ [ -1.5, -1.5, 0, 1.5, 1.5, 3.0 ] ] - } - ] - } - -Note that the "sequence" field takes an array of 6-vectors, and not just a -single 6-vector. Each element of this array represents a zone, with each vector -denoting the two coordinates that fully-define the cuboid. - -At any point while running the robot, the operator can load and parse a JSON file: - - `rosrun executive zones_pub -compression none .json` - -Or manually through the `SetZones` service call on the ROS namespace `~/mob/set_zones`. -Please refer to the definition of \ref ff_msgs_SetZones for more information on -how to update zones using a ROS service call. - -The resulting data structure is serialized into a binary file. If the option `zone_overwrite` -in `mobility/choreographer.config` is activated (default), it will overwrite the current -`.bin` file containing the default set of zones at start-up. - -The \ref mapper node publishes -[visualization_msgs::MarkerArrays](http://docs.ros.org/kinetic/api/visualization_msgs/html/msg/MarkerArray.html) -on the namespace `~/mob/mapper/zones`. When listened to in rviz, these marker -arrays render keep-in and keep-out zones as semi-transparent green and red -cuboids. 
The two example zones should be rendered as shown below: - -![alt text](../images/mobility/zones.png "How the RVIZ user interface draws zones") - ## Octomapper The Octomapper portion of the \ref mapper creates a 3D occupancy map of the @@ -151,7 +97,8 @@ The Octomapper takes the following paramers as inputs (defined in mapper.config) * `min_range (meters)` - Minimum reliable range of the depth camera. * `memory_time (seconds)` - How long the octomap remembers the fading memory map. It remembers forever when this variable is <= 0. -* `inflate_radius (meters)` - Radial inflation size of the octomap. +* `robot_radius (meters)` - Radius of the robot considered in the planner. +* `collision_distance (meters)` - Minimum distance margin to maintain away from obstacles. * `cam_fov (radians)` - Camera horizontal field-of-view. The octomap will only update the map in the volume within the fov of the depth cam. * `cam_aspect_ratio` - Depth camera's width divided by height. Used for c diff --git a/mobility/mapper/src/mapper_nodelet.cc b/mobility/mapper/src/mapper_nodelet.cc index f2d7bd0e8a..29b451d764 100644 --- a/mobility/mapper/src/mapper_nodelet.cc +++ b/mobility/mapper/src/mapper_nodelet.cc @@ -57,7 +57,7 @@ void MapperNodelet::Initialize(ros::NodeHandle *nh) { &MapperNodelet::DiagnosticsCallback, this, false, true); // load parameters - double map_resolution, memory_time, max_range, min_range, inflate_radius; + double map_resolution, memory_time, max_range, min_range, collision_distance, robot_radius; double cam_fov, aspect_ratio; double occupancy_threshold, probability_hit, probability_miss; double clamping_threshold_max, clamping_threshold_min; @@ -67,7 +67,8 @@ void MapperNodelet::Initialize(ros::NodeHandle *nh) { max_range = cfg_.Get("max_range"); min_range = cfg_.Get("min_range"); memory_time = cfg_.Get("memory_time"); - inflate_radius = cfg_.Get("inflate_radius"); + collision_distance = cfg_.Get("collision_distance"); + robot_radius = cfg_.Get("robot_radius"); 
cam_fov = cfg_.Get("cam_fov"); aspect_ratio = cfg_.Get("cam_aspect_ratio"); occupancy_threshold = cfg_.Get("occupancy_threshold"); @@ -86,8 +87,8 @@ void MapperNodelet::Initialize(ros::NodeHandle *nh) { globals_.octomap.SetResolution(map_resolution); globals_.octomap.SetMaxRange(max_range); globals_.octomap.SetMinRange(min_range); - globals_.octomap.SetMemory(memory_time); - globals_.octomap.SetMapInflation(inflate_radius); + globals_.octomap.SetMemoryTime(memory_time); + globals_.octomap.SetMapInflation(collision_distance + robot_radius); globals_.octomap.SetCamFrustum(cam_fov, aspect_ratio); globals_.octomap.SetOccupancyThreshold(occupancy_threshold); globals_.octomap.SetHitMissProbabilities(probability_hit, probability_miss); @@ -160,12 +161,18 @@ void MapperNodelet::Initialize(ros::NodeHandle *nh) { &MapperNodelet::ResetCallback, this); // Services - resolution_srv_ = nh->advertiseService(SERVICE_MOBILITY_UPDATE_MAP_RESOLUTION, - &MapperNodelet::UpdateResolution, this); - memory_time_srv_ = nh->advertiseService(SERVICE_MOBILITY_UPDATE_MEMORY_TIME, - &MapperNodelet::UpdateMemoryTime, this); - map_inflation_srv_ = nh->advertiseService(SERVICE_MOBILITY_UPDATE_INFLATION, - &MapperNodelet::MapInflation, this); + set_resolution_srv_ = nh->advertiseService(SERVICE_MOBILITY_SET_MAP_RESOLUTION, + &MapperNodelet::SetResolution, this); + set_memory_time_srv_ = nh->advertiseService(SERVICE_MOBILITY_SET_MEMORY_TIME, + &MapperNodelet::SetMemoryTime, this); + set_collision_distance_srv_ = nh->advertiseService(SERVICE_MOBILITY_SET_COLLISION_DISTANCE, + &MapperNodelet::SetCollisionDistance, this); + get_resolution_srv_ = nh->advertiseService(SERVICE_MOBILITY_GET_MAP_RESOLUTION, + &MapperNodelet::GetResolution, this); + get_memory_time_srv_ = nh->advertiseService(SERVICE_MOBILITY_GET_MEMORY_TIME, + &MapperNodelet::GetMemoryTime, this); + get_collision_distance_srv_ = nh->advertiseService(SERVICE_MOBILITY_GET_MAP_INFLATION, + &MapperNodelet::GetMapInflation, this); reset_map_srv_ 
= nh->advertiseService(SERVICE_MOBILITY_RESET_MAP, &MapperNodelet::ResetMap, this); get_free_map_srv_ = nh->advertiseService(SERVICE_MOBILITY_GET_FREE_MAP, diff --git a/mobility/mapper/src/octoclass.cc b/mobility/mapper/src/octoclass.cc index 2832acaa5f..dd84b8992c 100644 --- a/mobility/mapper/src/octoclass.cc +++ b/mobility/mapper/src/octoclass.cc @@ -35,11 +35,15 @@ OctoClass::OctoClass(const double resolution) { OctoClass::OctoClass() {} -void OctoClass::SetMemory(const double memory) { +void OctoClass::SetMemoryTime(const double memory) { memory_time_ = memory; ROS_DEBUG("Fading memory time: %f seconds", memory_time_); } +double OctoClass::GetMemoryTime() { + return memory_time_; +} + void OctoClass::SetMaxRange(const double max_range) { max_range_ = max_range; ROS_DEBUG("Maximum range: %f meters", max_range_); @@ -79,6 +83,10 @@ void OctoClass::SetResolution(const double resolution_in) { ROS_DEBUG("Map resolution: %f meters", resolution_); } +double OctoClass::GetResolution() { + return resolution_; +} + void OctoClass::SetMapInflation(const double inflate_radius) { inflate_radius_ = inflate_radius; ResetMap(); @@ -102,6 +110,10 @@ void OctoClass::SetMapInflation(const double inflate_radius) { } } +double OctoClass::GetMapInflation() { + return inflate_radius_; +} + void OctoClass::SetCamFrustum(const double fov, const double aspect_ratio) { cam_frustum_ = algebra_3d::FrustumPlanes(fov, aspect_ratio); diff --git a/mobility/mapper/src/services.cc b/mobility/mapper/src/services.cc index ad62434856..140275f430 100644 --- a/mobility/mapper/src/services.cc +++ b/mobility/mapper/src/services.cc @@ -23,7 +23,7 @@ namespace mapper { // Update resolution of the map -bool MapperNodelet::UpdateResolution(ff_msgs::SetFloat::Request &req, +bool MapperNodelet::SetResolution(ff_msgs::SetFloat::Request &req, ff_msgs::SetFloat::Response &res) { mutexes_.octomap.lock(); globals_.octomap.SetResolution(req.data); @@ -31,25 +31,45 @@ bool 
MapperNodelet::UpdateResolution(ff_msgs::SetFloat::Request &req, res.success = true; return true; } +// Update resolution of the map +bool MapperNodelet::GetResolution(ff_msgs::GetFloat::Request &req, + ff_msgs::GetFloat::Response &res) { + res.data = globals_.octomap.GetResolution(); + res.success = true; + return true; +} // Update map memory time -bool MapperNodelet::UpdateMemoryTime(ff_msgs::SetFloat::Request &req, +bool MapperNodelet::SetMemoryTime(ff_msgs::SetFloat::Request &req, ff_msgs::SetFloat::Response &res) { mutexes_.octomap.lock(); - globals_.octomap.SetMemory(req.data); + globals_.octomap.SetMemoryTime(req.data); mutexes_.octomap.unlock(); res.success = true; return true; } +// Update map memory time +bool MapperNodelet::GetMemoryTime(ff_msgs::GetFloat::Request &req, + ff_msgs::GetFloat::Response &res) { + res.data = globals_.octomap.GetMemoryTime(); + res.success = true; + return true; +} -bool MapperNodelet::MapInflation(ff_msgs::SetFloat::Request &req, +bool MapperNodelet::SetCollisionDistance(ff_msgs::SetFloat::Request &req, ff_msgs::SetFloat::Response &res) { mutexes_.octomap.lock(); - globals_.octomap.SetMapInflation(req.data); + globals_.octomap.SetMapInflation(req.data + cfg_.Get("robot_radius")); mutexes_.octomap.unlock(); res.success = true; return true; } +bool MapperNodelet::GetMapInflation(ff_msgs::GetFloat::Request &req, + ff_msgs::GetFloat::Response &res) { + res.data = globals_.octomap.GetMapInflation(); + res.success = true; + return true; +} bool MapperNodelet::ResetMap(std_srvs::Trigger::Request &req, std_srvs::Trigger::Response &res) { diff --git a/mobility/planner_qp/planner_qp/src/planner_qp.cc b/mobility/planner_qp/planner_qp/src/planner_qp.cc index e1a403231e..619bcb9bec 100644 --- a/mobility/planner_qp/planner_qp/src/planner_qp.cc +++ b/mobility/planner_qp/planner_qp/src/planner_qp.cc @@ -27,6 +27,8 @@ #include // For the planner implementation API +#include +#include #include // Keepout zones for the planner @@ -37,9 +39,6 
@@ #include #include -#include -#include - #include #include @@ -866,8 +865,8 @@ class Planner : public planner::PlannerImplementation { } } bool load_map() { - // get points from mapper - float resf; + // Get obstacle map + float resf = 0.08; pcl::PointCloud points; if (!GetObstacleMap(&points, &resf)) { ROS_ERROR_STREAM("PlannerQP: Failed to get points from mapper service"); @@ -879,132 +878,45 @@ class Planner : public planner::PlannerImplementation { for (auto &p : points) { mapper_points_.push_back(Vec3f(p.x, p.y, p.z)); } + // Declare keepout points + vec_Vec3f keepout_points_mapper = mapper_points_; - map_res_ = static_cast(resf); - - // get zones - std::vector zones; - bool got = GetZones(zones); - if (!got) return false; - - Vec3f min, max, zmin, zmax; - min << 1000.0, 1000.0, 1000.0; - max << -1000.0, -1000.0, -1000.0; - uint num_keepin = 0; - for (auto &zone : zones) { - if (zone.type == ff_msgs::Zone::KEEPIN) { - zmin << zone.min.x, zone.min.y, zone.min.z; - zmax << zone.max.x, zone.max.y, zone.max.z; - for (int i = 0; i < 3; i++) { - min(i) = std::min(min(i), zmin(i)); - min(i) = std::min(min(i), zmax(i)); - max(i) = std::max(max(i), zmin(i)); - max(i) = std::max(max(i), zmax(i)); - } - num_keepin++; - } - } - if (num_keepin == 0) { - ROS_ERROR("Zero keepin zones!! 
Plan failed"); + // Get zones map + std::vector map; + Vec3f origin; + Vec3i dim; + if (!GetZonesMap(map, origin, dim, map_res_)) { + ROS_ERROR_STREAM("PlannerQP: Failed to get points from zones service"); return false; } - min -= Vec3f::Ones() * map_res_ * 2.0; - max += Vec3f::Ones() * map_res_ * 2.0; - - Vec3f origin = min; - Vec3f dimf = (max - min) / map_res_; - Vec3i dim(std::ceil(dimf(0)), std::ceil(dimf(1)), std::ceil(dimf(2))); - int num_cell = dim(0) * dim(1) * dim(2); - - std::vector map(num_cell, 0); jps_map_util_.reset(new JPS::VoxelMapUtil()); jps_map_util_->setMap(origin, dim, map, map_res_); - vec_Vec3f keepout_points_mapper = mapper_points_; - vec_Vec3f keepout_points_zones; - - // add contour - for (auto &zone : zones) { - zmin << std::min(zone.min.x, zone.max.x), - std::min(zone.min.y, zone.max.y), std::min(zone.min.z, zone.max.z); - zmax << std::max(zone.min.x, zone.max.x), - std::max(zone.min.y, zone.max.y), std::max(zone.min.z, zone.max.z); - Vec3f tmp = Vec3f::Zero(); - for (int i = 0; i < 3; i++) { - int j = (i + 1) % 3; - int k = (i + 2) % 3; - if (zone.type == ff_msgs::Zone::KEEPIN) { - for (auto zx = zmin(j); zx <= zmax(j); zx += map_res_) { - for (auto zy = zmin(k); zy <= zmax(k); zy += map_res_) { - tmp(j) = zx; - tmp(k) = zy; - tmp(i) = zmin(i) - map_res_ * 1.001; - map[jps_map_util_->getIndex(jps_map_util_->floatToInt(tmp))] = - 100; - tmp(i) = zmax(i) + map_res_ * 1.001; - map[jps_map_util_->getIndex(jps_map_util_->floatToInt(tmp))] = - 100; - } - } - } - } - } + // Set all remaining space as free + jps_map_util_->freeUnKnown(); - for (auto &zone : zones) { - zmin << std::min(zone.min.x, zone.max.x), - std::min(zone.min.y, zone.max.y), std::min(zone.min.z, zone.max.z); - zmax << std::max(zone.min.x, zone.max.x), - std::max(zone.min.y, zone.max.y), std::max(zone.min.z, zone.max.z); - // add points on surface - Vec3f tmp = Vec3f::Zero(); - for (int i = 0; i < 3; i++) { - int j = (i + 1) % 3; - int k = (i + 2) % 3; - for (auto zx = 
zmin(j); zx <= zmax(j); zx += map_res_) - for (auto zy = zmin(k); zy <= zmax(k); zy += map_res_) { - if (zone.type == ff_msgs::Zone::KEEPOUT) { - tmp(j) = zx; - tmp(k) = zy; - tmp(i) = zmin(i); - keepout_points_zones.push_back(tmp); - tmp(i) = zmax(i); - keepout_points_zones.push_back(tmp); - } else { - for (auto zz = zmin(i); zz <= zmax(i); zz += map_res_) { - tmp(j) = zx; - tmp(k) = zy; - tmp(i) = zz; - map[jps_map_util_->getIndex(jps_map_util_->floatToInt(tmp))] = - 0; - } - } - } - } - OUTPUT_DEBUG("PlannerQP: Keepout: " << zmin.transpose() << " to " - << zmax.transpose()); - } - // reset map - jps_map_util_->setMap(origin, dim, map, map_res_); - // for(auto &p:keepout_points) - // OUTPUT_DEBUG("PlannerQP: Keepout point: " << p.transpose()); - OUTPUT_DEBUG("PlannerQP: add3DPoints zones: " << keepout_points_zones.size() - << " mapper: " << keepout_points_mapper.size()); - // dialate - double radius; - if (!cfg_.Get("robot_radius", radius)) radius = 0.16; + // Add obstacle map points + jps_map_util_->dilate(map_res_ / 2, map_res_ / 2); // sets dilating radius + jps_map_util_->add3DPoints(keepout_points_mapper); // add keepout points from mapper - jps_map_util_->freeUnKnown(); - jps_map_util_->dilate(radius, radius); - OUTPUT_DEBUG("PlannerQP: Dilating"); - jps_map_util_->dilating(); - // keepout add point includes dilating - jps_map_util_->add3DPoints(keepout_points_zones); - jps_map_util_->add3DPoints(keepout_points_mapper); OUTPUT_DEBUG("PlannerQP: Map origin " << origin.transpose() << " dim " << dim.transpose() << " resolution " << map_res_); + // inflate map for planning further from walls, the minimum inflation is the map resolution, + // this parameter is adjusted to generate trajectories further from the walls + double planner_distance = map_res_; + if (!cfg_.Get("planner_distance", planner_distance)) { + ROS_ERROR_STREAM("planner_distance not defined"); + } + if (planner_distance < map_res_) + planner_distance = map_res_; + + 
jps_map_util_->dilate(planner_distance, planner_distance); // sets dilating radius + jps_map_util_->dilating(); // dilate entire map + + + jps_planner_.reset(new JPS::JPS3DUtil(false)); jps_planner_->setMapUtil(jps_map_util_.get()); diff --git a/mobility/planner_qp/readme.md b/mobility/planner_qp/readme.md index e67a59bfc6..1fe3abc990 100644 --- a/mobility/planner_qp/readme.md +++ b/mobility/planner_qp/readme.md @@ -43,7 +43,7 @@ The planner has adjustable parameters which can affect the optimizer's performan * `sample_rate` - The optimization uses a continuous trajectory representation which is sub-sampled at this rate before being sent to the choreographer. -* `robot_radius` - How large the robot's center must stay away from obstacles, increase this to create a trajectory further away from walls +* `planner_distance` - Additional distance that the planner should stay away from occupied areas. The occupied areas are defined by the zones and obstacles inflated using the robot radius plus the collision distance. The minimum planner distance is set to the map resolution because otherwise interpolation and upsampling of the trajectory points bordering the occupied areas will generate points inside the occupied areas. Increase this to create a trajectory further away from walls. * `two_d` - Force the z coordinate of the goal to be the same as the start. (Mainly for use on the granite table) diff --git a/scripts/build/genFaultTable.py b/scripts/build/genFaultTable.py index e4e35a773d..8923105d82 100755 --- a/scripts/build/genFaultTable.py +++ b/scripts/build/genFaultTable.py @@ -99,15 +99,15 @@ def ConvertToNumber(value): def main(): if len(sys.argv) < 4: - print "Incorrect nmuber of arguments! Please restart with path to " \ + print(("Incorrect nmuber of arguments! Please restart with path to " \ + "the FMECA csv file, the path to the config files, and the " \
+ + "path to the shared file.")) return try: file = open(sys.argv[1], 'r') except IOError: - print "Couldn't open file " + sys.argv[1] + print(("Couldn't open file " + sys.argv[1])) return lines = file.readlines() @@ -251,8 +251,8 @@ def main(): # dictionary is keyed on the subsystem name, the second dictionary # is keyed on the node name and the list is a list of faults for the # node - if fault_table.has_key(subsystem): - if fault_table[subsystem].has_key(node_name): + if subsystem in fault_table: + if node_name in fault_table[subsystem]: fault_table[subsystem][node_name].append(fault_entry) else: # Node isn't in the dictionary fault_table[subsystem][node_name] = [fault_entry] @@ -294,42 +294,42 @@ def main(): index = index + 1 if id_index == -1: - print "Could not find fault id column!" + print("Could not find fault id column!") break if subsystem_index == -1: - print "Could not find subsystem column!" + print("Could not find subsystem column!") break if description_index == -1: - print "Could not find failure mechanism column!" + print("Could not find failure mechanism column!") break if warning_index == -1: - print "Could not find warning column!" + print("Could not find warning column!") break if blocking_index == -1: - print "Could not find blocking column!" + print("Could not find blocking column!") break if node_index == -1: - print "Could not find node name column!" + print("Could not find node name column!") break if response_index == -1: - print "Could not find response command column!" + print("Could not find response command column!") break if args_index == -1: - print "Could not find response command arguments column!" + print("Could not find response command arguments column!") break if key_index == -1: - print "Could not find key column!" + print("Could not find key column!") break if timeout_index == -1: - print "Could not find heartbeat timeout column!" 
+ print("Could not find heartbeat timeout column!") break if misses_index == -1: - print "Could not find heartbeat timeout column!" + print("Could not find heartbeat timeout column!") break if titles_read == False: - print "Could not find row with column headers!" + print("Could not find row with column headers!") else: fc_file = open(faults_config_file_name, 'w') ftc_file = open(fault_table_config_file_name, 'w') @@ -341,9 +341,9 @@ def main(): line = line + "require \"management/fault_functions\"\n\n" ftc_file.write(line + "subsystems={\n") smf_file.write(line) - for subsys_key in fault_table.keys(): + for subsys_key in list(fault_table.keys()): ftc_file.write(" {name=\"" + subsys_key + "\", nodes={\n") - for nodes_key in fault_table[subsys_key].keys(): + for nodes_key in list(fault_table[subsys_key].keys()): if (nodes_key == "sys_monitor"): sm_entry_added = False for fault in fault_table[subsys_key][nodes_key]: @@ -466,7 +466,7 @@ def main(): list_faults = list(faults_not_added) list_faults[last_comma] = '!' faults_not_added = ''.join(list_faults) - print faults_not_added + print(faults_not_added) if __name__ == '__main__': main() diff --git a/scripts/build/luaTable.py b/scripts/build/luaTable.py index fe5122de22..af6124b217 100644 --- a/scripts/build/luaTable.py +++ b/scripts/build/luaTable.py @@ -3,7 +3,7 @@ Output Python data structure as a Lua table constructor. 
""" -from cStringIO import StringIO +from io import StringIO def q(s): @@ -18,7 +18,7 @@ def dumpStream(out, d, lvl=0): def w(s): out.write(s) - if isinstance(d, basestring): + if isinstance(d, str): w(q(d)) elif isinstance(d, (list, tuple)): @@ -40,7 +40,7 @@ def w(s): if d: w('{\n') n = len(d) - keys = d.keys() + keys = list(d.keys()) keys.sort() for i, k in enumerate(keys): v = d[k] diff --git a/scripts/calibrate/calibration_utils.py b/scripts/calibrate/calibration_utils.py index 4e48217750..5531797c25 100644 --- a/scripts/calibrate/calibration_utils.py +++ b/scripts/calibrate/calibration_utils.py @@ -2,101 +2,129 @@ # # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import re, sys +import re +import sys + def find_close_parenthese(s): - assert(s[0] == '(') - count = 1 - for i in range(1, len(s)): - if s[i] == '(': - count += 1 - elif s[i] == ')': - count -= 1 - if count == 0: - return i - return -1 + assert s[0] == "(" + count = 1 + for i in range(1, len(s)): + if s[i] == "(": + count += 1 + elif s[i] == ")": + count -= 1 + if count == 0: + return i + return -1 + def find_close_bracket(s): - assert(s[0] == '{') - count = 1 - for i in range(1, len(s)): - if s[i] == '{': - count += 1 - elif s[i] == '}': - count -= 1 - if count == 0: - return i - return -1 + assert s[0] == "{" + count = 1 + for i in range(1, len(s)): + if s[i] == "{": + count += 1 + elif s[i] == "}": + count -= 1 + if count == 0: + return i + return -1 + def lua_find_transform(text, transform_name): - # Search for lines starting with the transform name, ignoring, for - # example, lines starting with lua comments, that is, with "--". - prog = re.compile('(^|\n\s*)' + re.escape(transform_name) + '\s*=\s*transform\(') - match = re.search(prog, text) - if not match: - return (None, None, None) - open_bracket = match.end() - 1 - close_bracket = find_close_parenthese(text[open_bracket:]) - if close_bracket < 0: - return (None, None, None) - close_bracket = open_bracket + close_bracket - transform_text = text[open_bracket:close_bracket+1] - return (transform_text, open_bracket, close_bracket) + # Search for lines starting with the transform name, ignoring, for + # example, lines starting with lua comments, that is, with "--". 
+ prog = re.compile("(^|\n\s*)" + re.escape(transform_name) + "\s*=\s*transform\(") + match = re.search(prog, text) + if not match: + return (None, None, None) + open_bracket = match.end() - 1 + close_bracket = find_close_parenthese(text[open_bracket:]) + if close_bracket < 0: + return (None, None, None) + close_bracket = open_bracket + close_bracket + transform_text = text[open_bracket : close_bracket + 1] + return (transform_text, open_bracket, close_bracket) + def lua_read_transform(filename, transform_name): - try: - with open(filename, 'r') as f: - contents = f.read() - except IOError: - return None - (transform_text, open_bracket, close_bracket) = lua_find_transform(contents, transform_name) - prog = re.compile('\(vec3\((.*)\),\s*quat4\((.*)\)\s*\)') - m = re.match(prog, transform_text) - if m == None: - print >> sys.stderr, "Could not extract the transform from string: %s " % transform_text - return None - trans = map(float, map(lambda x: x.strip(), m.group(1).split(','))) - quat = map(float, map(lambda x: x.strip(), m.group(2).split(','))) - t = quat[0] - return (trans, quat) + try: + with open(filename, "r") as f: + contents = f.read() + except IOError: + return None + (transform_text, open_bracket, close_bracket) = lua_find_transform( + contents, transform_name + ) + prog = re.compile("\(vec3\((.*)\),\s*quat4\((.*)\)\s*\)") + m = re.match(prog, transform_text) + if m == None: + print( + "Could not extract the transform from string: %s " % transform_text, + file=sys.stderr, + ) + return None + trans = list(map(float, [x.strip() for x in m.group(1).split(",")])) + quat = list(map(float, [x.strip() for x in m.group(2).split(",")])) + t = quat[0] + return (trans, quat) + def lua_replace_transform(filename, transform_name, transform): - try: - with open(filename, 'r') as f: - contents = f.read() - except IOError: - print >> sys.stderr, 'Failed to open lua file.' 
- return True - (transform_text, open_bracket, close_bracket) = lua_find_transform(contents, transform_name) - if transform_text == None: - print >> sys.stderr, 'Failed to read field %s from %s.' % (transform_name, filename) - return True + try: + with open(filename, "r") as f: + contents = f.read() + except IOError: + print("Failed to open lua file.", file=sys.stderr) + return True + (transform_text, open_bracket, close_bracket) = lua_find_transform( + contents, transform_name + ) + if transform_text == None: + print( + "Failed to read field %s from %s." + % ( + transform_name, + filename, + ), + file=sys.stderr, + ) + return True - new_transform_text = '(vec3(%.8g, %.8g, %.8g), quat4(%.8g, %.8g, %.8g, %.8g))' % \ - (transform[0][0], transform[0][1], transform[0][2], \ - transform[1][0], transform[1][1], transform[1][2], transform[1][3]) - output_text = contents[:open_bracket] + new_transform_text + contents[close_bracket+1:] + new_transform_text = "(vec3(%.8g, %.8g, %.8g), quat4(%.8g, %.8g, %.8g, %.8g))" % ( + transform[0][0], + transform[0][1], + transform[0][2], + transform[1][0], + transform[1][1], + transform[1][2], + transform[1][3], + ) + output_text = ( + contents[:open_bracket] + new_transform_text + contents[close_bracket + 1 :] + ) - print("Updating " + transform_name + " in file: " + filename) - try: - with open(filename, 'w') as f: - f.write(output_text) - except IOError: - print >> sys.stderr, 'Failed to open lua file ' + filename - return True - return False + print(("Updating " + transform_name + " in file: " + filename)) + try: + with open(filename, "w") as f: + f.write(output_text) + except IOError: + print("Failed to open lua file " + filename, file=sys.stderr) + return True + return False diff --git a/scripts/calibrate/extrinsics_calibrate.py b/scripts/calibrate/extrinsics_calibrate.py index e58f2f1de8..b3250fce56 100755 --- a/scripts/calibrate/extrinsics_calibrate.py +++ b/scripts/calibrate/extrinsics_calibrate.py @@ -2,15 +2,15 @@ # # 
Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -23,205 +23,298 @@ import re import subprocess import sys -import yaml + import numpy as np import numpy.linalg -from tf import transformations +import yaml from calibration_utils import * +from tf import transformations + # returns extrinsics (T_cam_imu) from the yaml file -def read_yaml_extrinsics(filename,cameras): - extrinsics = [] - with open(filename, 'r') as f: - try: - d = yaml.load(f) - for cam in cameras: - extrinsics.append(np.array(d[cam]['T_cam_imu'])) - except yaml.YAMLError as exc: - print >> sys.stderr, exc - return extrinsics +def read_yaml_extrinsics(filename, cameras): + extrinsics = [] + with open(filename, "r") as f: + try: + d = yaml.load(f) + for cam in cameras: + extrinsics.append(np.array(d[cam]["T_cam_imu"])) + except yaml.YAMLError as exc: + print(exc, file=sys.stderr) + return extrinsics + def has_depth_topic(filename): - '''See if a depth topic is in the file.''' - with open(filename, 'r') as f: - for line in f: - prog = re.compile('^\s*rostopic:\s*/hw/depth') - m = re.match(prog, line) - if m: - return True + """See if a depth topic is in the file.""" + with open(filename, "r") as f: + for line in f: + prog = re.compile("^\s*rostopic:\s*/hw/depth") + m = re.match(prog, line) + if m: + return True + + return False - return False def has_dock_topic(filename): - '''See if a dock topic is in the 
file.''' - with open(filename, 'r') as f: - for line in f: - prog = re.compile('^\s*rostopic:\s*/hw/.*?dock') - m = re.match(prog, line) - if m: - return True + """See if a dock topic is in the file.""" + with open(filename, "r") as f: + for line in f: + prog = re.compile("^\s*rostopic:\s*/hw/.*?dock") + m = re.match(prog, line) + if m: + return True + + return False + + +def calibrate_extrinsics( + bag, + calibration_target, + intrinsics_yaml, + imu_yaml, + from_time, + to_time, + padding, + verbose=False, +): + + extra_flags = " --verbose" if verbose else " --dont-show-report" + + if from_time is not None and to_time is not None: + extra_flags += " --bag-from-to " + from_time + " " + to_time + + if padding is not None: + extra_flags += " --timeoffset-padding " + padding + + if not os.path.isfile(bag): + print("Bag file %s does not exist." % (bag), file=sys.stderr) + return None + if not os.path.isfile(calibration_target): + print("Target file %s does not exist." % (calibration_target), file=sys.stderr) + return None + if not os.path.isfile(intrinsics_yaml): + print("Intrinsics file %s does not exist." % (intrinsics_yaml), file=sys.stderr) + return None + if not os.path.isfile(imu_yaml): + print("IMU file %s does not exist." 
% (imu_yaml), file=sys.stderr) + return None + + bag_dir = os.path.dirname(os.path.abspath(bag)) + bag_file = os.path.basename(bag) + bag_name = os.path.splitext(bag_file)[0] + + # Display in the terminal what is going on + output_arg = None + + cmd = ( + "rosrun kalibr kalibr_calibrate_imu_camera --bag %s --cam %s --imu %s " + + "--target %s --time-calibration%s" + ) % (bag_file, intrinsics_yaml, imu_yaml, calibration_target, extra_flags) + + print(("Running in: " + bag_dir)) + print(cmd) + ret = subprocess.call( + cmd.split(), cwd=bag_dir, stdout=output_arg, stderr=output_arg + ) + if ret != 0: + print("Failed to calibrate extrinsics from bag file.", file=sys.stderr) + return None + return bag_dir + "/camchain-imucam-" + bag_name + ".yaml" + + +def trans_quat_to_transform(xxx_todo_changeme): + (trans, quat) = xxx_todo_changeme + if trans is None or quat is None: + return None + # I think Jesse is confused with what is actually stored in the file + # because his quaternion_to_dcm seems to give the conjugate of the right quaternion + m = transformations.quaternion_matrix(transformations.quaternion_conjugate(quat)) + m[0, 3] = -trans[0] + m[1, 3] = -trans[1] + m[2, 3] = -trans[2] + return m - return False - - -def calibrate_extrinsics(bag, calibration_target, intrinsics_yaml, imu_yaml, - from_time, to_time, padding, - verbose=False): - - extra_flags = ' --verbose' if verbose else ' --dont-show-report' - - if from_time is not None and to_time is not None: - extra_flags += ' --bag-from-to ' + from_time + ' ' + to_time - - if padding is not None: - extra_flags += ' --timeoffset-padding ' + padding - - if not os.path.isfile(bag): - print >> sys.stderr, 'Bag file %s does not exist.' % (bag) - return None - if not os.path.isfile(calibration_target): - print >> sys.stderr, 'Target file %s does not exist.' % (calibration_target) - return None - if not os.path.isfile(intrinsics_yaml): - print >> sys.stderr, 'Intrinsics file %s does not exist.' 
% (intrinsics_yaml) - return None - if not os.path.isfile(imu_yaml): - print >> sys.stderr, 'IMU file %s does not exist.' % (imu_yaml) - return None - - bag_dir = os.path.dirname(os.path.abspath(bag)) - bag_file = os.path.basename(bag) - bag_name = os.path.splitext(bag_file)[0] - - # Display in the terminal what is going on - output_arg = None - - cmd = ('rosrun kalibr kalibr_calibrate_imu_camera --bag %s --cam %s --imu %s ' + - '--target %s --time-calibration%s') % \ - (bag_file, intrinsics_yaml, imu_yaml, calibration_target, extra_flags) - - print("Running in: " + bag_dir) - print(cmd) - ret = subprocess.call(cmd.split(), cwd=bag_dir, stdout=output_arg, stderr=output_arg) - if ret != 0: - print >> sys.stderr, 'Failed to calibrate extrinsics from bag file.' - return None - return bag_dir + '/camchain-imucam-' + bag_name + '.yaml' - -def trans_quat_to_transform((trans, quat)): - if trans is None or quat is None: - return None - # I think Jesse is confused with what is actually stored in the file - # because his quaternion_to_dcm seems to give the conjugate of the right quaternion - m = transformations.quaternion_matrix(transformations.quaternion_conjugate(quat)) - m[0,3] = -trans[0] - m[1,3] = -trans[1] - m[2,3] = -trans[2] - return m def has_two_cameras(filename): - try: - with open(filename, 'r') as f: - contents = f.read() - except IOError: - print >> sys.stderr, 'Failed to open file: ' + filename - return False - - prog = re.compile('cam1') - match = re.search(prog, contents) - if match: - return True - else: - return False + try: + with open(filename, "r") as f: + contents = f.read() + except IOError: + print("Failed to open file: " + filename, file=sys.stderr) + return False + + prog = re.compile("cam1") + match = re.search(prog, contents) + if match: + return True + else: + return False + def main(): - SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - default_calibration_target = SCRIPT_DIR + '/config/granite_april_tag.yaml' - - parser = 
argparse.ArgumentParser(description='Calibrate extrinsics parameters.') - parser.add_argument('robot', help='The name of the robot to configure (i.e., put p4d to edit p4d.config).') - parser.add_argument('intrinsics_yaml', help='The yaml file with intrinsics calibration data.') - parser.add_argument('extrinsics_bag', help='The bag file with extrinsics calibration data.') - parser.add_argument('-d', '--dock_cam', dest='dock_cam', action='store_true', help='Calibrate dock camera.') - parser.add_argument('-f', '--from', dest='from_time', help='Use the bag data starting at this time, in seconds.') - parser.add_argument('-t', '--to', dest='to_time', help='Use the bag data until this time, in seconds.') - parser.add_argument('--timeoffset-padding', dest='padding', help='Maximum range in which the timeoffset may change during estimation, in seconds. See readme.md for more info. (default: 0.01)') - parser.add_argument('--calibration_target', dest='calibration_target', help='Use this yaml file to desribe the calibration target, overriding the default: ' + default_calibration_target) - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Verbose mode.') - args = parser.parse_args() - - if args.calibration_target is None: - args.calibration_target = default_calibration_target - - imu_yaml = SCRIPT_DIR + '/config/imu.yaml' - config_file = SCRIPT_DIR + '/../../astrobee/config/robots/' + args.robot + '.config' - - # Sanity check - has_dock = has_dock_topic(args.intrinsics_yaml) - if has_dock and (not args.dock_cam): - print >> sys.stderr, 'File ' + args.intrinsics_yaml + ' + has a dock topic, ' + \ - ' yet the --dock-cam flag was not used.' - return - - print 'Calibrating camera extrinsics...' 
- extrinsics_yaml = calibrate_extrinsics(args.extrinsics_bag, args.calibration_target, - args.intrinsics_yaml, imu_yaml, - args.from_time, args.to_time, args.padding, - verbose=args.verbose) - - if extrinsics_yaml == None: - print >> sys.stderr, 'Failed to calibrate extrinsics.' - return - - two_cams = has_two_cameras(extrinsics_yaml) - if two_cams: - imu_to_camera = read_yaml_extrinsics(extrinsics_yaml, ['cam0', 'cam1']) - else: - imu_to_camera = read_yaml_extrinsics(extrinsics_yaml, ['cam0']) - - if not imu_to_camera: - print >> sys.stderr, 'Failed to read extrinsics from yaml file.' - return - - body_to_imu = trans_quat_to_transform(lua_read_transform(config_file, 'imu_transform')) - if body_to_imu is None: - print >> sys.stderr, 'Failed to read imu transform.' - return - - imu_to_body = np.linalg.inv(body_to_imu) - body_to_camera = np.linalg.inv(imu_to_body.dot(np.linalg.inv(imu_to_camera[0]))) - - q = transformations.quaternion_conjugate(transformations.quaternion_from_matrix(body_to_camera)) - t = imu_to_body.dot(np.linalg.inv(imu_to_camera[0])[0:4,3]) - print 'Cam0:' - print 'Translation: ', t - print 'Rotation quaternion: ', q - - if two_cams: - # Modify the transform for the second camera, that is the depth one - body_to_depth_camera = np.linalg.inv(imu_to_body.dot(np.linalg.inv(imu_to_camera[1]))) - q_depth = transformations.quaternion_conjugate(transformations.quaternion_from_matrix(body_to_depth_camera)) - t_depth = imu_to_body.dot(np.linalg.inv(imu_to_camera[1])[0:4,3]) - print 'Cam1:' - print 'Translation: ', t_depth - print 'Rotation quaternion: ', q_depth - if lua_replace_transform(config_file, 'perch_cam_transform' if args.dock_cam else 'haz_cam_transform', (t_depth, q_depth)): - print >> sys.stderr, 'Failed to update config file with depth camera parameters.' 
- return - - has_depth = has_depth_topic(args.intrinsics_yaml) - if has_depth and (not two_cams): - # Only the depth camera is present, so the first transform corresponds to the depth camera - if lua_replace_transform(config_file, 'perch_cam_transform' if args.dock_cam else 'haz_cam_transform', (t, q)): - print >> sys.stderr, 'Failed to update config file with depth camera parameters' - return - else: - # Either both cameras are present, or only the HD cam, so the first transform - # corresponds to the HD camera - if lua_replace_transform(config_file, 'dock_cam_transform' if args.dock_cam else 'nav_cam_transform', (t, q)): - print >> sys.stderr, 'Failed to update config file with HD camera parameters' - return - -if __name__ == '__main__': - main() + SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + default_calibration_target = SCRIPT_DIR + "/config/granite_april_tag.yaml" + + parser = argparse.ArgumentParser(description="Calibrate extrinsics parameters.") + parser.add_argument( + "robot", + help="The name of the robot to configure (i.e., put p4d to edit p4d.config).", + ) + parser.add_argument( + "intrinsics_yaml", help="The yaml file with intrinsics calibration data." + ) + parser.add_argument( + "extrinsics_bag", help="The bag file with extrinsics calibration data." + ) + parser.add_argument( + "-d", + "--dock_cam", + dest="dock_cam", + action="store_true", + help="Calibrate dock camera.", + ) + parser.add_argument( + "-f", + "--from", + dest="from_time", + help="Use the bag data starting at this time, in seconds.", + ) + parser.add_argument( + "-t", + "--to", + dest="to_time", + help="Use the bag data until this time, in seconds.", + ) + parser.add_argument( + "--timeoffset-padding", + dest="padding", + help="Maximum range in which the timeoffset may change during estimation, in seconds. See readme.md for more info. 
(default: 0.01)", + ) + parser.add_argument( + "--calibration_target", + dest="calibration_target", + help="Use this yaml file to desribe the calibration target, overriding the default: " + + default_calibration_target, + ) + parser.add_argument( + "-v", "--verbose", dest="verbose", action="store_true", help="Verbose mode." + ) + args = parser.parse_args() + + if args.calibration_target is None: + args.calibration_target = default_calibration_target + + imu_yaml = SCRIPT_DIR + "/config/imu.yaml" + config_file = SCRIPT_DIR + "/../../astrobee/config/robots/" + args.robot + ".config" + + # Sanity check + has_dock = has_dock_topic(args.intrinsics_yaml) + if has_dock and (not args.dock_cam): + print( + "File " + + args.intrinsics_yaml + + " + has a dock topic, " + + " yet the --dock-cam flag was not used.", + file=sys.stderr, + ) + return + + print("Calibrating camera extrinsics...") + extrinsics_yaml = calibrate_extrinsics( + args.extrinsics_bag, + args.calibration_target, + args.intrinsics_yaml, + imu_yaml, + args.from_time, + args.to_time, + args.padding, + verbose=args.verbose, + ) + + if extrinsics_yaml == None: + print("Failed to calibrate extrinsics.", file=sys.stderr) + return + + two_cams = has_two_cameras(extrinsics_yaml) + if two_cams: + imu_to_camera = read_yaml_extrinsics(extrinsics_yaml, ["cam0", "cam1"]) + else: + imu_to_camera = read_yaml_extrinsics(extrinsics_yaml, ["cam0"]) + + if not imu_to_camera: + print("Failed to read extrinsics from yaml file.", file=sys.stderr) + return + + body_to_imu = trans_quat_to_transform( + lua_read_transform(config_file, "imu_transform") + ) + if body_to_imu is None: + print("Failed to read imu transform.", file=sys.stderr) + return + + imu_to_body = np.linalg.inv(body_to_imu) + body_to_camera = np.linalg.inv(imu_to_body.dot(np.linalg.inv(imu_to_camera[0]))) + + q = transformations.quaternion_conjugate( + transformations.quaternion_from_matrix(body_to_camera) + ) + t = imu_to_body.dot(np.linalg.inv(imu_to_camera[0])[0:4, 
3]) + print("Cam0:") + print("Translation: ", t) + print("Rotation quaternion: ", q) + + if two_cams: + # Modify the transform for the second camera, that is the depth one + body_to_depth_camera = np.linalg.inv( + imu_to_body.dot(np.linalg.inv(imu_to_camera[1])) + ) + q_depth = transformations.quaternion_conjugate( + transformations.quaternion_from_matrix(body_to_depth_camera) + ) + t_depth = imu_to_body.dot(np.linalg.inv(imu_to_camera[1])[0:4, 3]) + print("Cam1:") + print("Translation: ", t_depth) + print("Rotation quaternion: ", q_depth) + if lua_replace_transform( + config_file, + "perch_cam_transform" if args.dock_cam else "haz_cam_transform", + (t_depth, q_depth), + ): + print( + "Failed to update config file with depth camera parameters.", + file=sys.stderr, + ) + return + + has_depth = has_depth_topic(args.intrinsics_yaml) + if has_depth and (not two_cams): + # Only the depth camera is present, so the first transform corresponds to the depth camera + if lua_replace_transform( + config_file, + "perch_cam_transform" if args.dock_cam else "haz_cam_transform", + (t, q), + ): + print( + "Failed to update config file with depth camera parameters", + file=sys.stderr, + ) + return + else: + # Either both cameras are present, or only the HD cam, so the first transform + # corresponds to the HD camera + if lua_replace_transform( + config_file, + "dock_cam_transform" if args.dock_cam else "nav_cam_transform", + (t, q), + ): + print( + "Failed to update config file with HD camera parameters", + file=sys.stderr, + ) + return + + +if __name__ == "__main__": + main() diff --git a/scripts/calibrate/intrinsics_calibrate.py b/scripts/calibrate/intrinsics_calibrate.py index 48510395da..a7aae5b8e5 100755 --- a/scripts/calibrate/intrinsics_calibrate.py +++ b/scripts/calibrate/intrinsics_calibrate.py @@ -2,15 +2,15 @@ # # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. 
-# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the @@ -23,271 +23,434 @@ import re import subprocess import sys -import yaml + import numpy as np import numpy.linalg -from tf import transformations +import yaml from calibration_utils import * +from tf import transformations + # returns intrinsics, distortion, and transforms between cameras from a yaml file def read_yaml(filename, cameras): - dist = [] - intrinsics = [] - transforms = [] - with open(filename, 'r') as f: - try: - d = yaml.load(f) - for cam in cameras: - dist.append(d[cam]['distortion_coeffs']) - intrinsics.append(d[cam]['intrinsics']) - if cam == 'cam0': - # No transform from cam0 to itself, just use a placeholder - transforms.append(np.array([[]])) - else: - # Transform from current camera to previous one - transforms.append(np.array(d[cam]['T_cn_cnm1'])) - except yaml.YAMLError as exc: - print >> sys.stderr, exc - - return (intrinsics, dist, transforms) + dist = [] + intrinsics = [] + transforms = [] + with open(filename, "r") as f: + try: + d = yaml.load(f) + for cam in cameras: + dist.append(d[cam]["distortion_coeffs"]) + intrinsics.append(d[cam]["intrinsics"]) + if cam == "cam0": + # No transform from cam0 to itself, just use a placeholder + transforms.append(np.array([[]])) + else: + # Transform from current camera to previous one + transforms.append(np.array(d[cam]["T_cn_cnm1"])) + except yaml.YAMLError as exc: + print(exc, file=sys.stderr) + + return (intrinsics, dist, transforms) + def update_intrinsics(filename, camera, intrinsics, 
distortion): - try: - with open(filename, 'r') as f: - contents = f.read() - except IOError: - print >> sys.stderr, 'Failed to open lua file.' - return True - prog = re.compile(re.escape(camera) + '\s*=\s*\{') - match = re.search(prog, contents) - if not match: - print >> sys.stderr, 'Camera %s not found in lua file %s.' % (camera, filename) - return True - open_bracket = match.end() - 1 - close_bracket = find_close_bracket(contents[open_bracket:]) - if close_bracket < 0: - print >> sys.stderr, 'Camera config did not terminate.' - return True - close_bracket = open_bracket + close_bracket - camera_text = contents[open_bracket:close_bracket+1] - - # Update the distortion - prog = re.compile('distortion_coeff\s*=\s*.*?\n') - if type(distortion) is float: - (camera_text, replacements) = re.subn(prog, 'distortion_coeff = %g,\n' % (distortion), camera_text) - else: - (camera_text, replacements) = re.subn(prog, 'distortion_coeff = {%g, %g, %g, %g},\n' % (distortion[0], distortion[1], distortion[2], distortion[3]), camera_text) - - if replacements != 1: - print >> sys.stderr, 'Failed to replace distortion.' - return True - - # Update the intrinsics - prog = re.compile('intrinsic_matrix\s*=\s*\{.*?\}', re.DOTALL) - intrinsic_string = 'intrinsic_matrix = {\n %.8g, 0.0, %.8g,\n 0.0, %.8g, %.8g,\n 0.0, 0.0, 1.0\n }' % (\ - intrinsics[0], intrinsics[2], intrinsics[1], intrinsics[3]) - (camera_text, replacements) = re.subn(prog, intrinsic_string, camera_text) - if replacements != 1: - print >> sys.stderr, 'Failed to replace intrinsics matrix.' 
- return True - - output_text = contents[:open_bracket] + camera_text + contents[close_bracket+1:] - try: - with open(filename, 'w') as f: - f.write(output_text) - except IOError: - print >> sys.stderr, 'Failed to open lua file ' + filename - return True - return False - -def calibrate_camera(bag, calibration_target, from_time, to_time, approx_sync, - nav_cam_topic, haz_cam_topic, sci_cam_topic, - dock_cam, depth_cam, sci_cam, only_depth_cam, - verbose): - - fov_model = 'pinhole-fov' - radtan_model = 'pinhole-radtan' - - HD_CAMERA_TOPIC = '/hw/cam_dock' if dock_cam else nav_cam_topic - - if HD_CAMERA_TOPIC == '/hw/cam_dock': - DEPTH_CAMERA_TOPIC = '/hw/depth_perch/extended/amplitude_int' - else: - DEPTH_CAMERA_TOPIC = haz_cam_topic - - extra_flags = ' --verbose' if verbose else ' --dont-show-report' - - if from_time is not None and to_time is not None: - extra_flags += ' --bag-from-to ' + from_time + ' ' + to_time - - if approx_sync is not None: - extra_flags += ' --approx-sync ' + approx_sync - - if not os.path.isfile(bag): - print >> sys.stderr, 'Bag file %s does not exist.' % (bag) - return None - if not os.path.isfile(calibration_target): - print >> sys.stderr, 'Target file %s does not exist.' 
% (calibration_target) - return None - - bag_dir = os.path.dirname(os.path.abspath(bag)) - bag_file = os.path.basename(bag) - bag_name = os.path.splitext(bag_file)[0] - - if only_depth_cam: - # Calibrate only the depth camera - cmd = ('rosrun kalibr kalibr_calibrate_cameras --topics %s --models %s ' + - '--target %s --bag %s%s') % \ - (DEPTH_CAMERA_TOPIC, radtan_model, calibration_target, bag_file, extra_flags) - elif sci_cam: - # Calibrate the nav, haz, and sci cameras - cmd = ('rosrun kalibr kalibr_calibrate_cameras --topics %s %s %s --models %s %s %s ' + - '--target %s --bag %s%s') % \ - (HD_CAMERA_TOPIC, DEPTH_CAMERA_TOPIC, sci_cam_topic, fov_model, radtan_model, radtan_model, - calibration_target, bag_file, extra_flags) - elif depth_cam: - # Calibrate the HD and depth cameras - cmd = ('rosrun kalibr kalibr_calibrate_cameras --topics %s %s --models %s %s ' + - '--target %s --bag %s%s') % \ - (HD_CAMERA_TOPIC, DEPTH_CAMERA_TOPIC, fov_model, radtan_model, - calibration_target, bag_file, extra_flags) - else: - # Calibrate only the HD camera (nav or dock) - cmd = ('rosrun kalibr kalibr_calibrate_cameras --topics %s ' + - '--models %s --target %s --bag %s%s') % \ - (HD_CAMERA_TOPIC, fov_model, calibration_target, bag_file, extra_flags) - - print("Running in: " + bag_dir) - print(cmd) - output_arg = None # Display in the terminal what is going on - ret = subprocess.call(cmd.split(), cwd=bag_dir, stdout=output_arg, stderr=output_arg) - - if ret != 0: - print >> sys.stderr, 'Failed to calibrate camera from bag file.' - return None - - return bag_dir + '/camchain-' + bag_name + '.yaml' # The file where kalibr writes its output + try: + with open(filename, "r") as f: + contents = f.read() + except IOError: + print("Failed to open lua file.", file=sys.stderr) + return True + prog = re.compile(re.escape(camera) + "\s*=\s*\{") + match = re.search(prog, contents) + if not match: + print( + "Camera %s not found in lua file %s." 
% (camera, filename), file=sys.stderr + ) + return True + open_bracket = match.end() - 1 + close_bracket = find_close_bracket(contents[open_bracket:]) + if close_bracket < 0: + print("Camera config did not terminate.", file=sys.stderr) + return True + close_bracket = open_bracket + close_bracket + camera_text = contents[open_bracket : close_bracket + 1] + + # Update the distortion + prog = re.compile("distortion_coeff\s*=\s*.*?\n") + if type(distortion) is float: + (camera_text, replacements) = re.subn( + prog, "distortion_coeff = %g,\n" % (distortion), camera_text + ) + else: + (camera_text, replacements) = re.subn( + prog, + "distortion_coeff = {%g, %g, %g, %g},\n" + % (distortion[0], distortion[1], distortion[2], distortion[3]), + camera_text, + ) + + if replacements != 1: + print("Failed to replace distortion.", file=sys.stderr) + return True + + # Update the intrinsics + prog = re.compile("intrinsic_matrix\s*=\s*\{.*?\}", re.DOTALL) + intrinsic_string = ( + "intrinsic_matrix = {\n %.8g, 0.0, %.8g,\n 0.0, %.8g, %.8g,\n 0.0, 0.0, 1.0\n }" + % (intrinsics[0], intrinsics[2], intrinsics[1], intrinsics[3]) + ) + (camera_text, replacements) = re.subn(prog, intrinsic_string, camera_text) + if replacements != 1: + print("Failed to replace intrinsics matrix.", file=sys.stderr) + return True + + output_text = contents[:open_bracket] + camera_text + contents[close_bracket + 1 :] + try: + with open(filename, "w") as f: + f.write(output_text) + except IOError: + print("Failed to open lua file " + filename, file=sys.stderr) + return True + return False + + +def calibrate_camera( + bag, + calibration_target, + from_time, + to_time, + approx_sync, + nav_cam_topic, + haz_cam_topic, + sci_cam_topic, + dock_cam, + depth_cam, + sci_cam, + only_depth_cam, + verbose, +): + + fov_model = "pinhole-fov" + radtan_model = "pinhole-radtan" + + HD_CAMERA_TOPIC = "/hw/cam_dock" if dock_cam else nav_cam_topic + + if HD_CAMERA_TOPIC == "/hw/cam_dock": + DEPTH_CAMERA_TOPIC = 
"/hw/depth_perch/extended/amplitude_int" + else: + DEPTH_CAMERA_TOPIC = haz_cam_topic + + extra_flags = " --verbose" if verbose else " --dont-show-report" + + if from_time is not None and to_time is not None: + extra_flags += " --bag-from-to " + from_time + " " + to_time + + if approx_sync is not None: + extra_flags += " --approx-sync " + approx_sync + + if not os.path.isfile(bag): + print("Bag file %s does not exist." % (bag), file=sys.stderr) + return None + if not os.path.isfile(calibration_target): + print("Target file %s does not exist." % (calibration_target), file=sys.stderr) + return None + + bag_dir = os.path.dirname(os.path.abspath(bag)) + bag_file = os.path.basename(bag) + bag_name = os.path.splitext(bag_file)[0] + + if only_depth_cam: + # Calibrate only the depth camera + cmd = ( + "rosrun kalibr kalibr_calibrate_cameras --topics %s --models %s " + + "--target %s --bag %s%s" + ) % ( + DEPTH_CAMERA_TOPIC, + radtan_model, + calibration_target, + bag_file, + extra_flags, + ) + elif sci_cam: + # Calibrate the nav, haz, and sci cameras + cmd = ( + "rosrun kalibr kalibr_calibrate_cameras --topics %s %s %s --models %s %s %s " + + "--target %s --bag %s%s" + ) % ( + HD_CAMERA_TOPIC, + DEPTH_CAMERA_TOPIC, + sci_cam_topic, + fov_model, + radtan_model, + radtan_model, + calibration_target, + bag_file, + extra_flags, + ) + elif depth_cam: + # Calibrate the HD and depth cameras + cmd = ( + "rosrun kalibr kalibr_calibrate_cameras --topics %s %s --models %s %s " + + "--target %s --bag %s%s" + ) % ( + HD_CAMERA_TOPIC, + DEPTH_CAMERA_TOPIC, + fov_model, + radtan_model, + calibration_target, + bag_file, + extra_flags, + ) + else: + # Calibrate only the HD camera (nav or dock) + cmd = ( + "rosrun kalibr kalibr_calibrate_cameras --topics %s " + + "--models %s --target %s --bag %s%s" + ) % (HD_CAMERA_TOPIC, fov_model, calibration_target, bag_file, extra_flags) + + print(("Running in: " + bag_dir)) + print(cmd) + output_arg = None # Display in the terminal what is going on + 
ret = subprocess.call( + cmd.split(), cwd=bag_dir, stdout=output_arg, stderr=output_arg + ) + + if ret != 0: + print("Failed to calibrate camera from bag file.", file=sys.stderr) + return None + + return ( + bag_dir + "/camchain-" + bag_name + ".yaml" + ) # The file where kalibr writes its output + def main(): - SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - default_calibration_target = SCRIPT_DIR + '/config/granite_april_tag.yaml' - - parser = argparse.ArgumentParser(description='Calibrate intrinsic parameters.') - parser.add_argument('robot', help='The name of the robot to configure (i.e., put p4d to edit p4d.config).') - parser.add_argument('bag', help='The bag file with calibration data.') - parser.add_argument('-d', '--dock_cam', dest='dock_cam', action='store_true', help='Calibrate dock camera.') - parser.add_argument('-depth', '--depth_cam', dest='depth_cam', action='store_true', help='Calibrate respective depth camera.') - parser.add_argument('--only_depth_cam', dest='only_depth_cam', action='store_true', help='Calibrate only the depth camera (haz or perch).') - parser.add_argument('--sci_cam', dest='sci_cam', action='store_true', help='Calibrate nav_cam, haz_cam, and sci_cam.') - parser.add_argument('-f', '--from', dest='from_time', help='Use the bag data starting at this time, in seconds.') - parser.add_argument('-t', '--to', dest='to_time', help='Use the bag data until this time, in seconds.') - parser.add_argument('--calibration_target', dest='calibration_target', help='Use this yaml file to desribe the calibration target, overriding the default: ' + default_calibration_target) - parser.add_argument('--approx_sync', dest='approx_sync', help='Time tolerance for approximate image synchronization [s] (default: 0.02).') - parser.add_argument('--nav_cam_topic', default = '/hw/cam_nav', help='The nav cam topic name.') - parser.add_argument('--haz_cam_topic', default = '/hw/depth_haz/extended/amplitude_int', help='The haz cam topic name.') - 
parser.add_argument('--sci_cam_topic', default = '/hw/cam_sci', help='The sci cam topic name.') - parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Verbose mode.') - args = parser.parse_args() - - if args.calibration_target is None: - args.calibration_target = default_calibration_target - - # Sanity checks for sci_cam - if args.sci_cam: - if args.only_depth_cam or args.dock_cam: - print >> sys.stderr, "The sci cam must be calibrated together with the nav and haz cams." - return - if not args.depth_cam: - print("Since sci cam is to be calibrated, also calibrate the haz cam.") - args.depth_cam = True - - # Setup files - yaml_file = calibrate_camera(args.bag, args.calibration_target, - args.from_time, args.to_time, args.approx_sync, - args.nav_cam_topic, args.haz_cam_topic, args.sci_cam_topic, - args.dock_cam, args.depth_cam, args.sci_cam, args.only_depth_cam, - args.verbose) - - if yaml_file is None: - print >> sys.stderr, 'Failed to run calibration.' - return - - print("Reading calibration report file: " + yaml_file) - - if args.sci_cam: - # The yaml file has info for nav, haz, and sci cameras - (intrinsics, distortion, transforms) = read_yaml(yaml_file, ['cam0', 'cam1', 'cam2']) - if len(intrinsics) < 3 or len(distortion) < 3: - print >> sys.stderr, 'Failed to read intrinsics for the nav, haz, and sci cameras.' - return - elif args.depth_cam: - # The yaml file has info for both cameras - (intrinsics, distortion, transforms) = read_yaml(yaml_file, ['cam0', 'cam1']) - if len(intrinsics) < 2 or len(distortion) < 2: - print >> sys.stderr, 'Failed to read depth camera parameters.' 
- return - elif args.only_depth_cam: - # The yaml file only has the depth camera info - (intrinsics, distortion, transforms) = read_yaml(yaml_file, ['cam0']) - else: - # The yaml file has only the hd camera info - (intrinsics, distortion, transforms) = read_yaml(yaml_file, ['cam0']) - - if not intrinsics or not distortion: - print >> sys.stderr, 'Failed to read camera intrinsics.' - return - - config_file = SCRIPT_DIR + '/../../astrobee/config/robots/' + args.robot + '.config' - print("Updating intrinsics in file: " + config_file) - - if args.only_depth_cam: - if update_intrinsics(config_file, 'perch_cam' if args.dock_cam else 'haz_cam', - intrinsics[0], distortion[0]): - print >> sys.stderr, 'Failed to update config file with depth camera intrinsics.' - return - else: - # Replace the intrinsics of the HD camera - if update_intrinsics(config_file, 'dock_cam' if args.dock_cam else 'nav_cam', - intrinsics[0], distortion[0][0]): - print >> sys.stderr, 'Failed to update config file with HD camera intrinsics.' - return - - # Replace the intrinsics of the depth camera in addition to the HD camera - if args.depth_cam: - if update_intrinsics(config_file, 'perch_cam' if args.dock_cam else 'haz_cam', - intrinsics[1], distortion[1]): - print >> sys.stderr, 'Failed to update config file with depth camera intrinsics.' 
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + default_calibration_target = SCRIPT_DIR + "/config/granite_april_tag.yaml" + + parser = argparse.ArgumentParser(description="Calibrate intrinsic parameters.") + parser.add_argument( + "robot", + help="The name of the robot to configure (i.e., put p4d to edit p4d.config).", + ) + parser.add_argument("bag", help="The bag file with calibration data.") + parser.add_argument( + "-d", + "--dock_cam", + dest="dock_cam", + action="store_true", + help="Calibrate dock camera.", + ) + parser.add_argument( + "-depth", + "--depth_cam", + dest="depth_cam", + action="store_true", + help="Calibrate respective depth camera.", + ) + parser.add_argument( + "--only_depth_cam", + dest="only_depth_cam", + action="store_true", + help="Calibrate only the depth camera (haz or perch).", + ) + parser.add_argument( + "--sci_cam", + dest="sci_cam", + action="store_true", + help="Calibrate nav_cam, haz_cam, and sci_cam.", + ) + parser.add_argument( + "-f", + "--from", + dest="from_time", + help="Use the bag data starting at this time, in seconds.", + ) + parser.add_argument( + "-t", + "--to", + dest="to_time", + help="Use the bag data until this time, in seconds.", + ) + parser.add_argument( + "--calibration_target", + dest="calibration_target", + help="Use this yaml file to desribe the calibration target, overriding the default: " + + default_calibration_target, + ) + parser.add_argument( + "--approx_sync", + dest="approx_sync", + help="Time tolerance for approximate image synchronization [s] (default: 0.02).", + ) + parser.add_argument( + "--nav_cam_topic", default="/hw/cam_nav", help="The nav cam topic name." + ) + parser.add_argument( + "--haz_cam_topic", + default="/hw/depth_haz/extended/amplitude_int", + help="The haz cam topic name.", + ) + parser.add_argument( + "--sci_cam_topic", default="/hw/cam_sci", help="The sci cam topic name." 
+ ) + parser.add_argument( + "-v", "--verbose", dest="verbose", action="store_true", help="Verbose mode." + ) + args = parser.parse_args() + + if args.calibration_target is None: + args.calibration_target = default_calibration_target + + # Sanity checks for sci_cam + if args.sci_cam: + if args.only_depth_cam or args.dock_cam: + print( + "The sci cam must be calibrated together with the nav and haz cams.", + file=sys.stderr, + ) + return + if not args.depth_cam: + print("Since sci cam is to be calibrated, also calibrate the haz cam.") + args.depth_cam = True + + # Setup files + yaml_file = calibrate_camera( + args.bag, + args.calibration_target, + args.from_time, + args.to_time, + args.approx_sync, + args.nav_cam_topic, + args.haz_cam_topic, + args.sci_cam_topic, + args.dock_cam, + args.depth_cam, + args.sci_cam, + args.only_depth_cam, + args.verbose, + ) + + if yaml_file is None: + print("Failed to run calibration.", file=sys.stderr) return - # Update the haz cam to nav cam transform. This was not implemented for - # the dock and perch cameras. - if not args.dock_cam: - trans = transforms[1] - t = trans[0:3,3] - q = transformations.quaternion_from_matrix(trans) - trans_name = 'hazcam_to_navcam_transform' - if lua_replace_transform(config_file, trans_name, (t, q)): - print >> sys.stderr, 'Will not update the value of ' + trans_name + \ - ' (this one is needed only for the dense mapper).' - return - - # If the sci cam is present, also update sci cam intrinsics and the - # sci cam to haz cam transform - if args.sci_cam: - - if update_intrinsics(config_file, 'sci_cam', - intrinsics[2], distortion[2]): - print >> sys.stderr, 'Failed to update config file with sci camera intrinsics.' 
+ print(("Reading calibration report file: " + yaml_file)) + + if args.sci_cam: + # The yaml file has info for nav, haz, and sci cameras + (intrinsics, distortion, transforms) = read_yaml( + yaml_file, ["cam0", "cam1", "cam2"] + ) + if len(intrinsics) < 3 or len(distortion) < 3: + print( + "Failed to read intrinsics for the nav, haz, and sci cameras.", + file=sys.stderr, + ) + return + elif args.depth_cam: + # The yaml file has info for both cameras + (intrinsics, distortion, transforms) = read_yaml(yaml_file, ["cam0", "cam1"]) + if len(intrinsics) < 2 or len(distortion) < 2: + print("Failed to read depth camera parameters.", file=sys.stderr) + return + elif args.only_depth_cam: + # The yaml file only has the depth camera info + (intrinsics, distortion, transforms) = read_yaml(yaml_file, ["cam0"]) + else: + # The yaml file has only the hd camera info + (intrinsics, distortion, transforms) = read_yaml(yaml_file, ["cam0"]) + + if not intrinsics or not distortion: + print("Failed to read camera intrinsics.", file=sys.stderr) + return + + config_file = SCRIPT_DIR + "/../../astrobee/config/robots/" + args.robot + ".config" + print(("Updating intrinsics in file: " + config_file)) + + if args.only_depth_cam: + if update_intrinsics( + config_file, + "perch_cam" if args.dock_cam else "haz_cam", + intrinsics[0], + distortion[0], + ): + print( + "Failed to update config file with depth camera intrinsics.", + file=sys.stderr, + ) return - - trans = transforms[2] - t = trans[0:3,3] - q = transformations.quaternion_from_matrix(trans) - trans_name = 'scicam_to_hazcam_transform' - if lua_replace_transform(config_file, trans_name, (t, q)): - print >> sys.stderr, 'Will not update the value of ' + trans_name + \ - ' (this one is needed only for the dense mapper).' 
+ else: + # Replace the intrinsics of the HD camera + if update_intrinsics( + config_file, + "dock_cam" if args.dock_cam else "nav_cam", + intrinsics[0], + distortion[0][0], + ): + print( + "Failed to update config file with HD camera intrinsics.", + file=sys.stderr, + ) return -if __name__ == '__main__': - main() + # Replace the intrinsics of the depth camera in addition to the HD camera + if args.depth_cam: + if update_intrinsics( + config_file, + "perch_cam" if args.dock_cam else "haz_cam", + intrinsics[1], + distortion[1], + ): + print( + "Failed to update config file with depth camera intrinsics.", + file=sys.stderr, + ) + return + + # Update the haz cam to nav cam transform. This was not implemented for + # the dock and perch cameras. + if not args.dock_cam: + trans = transforms[1] + t = trans[0:3, 3] + q = transformations.quaternion_from_matrix(trans) + trans_name = "hazcam_to_navcam_transform" + if lua_replace_transform(config_file, trans_name, (t, q)): + print( + "Will not update the value of " + + trans_name + + " (this one is needed only for the dense mapper).", + file=sys.stderr, + ) + return + + # If the sci cam is present, also update sci cam intrinsics and the + # sci cam to haz cam transform + if args.sci_cam: + + if update_intrinsics( + config_file, "sci_cam", intrinsics[2], distortion[2] + ): + print( + "Failed to update config file with sci camera intrinsics.", + file=sys.stderr, + ) + return + + trans = transforms[2] + t = trans[0:3, 3] + q = transformations.quaternion_from_matrix(trans) + trans_name = "scicam_to_hazcam_transform" + if lua_replace_transform(config_file, trans_name, (t, q)): + print( + "Will not update the value of " + + trans_name + + " (this one is needed only for the dense mapper).", + file=sys.stderr, + ) + return + + +if __name__ == "__main__": + main() diff --git a/scripts/calibrate/markers_to_Kalibr.py b/scripts/calibrate/markers_to_Kalibr.py index 0f74018d1e..b6947d5e8f 100755 --- a/scripts/calibrate/markers_to_Kalibr.py 
+++ b/scripts/calibrate/markers_to_Kalibr.py @@ -6,28 +6,36 @@ import re import subprocess import sys + import yaml def main(): - try: - path = os.environ['BUILD_PATH'] - except KeyError: - print >> sys.stderr, 'BUILD_PATH is not defined' - return 0 - - SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) - - parser = argparse.ArgumentParser(description='Convert Alvar to Kalibr.') - parser.add_argument('--config', default=SCRIPT_DIR + '/../../astrobee/config/dock_markers_specs.config', dest='config', help='The target configuration file with AR markers specs. Default = dock_markers_specs.config', required=False) - args = parser.parse_args() - - #Setup Files - target_file = SCRIPT_DIR + '/config/granite_april_tag.yaml' - - cmd = ( (path + '/bin/markers_to_Kalibr %s %s') % (args.config, target_file)) - print(cmd) - ret = subprocess.call(cmd.split()) - -if __name__ == '__main__': - main() + try: + path = os.environ["BUILD_PATH"] + except KeyError: + print("BUILD_PATH is not defined", file=sys.stderr) + return 0 + + SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) + + parser = argparse.ArgumentParser(description="Convert Alvar to Kalibr.") + parser.add_argument( + "--config", + default=SCRIPT_DIR + "/../../astrobee/config/dock_markers_specs.config", + dest="config", + help="The target configuration file with AR markers specs. 
Default = dock_markers_specs.config", + required=False, + ) + args = parser.parse_args() + + # Setup Files + target_file = SCRIPT_DIR + "/config/granite_april_tag.yaml" + + cmd = (path + "/bin/markers_to_Kalibr %s %s") % (args.config, target_file) + print(cmd) + ret = subprocess.call(cmd.split()) + + +if __name__ == "__main__": + main() diff --git a/scripts/debug/show_avg_features.py b/scripts/debug/show_avg_features.py index 8b65239d4d..6bcc5a8c4f 100755 --- a/scripts/debug/show_avg_features.py +++ b/scripts/debug/show_avg_features.py @@ -1,24 +1,26 @@ #!/usr/bin/env python -import rospy -import signal -import time -import sys import argparse -import rosgraph +import signal import socket +import sys +import time + import numpy as np -from ff_msgs.msg import EkfState, VisualLandmarks, Feature2dArray +import rosgraph +import rospy +from ff_msgs.msg import EkfState, Feature2dArray, VisualLandmarks ARRAY_SIZE = 75 DEFAULT_SPAN = 5 -ML_TOPIC = '/loc/ml/features' -OF_TOPIC = '/loc/of/features' -EKF_TOPIC = '/gnc/ekf' +ML_TOPIC = "/loc/ml/features" +OF_TOPIC = "/loc/of/features" +EKF_TOPIC = "/gnc/ekf" favg = None + class FeatureAverager(object): def __init__(self, span): self.span = span @@ -27,25 +29,32 @@ def __init__(self, span): self.timer = None self.ml_data = { - 'ml_time': lambda x: rospy.Time(x.header.stamp.secs, x.header.stamp.nsecs).to_sec(), - 'ml_count': lambda x: len(x.landmarks) + "ml_time": lambda x: rospy.Time( + x.header.stamp.secs, x.header.stamp.nsecs + ).to_sec(), + "ml_count": lambda x: len(x.landmarks), } self.of_data = { - 'of_time': lambda x: rospy.Time(x.header.stamp.secs, x.header.stamp.nsecs).to_sec(), - 'of_count': lambda x: len(x.feature_array) + "of_time": lambda x: rospy.Time( + x.header.stamp.secs, x.header.stamp.nsecs + ).to_sec(), + "of_count": lambda x: len(x.feature_array), } self.ekf_data = { - 'ekf_time': lambda x: rospy.Time(x.header.stamp.secs, x.header.stamp.nsecs).to_sec(), - 'ekf_ml_count': lambda x: x.ml_count, - 
'ekf_of_count': lambda x: x.of_count + "ekf_time": lambda x: rospy.Time( + x.header.stamp.secs, x.header.stamp.nsecs + ).to_sec(), + "ekf_ml_count": lambda x: x.ml_count, + "ekf_of_count": lambda x: x.of_count, } self.callbacks = [self.ml_data, self.of_data, self.ekf_data] for d in self.callbacks: - for k in d.keys(): self.data[k] = np.full(ARRAY_SIZE * self.span, 1e-10) + for k in list(d.keys()): + self.data[k] = np.full(ARRAY_SIZE * self.span, 1e-10) def __add_data(self, callbacks, data): for key in callbacks: @@ -76,31 +85,34 @@ def __check_topics(self): topics = rospy.get_published_topics() for t in topics: - if t[0] == ML_TOPIC: ml_found = True - if t[0] == OF_TOPIC: of_found = True - if t[0] == EKF_TOPIC: ekf_found = True + if t[0] == ML_TOPIC: + ml_found = True + if t[0] == OF_TOPIC: + of_found = True + if t[0] == EKF_TOPIC: + ekf_found = True if not ml_found: - print ' > Topic %s was not found' % ML_TOPIC + print((" > Topic %s was not found" % ML_TOPIC)) if not of_found: - print ' > Topic %s was not found' % OF_TOPIC + print((" > Topic %s was not found" % OF_TOPIC)) if not ekf_found: - print ' > Topic %s was not found' % EKF_TOPIC + print((" > Topic %s was not found" % EKF_TOPIC)) return ml_found and of_found and ekf_found def __delete_last_line(self): - #cursor up one line - sys.stdout.write('\x1b[1A') + # cursor up one line + sys.stdout.write("\x1b[1A") - #delete last line - sys.stdout.write('\x1b[2K') + # delete last line + sys.stdout.write("\x1b[2K") def is_ros_running(self): try: - rosgraph.Master('/rostopic').getPid() + rosgraph.Master("/rostopic").getPid() except socket.error: return False @@ -108,25 +120,30 @@ def is_ros_running(self): def start(self): if not self.is_ros_running(): - print " > Could not find ROS master" + print(" > Could not find ROS master") return False rospy.init_node("loc_feature_avg", anonymous=False, disable_signals=True) if not self.__check_topics(): - print " > Some topics were not found. Behavior may be erratic. 
Do you wish to continue?" - raw_input(" > Press any key to continue or Ctrl + C the program") + print( + " > Some topics were not found. Behavior may be erratic. Do you wish to continue?" + ) + eval(input(" > Press any key to continue or Ctrl + C the program")) sys.stdout.flush() - self.subscribers['ml'] = rospy.Subscriber(ML_TOPIC, - VisualLandmarks, self.__ml_callback) - self.subscribers['of'] = rospy.Subscriber(OF_TOPIC, - Feature2dArray, self.__of_callback) - self.subscribers['ekf'] = rospy.Subscriber(EKF_TOPIC, - EkfState, self.__ekf_callback) - - print '\nTimespan: %d second%s' % (self.span, 's' if self.span > 1 else '') - print "\nAverage features per second:\n" + self.subscribers["ml"] = rospy.Subscriber( + ML_TOPIC, VisualLandmarks, self.__ml_callback + ) + self.subscribers["of"] = rospy.Subscriber( + OF_TOPIC, Feature2dArray, self.__of_callback + ) + self.subscribers["ekf"] = rospy.Subscriber( + EKF_TOPIC, EkfState, self.__ekf_callback + ) + + print(("\nTimespan: %d second%s" % (self.span, "s" if self.span > 1 else ""))) + print("\nAverage features per second:\n") sys.stdout.flush() self.timer = rospy.Timer(rospy.Duration(1), self.average_features) @@ -137,37 +154,50 @@ def average_features(self, event): idx = ml_count = 0 t = event.current_real.to_sec() - ml_avg = self.__get_average(t, self.data['ml_time'], self.data['ml_count']) - of_avg = self.__get_average(t, self.data['of_time'], self.data['of_count']) - ekf_ml_avg = self.__get_average(t, self.data['ekf_time'], self.data['ekf_ml_count']) - ekf_of_avg = self.__get_average(t, self.data['ekf_time'], self.data['ekf_of_count']) - - msg = '\rRAW ML: %s | RAW OF: %s | EKF ML: %s | EKF OF: %s' % (ml_avg, - of_avg, ekf_ml_avg, ekf_of_avg) + ml_avg = self.__get_average(t, self.data["ml_time"], self.data["ml_count"]) + of_avg = self.__get_average(t, self.data["of_time"], self.data["of_count"]) + ekf_ml_avg = self.__get_average( + t, self.data["ekf_time"], self.data["ekf_ml_count"] + ) + ekf_of_avg = 
self.__get_average( + t, self.data["ekf_time"], self.data["ekf_of_count"] + ) + + msg = "\rRAW ML: %s | RAW OF: %s | EKF ML: %s | EKF OF: %s" % ( + ml_avg, + of_avg, + ekf_ml_avg, + ekf_of_avg, + ) # Delete old line - sys.stdout.write('\x1b[2K') + sys.stdout.write("\x1b[2K") # Write new line sys.stdout.write(msg) sys.stdout.flush() + def main(): - parser = argparse.ArgumentParser(description='Average Localization Feature Display') + parser = argparse.ArgumentParser(description="Average Localization Feature Display") - parser.add_argument('--span', dest='span', action='store', - help='Timespan (in seconds) to average features') + parser.add_argument( + "--span", + dest="span", + action="store", + help="Timespan (in seconds) to average features", + ) args, unknown = parser.parse_known_args() if args.span == None: args.span = DEFAULT_SPAN elif not args.span.isdigit(): - print " > Timespan must be an integer value higher than zero" + print(" > Timespan must be an integer value higher than zero") exit() else: args.span = int(args.span) if args.span < 1: - print " > Timespan must be higher than 0" + print(" > Timespan must be higher than 0") exit() global favg @@ -176,16 +206,17 @@ def main(): if favg.start(): rospy.spin() else: - print " > Unable to complete node start sequence" + print(" > Unable to complete node start sequence") time.sleep(0.5) def handler(signum, frame): - print '\n\nShutting down gracefully...' 
+ print("\n\nShutting down gracefully...") favg.timer.shutdown() rospy.signal_shutdown("User stopped the program") -if __name__ == '__main__': + +if __name__ == "__main__": signal.signal(signal.SIGINT, handler) main() diff --git a/scripts/docker/astrobee_bionic.Dockerfile b/scripts/docker/astrobee.Dockerfile similarity index 85% rename from scripts/docker/astrobee_bionic.Dockerfile rename to scripts/docker/astrobee.Dockerfile index e9aa8daa69..1039a98530 100755 --- a/scripts/docker/astrobee_bionic.Dockerfile +++ b/scripts/docker/astrobee.Dockerfile @@ -2,7 +2,8 @@ # This image builds on top of the base melodic image building the code. # You must set the docker context to be the repository root directory -FROM astrobee/astrobee:base-latest-bionic +ARG UBUNTU_VERSION=16.04 +FROM astrobee/astrobee:base-latest-ubuntu${UBUNTU_VERSION} ENV USERNAME astrobee diff --git a/scripts/docker/astrobee_base_focal.Dockerfile b/scripts/docker/astrobee_base.Dockerfile similarity index 75% rename from scripts/docker/astrobee_base_focal.Dockerfile rename to scripts/docker/astrobee_base.Dockerfile index a2678f62bf..227b5aecb7 100755 --- a/scripts/docker/astrobee_base_focal.Dockerfile +++ b/scripts/docker/astrobee_base.Dockerfile @@ -4,7 +4,11 @@ # but it doesn't copy or build the entire code. 
# You must set the docker context to be the repository root directory -FROM nvidia/opengl:1.0-glvnd-runtime-ubuntu20.04 +ARG UBUNTU_VERSION=16.04 +FROM nvidia/opengl:1.0-glvnd-runtime-ubuntu$UBUNTU_VERSION + +ARG ROS_VERSION=kinetic +ARG PYTHON='' # try to suppress certain warnings during apt-get calls ARG DEBIAN_FRONTEND=noninteractive @@ -14,17 +18,17 @@ RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selectio RUN apt-get update \ && apt-get install -y apt-utils 2>&1 | grep -v "debconf: delaying package configuration, since apt-utils is not installed" \ && apt-get install -y \ - build-essential \ - git \ - lsb-release \ - sudo \ - wget \ + build-essential \ + git \ + lsb-release \ + sudo \ + wget \ && rm -rf /var/lib/apt/lists/* # suppress detached head warnings later RUN git config --global advice.detachedHead false -# Install ROS Noetic---------------------------------------------------------------- +# Install ROS -------------------------------------------------------------------- COPY ./scripts/setup/*.sh /setup/astrobee/ # this command is expected to have output on stderr, so redirect to suppress warning @@ -34,8 +38,8 @@ RUN apt-get update \ && apt-get install -y \ debhelper \ libtinyxml-dev \ - ros-noetic-desktop \ - python3-rosdep \ + ros-${ROS_VERSION}-desktop \ + python${PYTHON}-rosdep \ && rm -rf /var/lib/apt/lists/* # Install Astrobee---------------------------------------------------------------- @@ -54,14 +58,14 @@ RUN /setup/astrobee/install_desktop_packages.sh \ #Add new sudo user ENV USERNAME astrobee RUN useradd -m $USERNAME && \ - echo "$USERNAME:$USERNAME" | chpasswd && \ - usermod --shell /bin/bash $USERNAME && \ - usermod -aG sudo $USERNAME && \ - echo "$USERNAME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/$USERNAME && \ - chmod 0440 /etc/sudoers.d/$USERNAME && \ - # Replace 1000 with your user/group id - usermod --uid 1000 $USERNAME && \ - groupmod --gid 1000 $USERNAME + echo "$USERNAME:$USERNAME" | chpasswd && \ + 
usermod --shell /bin/bash $USERNAME && \ + usermod -aG sudo $USERNAME && \ + echo "$USERNAME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/$USERNAME && \ + chmod 0440 /etc/sudoers.d/$USERNAME && \ + # Replace 1000 with your user/group id + usermod --uid 1000 $USERNAME && \ + groupmod --gid 1000 $USERNAME #Add the entrypoint for docker RUN echo "#!/bin/bash\nset -e\n\nsource \"/opt/ros/noetic/setup.bash\"\nsource \"/build/astrobee/devel/setup.bash\"\nexport ASTROBEE_CONFIG_DIR=\"/src/astrobee/astrobee/config\"\nexec \"\$@\"" > /astrobee_init.sh && \ diff --git a/scripts/docker/astrobee_base_bionic.Dockerfile b/scripts/docker/astrobee_base_bionic.Dockerfile deleted file mode 100755 index e5d50173dd..0000000000 --- a/scripts/docker/astrobee_base_bionic.Dockerfile +++ /dev/null @@ -1,70 +0,0 @@ -# This will set up an Astrobee melodic docker container using the non-NASA install -# instructions. -# This image is the base, meaning that it contains all the installation context, -# but it doesn't copy or build the entire code. 
-# You must set the docker context to be the repository root directory - -FROM nvidia/opengl:1.0-glvnd-runtime-ubuntu18.04 - -# try to suppress certain warnings during apt-get calls -ARG DEBIAN_FRONTEND=noninteractive -RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections - -# install of apt-utils suppresses bogus warnings later -RUN apt-get update \ - && apt-get install -y apt-utils 2>&1 | grep -v "debconf: delaying package configuration, since apt-utils is not installed" \ - && apt-get install -y \ - build-essential \ - git \ - lsb-release \ - sudo \ - wget \ - && rm -rf /var/lib/apt/lists/* - -# suppress detached head warnings later -RUN git config --global advice.detachedHead false - -# Install ROS Melodic---------------------------------------------------------------- -COPY ./scripts/setup/*.sh /setup/astrobee/ - -# this command is expected to have output on stderr, so redirect to suppress warning -RUN /setup/astrobee/add_ros_repository.sh >/dev/null 2>&1 - -RUN apt-get update \ - && apt-get install -y \ - debhelper \ - libtinyxml-dev \ - ros-melodic-desktop \ - python-rosdep \ - && rm -rf /var/lib/apt/lists/* - -# Install Astrobee---------------------------------------------------------------- -COPY ./scripts/setup/debians /setup/astrobee/debians - -RUN apt-get update \ - && /setup/astrobee/debians/build_install_debians.sh \ - && rm -rf /var/lib/apt/lists/* \ - && rm -rf /setup/astrobee/debians - -COPY ./scripts/setup/packages_*.lst /setup/astrobee/ -# note apt-get update is run within the following shell script -RUN /setup/astrobee/install_desktop_packages.sh \ - && rm -rf /var/lib/apt/lists/* - -#Add new sudo user -ENV USERNAME astrobee -RUN useradd -m $USERNAME && \ - echo "$USERNAME:$USERNAME" | chpasswd && \ - usermod --shell /bin/bash $USERNAME && \ - usermod -aG sudo $USERNAME && \ - echo "$USERNAME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/$USERNAME && \ - chmod 0440 /etc/sudoers.d/$USERNAME && \ - # Replace 1000 with your 
user/group id - usermod --uid 1000 $USERNAME && \ - groupmod --gid 1000 $USERNAME - -#Add the entrypoint for docker -RUN echo "#!/bin/bash\nset -e\n\nsource \"/opt/ros/melodic/setup.bash\"\nsource \"/build/astrobee/devel/setup.bash\"\nexport ASTROBEE_CONFIG_DIR=\"/src/astrobee/astrobee/config\"\nexec \"\$@\"" > /astrobee_init.sh && \ - chmod +x /astrobee_init.sh && \ - rosdep init && \ - rosdep update 2>&1 | egrep -v 'as root|fix-permissions' \ No newline at end of file diff --git a/scripts/docker/astrobee_base_xenial.Dockerfile b/scripts/docker/astrobee_base_xenial.Dockerfile deleted file mode 100644 index 08bc1486e3..0000000000 --- a/scripts/docker/astrobee_base_xenial.Dockerfile +++ /dev/null @@ -1,64 +0,0 @@ -# This will set up an Astrobee kinetic docker container using the non-NASA install -# instructions. -# This image is the base, meaning that it contains all the installation context, -# but it doesn't copy or build the entire code. -# This image builds on top of the base kinetic image building the code. 
- -# You must set the docker context to be the repository root directory - -FROM nvidia/opengl:1.0-glvnd-runtime-ubuntu16.04 - -# try to suppress certain warnings during apt-get calls -ARG DEBIAN_FRONTEND=noninteractive -RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections - -# install of apt-utils suppresses bogus warnings later -RUN apt-get update \ - && apt-get install -y apt-utils 2>&1 | grep -v "debconf: delaying package configuration, since apt-utils is not installed" \ - && apt-get install -y \ - build-essential \ - git \ - lsb-release \ - sudo \ - wget \ - && rm -rf /var/lib/apt/lists/* -COPY ./scripts/setup/*.sh /setup/astrobee/ -COPY ./scripts/setup/debians /setup/astrobee/debians - -# this command is expected to have output on stderr, so redirect to suppress warning -RUN /setup/astrobee/add_ros_repository.sh >/dev/null 2>&1 - -RUN apt-get update && apt-get install -y \ - libtinyxml-dev \ - ros-kinetic-opencv3 \ - && rm -rf /var/lib/apt/lists/* - -# suppress detached head warnings later -RUN git config --global advice.detachedHead false - -RUN apt-get update \ - && /setup/astrobee/debians/build_install_debians.sh \ - && rm -rf /var/lib/apt/lists/* - -COPY ./scripts/setup/packages_*.lst /setup/astrobee/ -# note apt-get update is run within the following shell script -RUN /setup/astrobee/install_desktop_packages.sh \ - && rm -rf /var/lib/apt/lists/* - -#Add new sudo user -ENV USERNAME astrobee -RUN useradd -m $USERNAME && \ - echo "$USERNAME:$USERNAME" | chpasswd && \ - usermod --shell /bin/bash $USERNAME && \ - usermod -aG sudo $USERNAME && \ - echo "$USERNAME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers.d/$USERNAME && \ - chmod 0440 /etc/sudoers.d/$USERNAME && \ - # Replace 1000 with your user/group id - usermod --uid 1000 $USERNAME && \ - groupmod --gid 1000 $USERNAME - -#Add the entrypoint for docker -RUN echo "#!/bin/bash\nset -e\n\nsource \"/opt/ros/kinetic/setup.bash\"\nsource \"/build/astrobee/devel/setup.bash\"\nexport 
ASTROBEE_CONFIG_DIR=\"/src/astrobee/astrobee/config\"\nexec \"\$@\"" > /astrobee_init.sh && \ - chmod +x /astrobee_init.sh && \ - rosdep init && \ - rosdep update 2>&1 | egrep -v 'as root|fix-permissions' \ No newline at end of file diff --git a/scripts/docker/astrobee_focal.Dockerfile b/scripts/docker/astrobee_focal.Dockerfile deleted file mode 100755 index 5e458b8817..0000000000 --- a/scripts/docker/astrobee_focal.Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -# This will set up an Astrobee melodic docker container using the non-NASA install instructions. -# This image builds on top of the base melodic image building the code. -# You must set the docker context to be the repository root directory - -FROM astrobee/astrobee:base-latest-focal - -ENV USERNAME astrobee - -COPY . /src/astrobee -RUN /src/astrobee/scripts/configure.sh -l -F -D -T -p /opt/astrobee -b /build/astrobee -RUN cd /build/astrobee && make -j`nproc` - -COPY ./astrobee/resources /opt/astrobee/share/astrobee/resources diff --git a/scripts/docker/astrobee_xenial.Dockerfile b/scripts/docker/astrobee_xenial.Dockerfile deleted file mode 100644 index b442b36bd5..0000000000 --- a/scripts/docker/astrobee_xenial.Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -# This will set up an Astrobee kinetic docker container using the non-NASA install instructions. -# This image builds on top of the base kinetic image building the code. -# You must set the docker context to be the repository root directory - -FROM astrobee/astrobee:base-latest-xenial - -ENV USERNAME astrobee - -COPY . 
/src/astrobee -RUN /src/astrobee/scripts/configure.sh -l -F -D -T -p /opt/astrobee -b /build/astrobee -RUN cd /build/astrobee && make -j`nproc` - -COPY ./astrobee/resources /opt/astrobee/share/astrobee/resources diff --git a/scripts/docker/build.sh b/scripts/docker/build.sh index e32c8fc393..b776e5afc6 100755 --- a/scripts/docker/build.sh +++ b/scripts/docker/build.sh @@ -19,27 +19,29 @@ set -e # short help -usage_string="$scriptname [-h] [-n ]" -#[-t make_target] +usage_string="$scriptname [-h] [-x ]\ + [-b ] [-f ]" usage() { echo "usage: sysinfo_page [[[-a file ] [-i]] | [-h]]" } -os="xenial" +os=`cat /etc/os-release | grep -oP "(?<=VERSION_CODENAME=).*"` while [ "$1" != "" ]; do case $1 in + -x | --xenial ) os="xenial" + ;; -b | --bionic ) os="bionic" ;; -f | --focal ) os="focal" ;; - -h | --help ) usage - exit - ;; - * ) usage - exit 1 + -h | --help ) usage + exit + ;; + * ) usage + exit 1 esac shift done @@ -48,26 +50,30 @@ done thisdir=$(dirname "$(readlink -f "$0")") rootdir=${thisdir}/../.. 
echo "Astrobee path: "${rootdir}/ -if [ "$os" = "xenial" ]; then - docker build ${rootdir}/ \ - -f ${rootdir}/scripts/docker/astrobee_base_xenial.Dockerfile \ - -t astrobee/astrobee:base-latest-xenial - docker build ${rootdir}/ \ - -f ${rootdir}/scripts/docker/astrobee_xenial.Dockerfile \ - -t astrobee/astrobee:latest-xenial -elif [ "$os" = "bionic" ]; then - docker build ${rootdir}/ \ - -f ${rootdir}/scripts/docker/astrobee_base_bionic.Dockerfile \ - -t astrobee/astrobee:base-latest-bionic - docker build ${rootdir}/ \ - -f ${rootdir}/scripts/docker/astrobee_bionic.Dockerfile \ - -t astrobee/astrobee:latest-bionic + +UBUNTU_VERSION=16.04 +ROS_VERSION=kinetic +PYTHON='' + +if [ "$os" = "bionic" ]; then + UBUNTU_VERSION=18.04 + ROS_VERSION=melodic + PYTHON='' + elif [ "$os" = "focal" ]; then - docker build ${rootdir}/ \ - -f ${rootdir}/scripts/docker/astrobee_base_focal.Dockerfile \ - -t astrobee/astrobee:base-latest-focal - docker build ${rootdir}/ \ - -f ${rootdir}/scripts/docker/astrobee_focal.Dockerfile \ - -t astrobee/astrobee:latest-focal + UBUNTU_VERSION=20.04 + ROS_VERSION=noetic + PYTHON='3' fi +echo "Building Ubuntu $UBUNTU_VERSION image" +docker build ${rootdir}/ \ + -f ${rootdir}/scripts/docker/astrobee_base.Dockerfile \ + --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ + --build-arg ROS_VERSION=${ROS_VERSION} \ + --build-arg PYTHON=${PYTHON} \ + -t astrobee/astrobee:base-latest-ubuntu${UBUNTU_VERSION} +docker build ${rootdir}/ \ + -f ${rootdir}/scripts/docker/astrobee.Dockerfile \ + --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ + -t astrobee/astrobee:latest-ubuntu${UBUNTU_VERSION} \ No newline at end of file diff --git a/scripts/docker/readme.md b/scripts/docker/readme.md index e3307b1137..0bea4c6d5b 100644 --- a/scripts/docker/readme.md +++ b/scripts/docker/readme.md @@ -12,13 +12,18 @@ The docker image for the astrobee FSW is divided throughout 2 docker files. 
Available docker files: - astrobee_base_xenial - Contains installation of all astrobee dependencies the Ubuntu 16.04 + ROS kinetic setup. - astrobee_base_bionic - Contains installation of all astrobee dependencies the Ubuntu 18.04 + ROS melodic setup. - astrobee_base_focal - Contains installation of all astrobee dependencies the Ubuntu 20.04 + ROS noetic setup. +- `astrobee_base.Dockerfile` - Contains installation of all astrobee dependencies the Ubuntu + ROS setup. +- `astrobee.Dockerfile` - Builds the astrobee FSW code on top of astrobee_base. - astrobee_xenial - Builds the astrobee FSW code on top of astrobee_base_xenial. - astrobee_bionic - Builds the astrobee FSW code on top of astrobee_base_bionic. - astrobee_focal - Builds the astrobee FSW code on top of astrobee_base_focal. +The Docker files accept the following version args (note that they must match up): + +- `UBUNTU_VERSION` - The version of Ubuntu to use. Valid values are "16.04", "18.04", and "20.04". +- `ROS_VERSION` - The version of ROS to use. Valid values are "kinetic", "melodic", and "noetic". +- `PYTHON` - The version of Python to use. Valid values are "" (an empty string representing Python 2) and "3". + +If `UBUNTU_VERSION` is `"16.04"`, `ROS_VERSION` and `PYTHON` must be `"kinetic"` and `""` respectively. +If `UBUNTU_VERSION` is `"18.04"`, `ROS_VERSION` and `PYTHON` must be `"melodic"` and `""` respectively. +If `UBUNTU_VERSION` is `"20.04"`, `ROS_VERSION` and `PYTHON` must be `"noetic"` and `"3"` respectively. ## Building the docker images @@ -26,14 +31,17 @@ Available docker files: To build the docker images, run: ./build.sh -The option --bionic is used for ubuntu 18 docker images, and --focal is used for ubuntu 20 docker images. +The build script will automatically detect the current Ubuntu OS version and define the docker files variables +`UBUNTU_VERSION`, `ROS_VERSION`, and `PYTHON`. 
If a specific version is desired, the option --xenial, --bionic, +and --focal is used for ubuntu 16.04, 18.04, and 20.04 docker images, respectively. ## Run the container To run the docker container: ./run.sh -The option --bionic is used for ubuntu 18 docker images, and --focal is used for ubuntu 20 docker images. +It will automatically detect the current Ubuntu OS version. If a specific version is desired, the option +--xenial, --bionic, and --focal is used for ubuntu 16.04, 18.04, and 20.04 docker images, respectively. To open another terminal inside the docker container: diff --git a/scripts/docker/run.sh b/scripts/docker/run.sh index aa332cd22e..501bac6007 100755 --- a/scripts/docker/run.sh +++ b/scripts/docker/run.sh @@ -18,17 +18,20 @@ # under the License. # short help -usage_string="$scriptname [-h] [-n ]" +usage_string="$scriptname [-h] [-x ]\ + [-b ] [-f ]" #[-t make_target] usage() { echo "usage: sysinfo_page [[[-a file ] [-i]] | [-h]]" } -os="xenial" +os=`cat /etc/os-release | grep -oP "(?<=VERSION_CODENAME=).*"` while [ "$1" != "" ]; do case $1 in + -x | --xenial ) os="xenial" + ;; -b | --bionic ) os="bionic" ;; -f | --focal ) os="focal" @@ -59,7 +62,7 @@ docker run -it --rm --name astrobee \ --env="DISPLAY" \ --user="astrobee" \ --gpus all \ - astrobee/astrobee:latest-xenial \ + astrobee/astrobee:latest-ubuntu16.04 \ /astrobee_init.sh roslaunch astrobee sim.launch dds:=false elif [ "$os" = "bionic" ]; then docker run -it --rm --name astrobee \ @@ -68,7 +71,7 @@ docker run -it --rm --name astrobee \ --env="XAUTHORITY=${XAUTH}" \ --env="DISPLAY" \ --user="astrobee" \ - astrobee/astrobee:latest-bionic \ + astrobee/astrobee:latest-ubuntu18.04 \ /astrobee_init.sh roslaunch astrobee sim.launch dds:=false elif [ "$os" = "focal" ]; then docker run -it --rm --name astrobee \ @@ -77,6 +80,6 @@ docker run -it --rm --name astrobee \ --env="XAUTHORITY=${XAUTH}" \ --env="DISPLAY" \ --user="astrobee" \ - astrobee/astrobee:latest-focal \ + 
astrobee/astrobee:latest-ubuntu20.04 \ /astrobee_init.sh roslaunch astrobee sim.launch dds:=false fi diff --git a/scripts/docker/test_astrobee_bionic.Dockerfile b/scripts/docker/test_astrobee.Dockerfile similarity index 72% rename from scripts/docker/test_astrobee_bionic.Dockerfile rename to scripts/docker/test_astrobee.Dockerfile index 3817927a39..04c5b3949e 100644 --- a/scripts/docker/test_astrobee_bionic.Dockerfile +++ b/scripts/docker/test_astrobee.Dockerfile @@ -1,7 +1,8 @@ # This will test an Astrobee bionic docker container. # You must set the docker context to be the repository root directory -FROM astrobee/astrobee:latest-bionic +ARG UBUNTU_VERSION=16.04 +FROM astrobee/astrobee:latest-ubuntu${UBUNTU_VERSION} # Run tests RUN cd /build/astrobee && make -j`nproc` tests && make -j`nproc` test diff --git a/scripts/docker/test_astrobee_focal.Dockerfile b/scripts/docker/test_astrobee_focal.Dockerfile deleted file mode 100644 index 1c50520e9e..0000000000 --- a/scripts/docker/test_astrobee_focal.Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -# This will test an Astrobee focal docker container. -# You must set the docker context to be the repository root directory - -FROM astrobee/astrobee:latest-focal - -# Run tests -RUN cd /build/astrobee && make -j`nproc` tests && make -j`nproc` test diff --git a/scripts/docker/test_astrobee_xenial.Dockerfile b/scripts/docker/test_astrobee_xenial.Dockerfile deleted file mode 100644 index b6289e8b1d..0000000000 --- a/scripts/docker/test_astrobee_xenial.Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -# This will test an Astrobee xenial docker container. -# You must set the docker context to be the repository root directory - -FROM astrobee/astrobee:latest-xenial - -# Run tests - -COPY . 
/src/astrobee -RUN cd /build/astrobee && make -j`nproc` tests && make -j`nproc` test diff --git a/scripts/git/cpplint.py b/scripts/git/cpplint.py index 1b04b2946b..8abae00375 100755 --- a/scripts/git/cpplint.py +++ b/scripts/git/cpplint.py @@ -177,217 +177,223 @@ # If you add a new error message with a new category, add it to the list # here! cpplint_unittest.py should tell you if you forget to do this. _ERROR_CATEGORIES = [ - 'build/class', - 'build/c++11', - 'build/deprecated', - 'build/endif_comment', - 'build/explicit_make_pair', - 'build/forward_decl', - 'build/header_guard', - 'build/include', - 'build/include_alpha', - 'build/include_order', - 'build/include_what_you_use', - 'build/namespaces', - 'build/printf_format', - 'build/storage_class', - 'legal/copyright', - 'readability/alt_tokens', - 'readability/braces', - 'readability/casting', - 'readability/check', - 'readability/constructors', - 'readability/fn_size', - 'readability/function', - 'readability/inheritance', - 'readability/multiline_comment', - 'readability/multiline_string', - 'readability/namespace', - 'readability/nolint', - 'readability/nul', - 'readability/streams', - 'readability/todo', - 'readability/utf8', - 'runtime/arrays', - 'runtime/casting', - 'runtime/explicit', - 'runtime/int', - 'runtime/init', - 'runtime/invalid_increment', - 'runtime/member_string_references', - 'runtime/memset', - 'runtime/indentation_namespace', - 'runtime/operator', - 'runtime/printf', - 'runtime/printf_format', - 'runtime/references', - 'runtime/string', - 'runtime/threadsafe_fn', - 'runtime/vlog', - 'whitespace/blank_line', - 'whitespace/braces', - 'whitespace/comma', - 'whitespace/comments', - 'whitespace/empty_conditional_body', - 'whitespace/empty_loop_body', - 'whitespace/end_of_line', - 'whitespace/ending_newline', - 'whitespace/forcolon', - 'whitespace/indent', - 'whitespace/line_length', - 'whitespace/newline', - 'whitespace/operators', - 'whitespace/parens', - 'whitespace/semicolon', - 
'whitespace/tab', - 'whitespace/todo' - ] + "build/class", + "build/c++11", + "build/deprecated", + "build/endif_comment", + "build/explicit_make_pair", + "build/forward_decl", + "build/header_guard", + "build/include", + "build/include_alpha", + "build/include_order", + "build/include_what_you_use", + "build/namespaces", + "build/printf_format", + "build/storage_class", + "legal/copyright", + "readability/alt_tokens", + "readability/braces", + "readability/casting", + "readability/check", + "readability/constructors", + "readability/fn_size", + "readability/function", + "readability/inheritance", + "readability/multiline_comment", + "readability/multiline_string", + "readability/namespace", + "readability/nolint", + "readability/nul", + "readability/streams", + "readability/todo", + "readability/utf8", + "runtime/arrays", + "runtime/casting", + "runtime/explicit", + "runtime/int", + "runtime/init", + "runtime/invalid_increment", + "runtime/member_string_references", + "runtime/memset", + "runtime/indentation_namespace", + "runtime/operator", + "runtime/printf", + "runtime/printf_format", + "runtime/references", + "runtime/string", + "runtime/threadsafe_fn", + "runtime/vlog", + "whitespace/blank_line", + "whitespace/braces", + "whitespace/comma", + "whitespace/comments", + "whitespace/empty_conditional_body", + "whitespace/empty_loop_body", + "whitespace/end_of_line", + "whitespace/ending_newline", + "whitespace/forcolon", + "whitespace/indent", + "whitespace/line_length", + "whitespace/newline", + "whitespace/operators", + "whitespace/parens", + "whitespace/semicolon", + "whitespace/tab", + "whitespace/todo", +] # The default state of the category filter. This is overridden by the --filter= # flag. By default all errors are on, so only add here categories that should be # off by default (i.e., categories that must be enabled by the --filter= flags). # All entries here should start with a '-' or '+', as in the --filter= flag. 
-_DEFAULT_FILTERS = ['-build/include_alpha', '-readability/streams', '-runtime/indentation_namespace'] +_DEFAULT_FILTERS = [ + "-build/include_alpha", + "-readability/streams", + "-runtime/indentation_namespace", +] # We used to check for high-bit characters, but after much discussion we # decided those were OK, as long as they were in UTF-8 and didn't represent # hard-coded international strings, which belong in a separate i18n file. # C++ headers -_CPP_HEADERS = frozenset([ - # Legacy - 'algobase.h', - 'algo.h', - 'alloc.h', - 'builtinbuf.h', - 'bvector.h', - 'complex.h', - 'defalloc.h', - 'deque.h', - 'editbuf.h', - 'fstream.h', - 'function.h', - 'hash_map', - 'hash_map.h', - 'hash_set', - 'hash_set.h', - 'hashtable.h', - 'heap.h', - 'indstream.h', - 'iomanip.h', - 'iostream.h', - 'istream.h', - 'iterator.h', - 'list.h', - 'map.h', - 'multimap.h', - 'multiset.h', - 'ostream.h', - 'pair.h', - 'parsestream.h', - 'pfstream.h', - 'procbuf.h', - 'pthread_alloc', - 'pthread_alloc.h', - 'rope', - 'rope.h', - 'ropeimpl.h', - 'set.h', - 'slist', - 'slist.h', - 'stack.h', - 'stdiostream.h', - 'stl_alloc.h', - 'stl_relops.h', - 'streambuf.h', - 'stream.h', - 'strfile.h', - 'strstream.h', - 'tempbuf.h', - 'tree.h', - 'type_traits.h', - 'vector.h', - # 17.6.1.2 C++ library headers - 'algorithm', - 'array', - 'atomic', - 'bitset', - 'chrono', - 'codecvt', - 'complex', - 'condition_variable', - 'deque', - 'exception', - 'forward_list', - 'fstream', - 'functional', - 'future', - 'initializer_list', - 'iomanip', - 'ios', - 'iosfwd', - 'iostream', - 'istream', - 'iterator', - 'limits', - 'list', - 'locale', - 'map', - 'memory', - 'mutex', - 'new', - 'numeric', - 'ostream', - 'queue', - 'random', - 'ratio', - 'regex', - 'set', - 'sstream', - 'stack', - 'stdexcept', - 'streambuf', - 'string', - 'strstream', - 'system_error', - 'thread', - 'tuple', - 'typeindex', - 'typeinfo', - 'type_traits', - 'unordered_map', - 'unordered_set', - 'utility', - 'valarray', - 'vector', - # 17.6.1.2 
C++ headers for C library facilities - 'cassert', - 'ccomplex', - 'cctype', - 'cerrno', - 'cfenv', - 'cfloat', - 'cinttypes', - 'ciso646', - 'climits', - 'clocale', - 'cmath', - 'csetjmp', - 'csignal', - 'cstdalign', - 'cstdarg', - 'cstdbool', - 'cstddef', - 'cstdint', - 'cstdio', - 'cstdlib', - 'cstring', - 'ctgmath', - 'ctime', - 'cuchar', - 'cwchar', - 'cwctype', - ]) +_CPP_HEADERS = frozenset( + [ + # Legacy + "algobase.h", + "algo.h", + "alloc.h", + "builtinbuf.h", + "bvector.h", + "complex.h", + "defalloc.h", + "deque.h", + "editbuf.h", + "fstream.h", + "function.h", + "hash_map", + "hash_map.h", + "hash_set", + "hash_set.h", + "hashtable.h", + "heap.h", + "indstream.h", + "iomanip.h", + "iostream.h", + "istream.h", + "iterator.h", + "list.h", + "map.h", + "multimap.h", + "multiset.h", + "ostream.h", + "pair.h", + "parsestream.h", + "pfstream.h", + "procbuf.h", + "pthread_alloc", + "pthread_alloc.h", + "rope", + "rope.h", + "ropeimpl.h", + "set.h", + "slist", + "slist.h", + "stack.h", + "stdiostream.h", + "stl_alloc.h", + "stl_relops.h", + "streambuf.h", + "stream.h", + "strfile.h", + "strstream.h", + "tempbuf.h", + "tree.h", + "type_traits.h", + "vector.h", + # 17.6.1.2 C++ library headers + "algorithm", + "array", + "atomic", + "bitset", + "chrono", + "codecvt", + "complex", + "condition_variable", + "deque", + "exception", + "forward_list", + "fstream", + "functional", + "future", + "initializer_list", + "iomanip", + "ios", + "iosfwd", + "iostream", + "istream", + "iterator", + "limits", + "list", + "locale", + "map", + "memory", + "mutex", + "new", + "numeric", + "ostream", + "queue", + "random", + "ratio", + "regex", + "set", + "sstream", + "stack", + "stdexcept", + "streambuf", + "string", + "strstream", + "system_error", + "thread", + "tuple", + "typeindex", + "typeinfo", + "type_traits", + "unordered_map", + "unordered_set", + "utility", + "valarray", + "vector", + # 17.6.1.2 C++ headers for C library facilities + "cassert", + "ccomplex", + "cctype", 
+ "cerrno", + "cfenv", + "cfloat", + "cinttypes", + "ciso646", + "climits", + "clocale", + "cmath", + "csetjmp", + "csignal", + "cstdalign", + "cstdarg", + "cstdbool", + "cstddef", + "cstdint", + "cstdio", + "cstdlib", + "cstring", + "ctgmath", + "ctime", + "cuchar", + "cwchar", + "cwctype", + ] +) # These headers are excluded from [build/include] and [build/include_order] @@ -396,40 +402,56 @@ # uppercase character, such as Python.h or nsStringAPI.h, for example). # - Lua headers. _THIRD_PARTY_HEADERS_PATTERN = re.compile( - r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$') + r"^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$" +) # Assertion macros. These are defined in base/logging.h and # testing/base/gunit.h. Note that the _M versions need to come first # for substring matching to work. _CHECK_MACROS = [ - 'DCHECK', 'CHECK', - 'EXPECT_TRUE_M', 'EXPECT_TRUE', - 'ASSERT_TRUE_M', 'ASSERT_TRUE', - 'EXPECT_FALSE_M', 'EXPECT_FALSE', - 'ASSERT_FALSE_M', 'ASSERT_FALSE', - ] + "DCHECK", + "CHECK", + "EXPECT_TRUE_M", + "EXPECT_TRUE", + "ASSERT_TRUE_M", + "ASSERT_TRUE", + "EXPECT_FALSE_M", + "EXPECT_FALSE", + "ASSERT_FALSE_M", + "ASSERT_FALSE", +] # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) -for op, replacement in [('==', 'EQ'), ('!=', 'NE'), - ('>=', 'GE'), ('>', 'GT'), - ('<=', 'LE'), ('<', 'LT')]: - _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement - _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement - _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement - _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement - -for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), - ('>=', 'LT'), ('>', 'LE'), - ('<=', 'GT'), ('<', 'GE')]: - _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement 
- _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement - _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement - _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement +for op, replacement in [ + ("==", "EQ"), + ("!=", "NE"), + (">=", "GE"), + (">", "GT"), + ("<=", "LE"), + ("<", "LT"), +]: + _CHECK_REPLACEMENT["DCHECK"][op] = "DCHECK_%s" % replacement + _CHECK_REPLACEMENT["CHECK"][op] = "CHECK_%s" % replacement + _CHECK_REPLACEMENT["EXPECT_TRUE"][op] = "EXPECT_%s" % replacement + _CHECK_REPLACEMENT["ASSERT_TRUE"][op] = "ASSERT_%s" % replacement + _CHECK_REPLACEMENT["EXPECT_TRUE_M"][op] = "EXPECT_%s_M" % replacement + _CHECK_REPLACEMENT["ASSERT_TRUE_M"][op] = "ASSERT_%s_M" % replacement + +for op, inv_replacement in [ + ("==", "NE"), + ("!=", "EQ"), + (">=", "LT"), + (">", "LE"), + ("<=", "GT"), + ("<", "GE"), +]: + _CHECK_REPLACEMENT["EXPECT_FALSE"][op] = "EXPECT_%s" % inv_replacement + _CHECK_REPLACEMENT["ASSERT_FALSE"][op] = "ASSERT_%s" % inv_replacement + _CHECK_REPLACEMENT["EXPECT_FALSE_M"][op] = "EXPECT_%s_M" % inv_replacement + _CHECK_REPLACEMENT["ASSERT_FALSE_M"][op] = "ASSERT_%s_M" % inv_replacement # Alternative tokens and their replacements. For full list, see section 2.5 # Alternative tokens [lex.digraph] in the C++ standard. @@ -437,18 +459,18 @@ # Digraphs (such as '%:') are not included here since it's a mess to # match those on a word boundary. _ALT_TOKEN_REPLACEMENT = { - 'and': '&&', - 'bitor': '|', - 'or': '||', - 'xor': '^', - 'compl': '~', - 'bitand': '&', - 'and_eq': '&=', - 'or_eq': '|=', - 'xor_eq': '^=', - 'not': '!', - 'not_eq': '!=' - } + "and": "&&", + "bitor": "|", + "or": "||", + "xor": "^", + "compl": "~", + "bitand": "&", + "and_eq": "&=", + "or_eq": "|=", + "xor_eq": "^=", + "not": "!", + "not_eq": "!=", +} # Compile regular expression that matches all the above keywords. The "[ =()]" # bit is meant to avoid matching these keywords outside of boolean expressions. 
@@ -456,7 +478,8 @@ # False positives include C-style multi-line comments and multi-line strings # but those have always been troublesome for cpplint. _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( - r'[ =()](' + ('|'.join(list(_ALT_TOKEN_REPLACEMENT.keys()))) + r')(?=[ (]|$)') + r"[ =()](" + ("|".join(list(_ALT_TOKEN_REPLACEMENT.keys()))) + r")(?=[ (]|$)" +) # These constants define types of headers for use with @@ -468,15 +491,15 @@ _OTHER_HEADER = 5 # These constants define the current inline assembly state -_NO_ASM = 0 # Outside of inline assembly block -_INSIDE_ASM = 1 # Inside inline assembly block -_END_ASM = 2 # Last line of inline assembly block -_BLOCK_ASM = 3 # The whole block is an inline assembly block +_NO_ASM = 0 # Outside of inline assembly block +_INSIDE_ASM = 1 # Inside inline assembly block +_END_ASM = 2 # Last line of inline assembly block +_BLOCK_ASM = 3 # The whole block is an inline assembly block # Match start of assembly blocks -_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' - r'(?:\s+(volatile|__volatile__))?' - r'\s*[{(]') +_MATCH_ASM = re.compile( + r"^\s*(?:asm|_asm|__asm|__asm__)" r"(?:\s+(volatile|__volatile__))?" r"\s*[{(]" +) _regexp_compile_cache = {} @@ -495,639 +518,670 @@ # The allowed extensions for file names # This is set by --extensions flag. -_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh', 'in', 'cxx', 'hxx']) +_valid_extensions = set(["cc", "h", "cpp", "cu", "cuh", "in", "cxx", "hxx"]) + def ParseNolintSuppressions(filename, raw_line, linenum, error): - """Updates the global list of error-suppressions. - - Parses any NOLINT comments on the current line, updating the global - error_suppressions store. Reports an error if the NOLINT comment - was malformed. - - Args: - filename: str, the name of the input file. - raw_line: str, the line of input text, with comments. - linenum: int, the number of the current line. - error: function, an error handler. 
- """ - matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) - if matched: - if matched.group(1): - suppressed_line = linenum + 1 - else: - suppressed_line = linenum - category = matched.group(2) - if category in (None, '(*)'): # => "suppress all" - _error_suppressions.setdefault(None, set()).add(suppressed_line) - else: - if category.startswith('(') and category.endswith(')'): - category = category[1:-1] - if category in _ERROR_CATEGORIES: - _error_suppressions.setdefault(category, set()).add(suppressed_line) + """Updates the global list of error-suppressions. + + Parses any NOLINT comments on the current line, updating the global + error_suppressions store. Reports an error if the NOLINT comment + was malformed. + + Args: + filename: str, the name of the input file. + raw_line: str, the line of input text, with comments. + linenum: int, the number of the current line. + error: function, an error handler. + """ + matched = Search(r"\bNOLINT(NEXTLINE)?\b(\([^)]+\))?", raw_line) + if matched: + if matched.group(1): + suppressed_line = linenum + 1 + else: + suppressed_line = linenum + category = matched.group(2) + if category in (None, "(*)"): # => "suppress all" + _error_suppressions.setdefault(None, set()).add(suppressed_line) else: - error(filename, linenum, 'readability/nolint', 5, - 'Unknown NOLINT error category: %s' % category) + if category.startswith("(") and category.endswith(")"): + category = category[1:-1] + if category in _ERROR_CATEGORIES: + _error_suppressions.setdefault(category, set()).add(suppressed_line) + else: + error( + filename, + linenum, + "readability/nolint", + 5, + "Unknown NOLINT error category: %s" % category, + ) def ResetNolintSuppressions(): - """Resets the set of NOLINT suppressions to empty.""" - _error_suppressions.clear() + """Resets the set of NOLINT suppressions to empty.""" + _error_suppressions.clear() def IsErrorSuppressedByNolint(category, linenum): - """Returns true if the specified error category is suppressed 
on this line. - - Consults the global error_suppressions map populated by - ParseNolintSuppressions/ResetNolintSuppressions. - - Args: - category: str, the category of the error. - linenum: int, the current line number. - Returns: - bool, True iff the error should be suppressed due to a NOLINT comment. - """ - return (linenum in _error_suppressions.get(category, set()) or - linenum in _error_suppressions.get(None, set())) - - -def Match(pattern, s): - """Matches the string with the pattern, caching the compiled regexp.""" - # The regexp compilation caching is inlined in both Match and Search for - # performance reasons; factoring it out into a separate function turns out - # to be noticeably expensive. - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].match(s) - - -def ReplaceAll(pattern, rep, s): - """Replaces instances of pattern in a string with a replacement. - - The compiled regex is kept in a cache shared by Match and Search. - - Args: - pattern: regex pattern - rep: replacement text - s: search string - - Returns: - string with replacements made (or original string if no replacements) - """ - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].sub(rep, s) - + """Returns true if the specified error category is suppressed on this line. -def Search(pattern, s): - """Searches the string for the pattern, caching the compiled regexp.""" - if pattern not in _regexp_compile_cache: - _regexp_compile_cache[pattern] = sre_compile.compile(pattern) - return _regexp_compile_cache[pattern].search(s) - - -class _IncludeState(object): - """Tracks line numbers for includes, and the order in which includes appear. - - include_list contains list of lists of (header, line number) pairs. 
- It's a lists of lists rather than just one flat list to make it - easier to update across preprocessor boundaries. - - Call CheckNextIncludeOrder() once for each header in the file, passing - in the type constants defined above. Calls in an illegal order will - raise an _IncludeError with an appropriate error message. - - """ - # self._section will move monotonically through this set. If it ever - # needs to move backwards, CheckNextIncludeOrder will raise an error. - _INITIAL_SECTION = 0 - _MY_H_SECTION = 1 - _C_SECTION = 2 - _CPP_SECTION = 3 - _OTHER_H_SECTION = 4 - - _TYPE_NAMES = { - _C_SYS_HEADER: 'C system header', - _CPP_SYS_HEADER: 'C++ system header', - _LIKELY_MY_HEADER: 'header this file implements', - _POSSIBLE_MY_HEADER: 'header this file may implement', - _OTHER_HEADER: 'other header', - } - _SECTION_NAMES = { - _INITIAL_SECTION: "... nothing. (This can't be an error.)", - _MY_H_SECTION: 'a header this file implements', - _C_SECTION: 'C system header', - _CPP_SECTION: 'C++ system header', - _OTHER_H_SECTION: 'other header', - } - - def __init__(self): - self.include_list = [[]] - self.ResetSection('') - - def FindHeader(self, header): - """Check if a header has already been included. + Consults the global error_suppressions map populated by + ParseNolintSuppressions/ResetNolintSuppressions. Args: - header: header to check. + category: str, the category of the error. + linenum: int, the current line number. Returns: - Line number of previous occurrence, or -1 if the header has not - been seen before. + bool, True iff the error should be suppressed due to a NOLINT comment. """ - for section_list in self.include_list: - for f in section_list: - if f[0] == header: - return f[1] - return -1 + return linenum in _error_suppressions.get( + category, set() + ) or linenum in _error_suppressions.get(None, set()) - def ResetSection(self, directive): - """Reset section checking for preprocessor directive. - Args: - directive: preprocessor directive (e.g. 
"if", "else"). - """ - # The name of the current section. - self._section = self._INITIAL_SECTION - # The path of last found header. - self._last_header = '' - - # Update list of includes. Note that we never pop from the - # include list. - if directive in ('if', 'ifdef', 'ifndef'): - self.include_list.append([]) - elif directive in ('else', 'elif'): - self.include_list[-1] = [] +def Match(pattern, s): + """Matches the string with the pattern, caching the compiled regexp.""" + # The regexp compilation caching is inlined in both Match and Search for + # performance reasons; factoring it out into a separate function turns out + # to be noticeably expensive. + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].match(s) - def SetLastHeader(self, header_path): - self._last_header = header_path - def CanonicalizeAlphabeticalOrder(self, header_path): - """Returns a path canonicalized for alphabetical comparison. +def ReplaceAll(pattern, rep, s): + """Replaces instances of pattern in a string with a replacement. - - replaces "-" with "_" so they both cmp the same. - - removes '-inl' since we don't require them to be after the main header. - - lowercase everything, just in case. + The compiled regex is kept in a cache shared by Match and Search. Args: - header_path: Path to be canonicalized. + pattern: regex pattern + rep: replacement text + s: search string Returns: - Canonicalized path. + string with replacements made (or original string if no replacements) """ - return header_path.replace('-inl.h', '.h').replace('-', '_').lower() - - def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): - """Check if a header is in alphabetical order with the previous header. 
+ if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].sub(rep, s) - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - header_path: Canonicalized header to be checked. - Returns: - Returns true if the header is in alphabetical order. - """ - # If previous section is different from current section, _last_header will - # be reset to empty string, so it's always less than current header. - # - # If previous line was a blank line, assume that the headers are - # intentionally sorted the way they are. - if (self._last_header > header_path and - not Match(r'^\s*$', clean_lines.elided[linenum - 1])): - return False - return True +def Search(pattern, s): + """Searches the string for the pattern, caching the compiled regexp.""" + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].search(s) - def CheckNextIncludeOrder(self, header_type): - """Returns a non-empty error message if the next header is out of order. - This function also updates the internal state to be ready to check - the next include. +class _IncludeState(object): + """Tracks line numbers for includes, and the order in which includes appear. - Args: - header_type: One of the _XXX_HEADER constants defined above. + include_list contains list of lists of (header, line number) pairs. + It's a lists of lists rather than just one flat list to make it + easier to update across preprocessor boundaries. - Returns: - The empty string if the header is in the right order, or an - error message describing what's wrong. + Call CheckNextIncludeOrder() once for each header in the file, passing + in the type constants defined above. Calls in an illegal order will + raise an _IncludeError with an appropriate error message. 
""" - error_message = ('Found %s after %s' % - (self._TYPE_NAMES[header_type], - self._SECTION_NAMES[self._section])) - - last_section = self._section - - if header_type == _C_SYS_HEADER: - if self._section <= self._C_SECTION: - self._section = self._C_SECTION - else: - self._last_header = '' - return error_message - elif header_type == _CPP_SYS_HEADER: - if self._section <= self._CPP_SECTION: - self._section = self._CPP_SECTION - else: - self._last_header = '' - return error_message - elif header_type == _LIKELY_MY_HEADER: - if self._section <= self._MY_H_SECTION: - self._section = self._MY_H_SECTION - else: - self._section = self._OTHER_H_SECTION - elif header_type == _POSSIBLE_MY_HEADER: - if self._section <= self._MY_H_SECTION: - self._section = self._MY_H_SECTION - else: - # This will always be the fallback because we're not sure - # enough that the header is associated with this file. - self._section = self._OTHER_H_SECTION - else: - assert header_type == _OTHER_HEADER - self._section = self._OTHER_H_SECTION - if last_section != self._section: - self._last_header = '' - - return '' + # self._section will move monotonically through this set. If it ever + # needs to move backwards, CheckNextIncludeOrder will raise an error. + _INITIAL_SECTION = 0 + _MY_H_SECTION = 1 + _C_SECTION = 2 + _CPP_SECTION = 3 + _OTHER_H_SECTION = 4 + + _TYPE_NAMES = { + _C_SYS_HEADER: "C system header", + _CPP_SYS_HEADER: "C++ system header", + _LIKELY_MY_HEADER: "header this file implements", + _POSSIBLE_MY_HEADER: "header this file may implement", + _OTHER_HEADER: "other header", + } + _SECTION_NAMES = { + _INITIAL_SECTION: "... nothing. (This can't be an error.)", + _MY_H_SECTION: "a header this file implements", + _C_SECTION: "C system header", + _CPP_SECTION: "C++ system header", + _OTHER_H_SECTION: "other header", + } + def __init__(self): + self.include_list = [[]] + self.ResetSection("") + + def FindHeader(self, header): + """Check if a header has already been included. 
+ + Args: + header: header to check. + Returns: + Line number of previous occurrence, or -1 if the header has not + been seen before. + """ + for section_list in self.include_list: + for f in section_list: + if f[0] == header: + return f[1] + return -1 + + def ResetSection(self, directive): + """Reset section checking for preprocessor directive. + + Args: + directive: preprocessor directive (e.g. "if", "else"). + """ + # The name of the current section. + self._section = self._INITIAL_SECTION + # The path of last found header. + self._last_header = "" + + # Update list of includes. Note that we never pop from the + # include list. + if directive in ("if", "ifdef", "ifndef"): + self.include_list.append([]) + elif directive in ("else", "elif"): + self.include_list[-1] = [] + + def SetLastHeader(self, header_path): + self._last_header = header_path + + def CanonicalizeAlphabeticalOrder(self, header_path): + """Returns a path canonicalized for alphabetical comparison. + + - replaces "-" with "_" so they both cmp the same. + - removes '-inl' since we don't require them to be after the main header. + - lowercase everything, just in case. + + Args: + header_path: Path to be canonicalized. + + Returns: + Canonicalized path. + """ + return header_path.replace("-inl.h", ".h").replace("-", "_").lower() + + def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): + """Check if a header is in alphabetical order with the previous header. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + header_path: Canonicalized header to be checked. + + Returns: + Returns true if the header is in alphabetical order. + """ + # If previous section is different from current section, _last_header will + # be reset to empty string, so it's always less than current header. + # + # If previous line was a blank line, assume that the headers are + # intentionally sorted the way they are. 
+ if self._last_header > header_path and not Match( + r"^\s*$", clean_lines.elided[linenum - 1] + ): + return False + return True -class _CppLintState(object): - """Maintains module-wide state..""" - - def __init__(self): - self.verbose_level = 1 # global setting. - self.error_count = 0 # global count of reported errors - # filters to apply when emitting error messages - self.filters = _DEFAULT_FILTERS[:] - # backup of filter list. Used to restore the state after each file. - self._filters_backup = self.filters[:] - self.counting = 'total' # In what way are we counting errors? - self.errors_by_category = {} # string to int dict storing error counts - - # output format: - # "emacs" - format that emacs can parse (default) - # "vs7" - format that Microsoft Visual Studio 7 can parse - self.output_format = 'emacs' - - def SetOutputFormat(self, output_format): - """Sets the output format for errors.""" - self.output_format = output_format - - def SetVerboseLevel(self, level): - """Sets the module's verbosity, and returns the previous setting.""" - last_verbose_level = self.verbose_level - self.verbose_level = level - return last_verbose_level + def CheckNextIncludeOrder(self, header_type): + """Returns a non-empty error message if the next header is out of order. + + This function also updates the internal state to be ready to check + the next include. + + Args: + header_type: One of the _XXX_HEADER constants defined above. + + Returns: + The empty string if the header is in the right order, or an + error message describing what's wrong. 
+ + """ + error_message = "Found %s after %s" % ( + self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section], + ) + + last_section = self._section + + if header_type == _C_SYS_HEADER: + if self._section <= self._C_SECTION: + self._section = self._C_SECTION + else: + self._last_header = "" + return error_message + elif header_type == _CPP_SYS_HEADER: + if self._section <= self._CPP_SECTION: + self._section = self._CPP_SECTION + else: + self._last_header = "" + return error_message + elif header_type == _LIKELY_MY_HEADER: + if self._section <= self._MY_H_SECTION: + self._section = self._MY_H_SECTION + else: + self._section = self._OTHER_H_SECTION + elif header_type == _POSSIBLE_MY_HEADER: + if self._section <= self._MY_H_SECTION: + self._section = self._MY_H_SECTION + else: + # This will always be the fallback because we're not sure + # enough that the header is associated with this file. + self._section = self._OTHER_H_SECTION + else: + assert header_type == _OTHER_HEADER + self._section = self._OTHER_H_SECTION - def SetCountingStyle(self, counting_style): - """Sets the module's counting options.""" - self.counting = counting_style + if last_section != self._section: + self._last_header = "" - def SetFilters(self, filters): - """Sets the error-message filters. + return "" - These filters are applied when deciding whether to emit a given - error message. - Args: - filters: A string of comma-separated filters (eg "+whitespace/indent"). - Each filter should start with + or -; else we die. +class _CppLintState(object): + """Maintains module-wide state..""" + + def __init__(self): + self.verbose_level = 1 # global setting. + self.error_count = 0 # global count of reported errors + # filters to apply when emitting error messages + self.filters = _DEFAULT_FILTERS[:] + # backup of filter list. Used to restore the state after each file. + self._filters_backup = self.filters[:] + self.counting = "total" # In what way are we counting errors? 
+ self.errors_by_category = {} # string to int dict storing error counts + + # output format: + # "emacs" - format that emacs can parse (default) + # "vs7" - format that Microsoft Visual Studio 7 can parse + self.output_format = "emacs" + + def SetOutputFormat(self, output_format): + """Sets the output format for errors.""" + self.output_format = output_format + + def SetVerboseLevel(self, level): + """Sets the module's verbosity, and returns the previous setting.""" + last_verbose_level = self.verbose_level + self.verbose_level = level + return last_verbose_level + + def SetCountingStyle(self, counting_style): + """Sets the module's counting options.""" + self.counting = counting_style + + def SetFilters(self, filters): + """Sets the error-message filters. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + filters: A string of comma-separated filters (eg "+whitespace/indent"). + Each filter should start with + or -; else we die. + + Raises: + ValueError: The comma-separated filters did not all start with '+' or '-'. + E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" + """ + # Default filters always have less priority than the flag ones. 
+ self.filters = _DEFAULT_FILTERS[:] + self.AddFilters(filters) + + def AddFilters(self, filters): + """Adds more filters to the existing list of error-message filters.""" + for filt in filters.split(","): + clean_filt = filt.strip() + if clean_filt: + self.filters.append(clean_filt) + for filt in self.filters: + if not (filt.startswith("+") or filt.startswith("-")): + raise ValueError( + "Every filter in --filters must start with + or -" + " (%s does not)" % filt + ) + + def BackupFilters(self): + """Saves the current filter list to backup storage.""" + self._filters_backup = self.filters[:] + + def RestoreFilters(self): + """Restores filters previously backed up.""" + self.filters = self._filters_backup[:] + + def ResetErrorCounts(self): + """Sets the module's error statistic back to zero.""" + self.error_count = 0 + self.errors_by_category = {} + + def IncrementErrorCount(self, category): + """Bumps the module's error statistic.""" + self.error_count += 1 + if self.counting in ("toplevel", "detailed"): + if self.counting != "detailed": + category = category.split("/")[0] + if category not in self.errors_by_category: + self.errors_by_category[category] = 0 + self.errors_by_category[category] += 1 + + def PrintErrorCounts(self): + """Print a summary of errors by category, and the total.""" + for category, count in list(self.errors_by_category.items()): + sys.stderr.write("Category '%s' errors found: %d\n" % (category, count)) + sys.stderr.write("Total errors found: %d\n" % self.error_count) - Raises: - ValueError: The comma-separated filters did not all start with '+' or '-'. - E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" - """ - # Default filters always have less priority than the flag ones. - self.filters = _DEFAULT_FILTERS[:] - self.AddFilters(filters) - - def AddFilters(self, filters): - """ Adds more filters to the existing list of error-message filters. 
""" - for filt in filters.split(','): - clean_filt = filt.strip() - if clean_filt: - self.filters.append(clean_filt) - for filt in self.filters: - if not (filt.startswith('+') or filt.startswith('-')): - raise ValueError('Every filter in --filters must start with + or -' - ' (%s does not)' % filt) - - def BackupFilters(self): - """ Saves the current filter list to backup storage.""" - self._filters_backup = self.filters[:] - - def RestoreFilters(self): - """ Restores filters previously backed up.""" - self.filters = self._filters_backup[:] - - def ResetErrorCounts(self): - """Sets the module's error statistic back to zero.""" - self.error_count = 0 - self.errors_by_category = {} - - def IncrementErrorCount(self, category): - """Bumps the module's error statistic.""" - self.error_count += 1 - if self.counting in ('toplevel', 'detailed'): - if self.counting != 'detailed': - category = category.split('/')[0] - if category not in self.errors_by_category: - self.errors_by_category[category] = 0 - self.errors_by_category[category] += 1 - - def PrintErrorCounts(self): - """Print a summary of errors by category, and the total.""" - for category, count in self.errors_by_category.items(): - sys.stderr.write('Category \'%s\' errors found: %d\n' % - (category, count)) - sys.stderr.write('Total errors found: %d\n' % self.error_count) _cpplint_state = _CppLintState() def _OutputFormat(): - """Gets the module's output format.""" - return _cpplint_state.output_format + """Gets the module's output format.""" + return _cpplint_state.output_format def _SetOutputFormat(output_format): - """Sets the module's output format.""" - _cpplint_state.SetOutputFormat(output_format) + """Sets the module's output format.""" + _cpplint_state.SetOutputFormat(output_format) def _VerboseLevel(): - """Returns the module's verbosity setting.""" - return _cpplint_state.verbose_level + """Returns the module's verbosity setting.""" + return _cpplint_state.verbose_level def _SetVerboseLevel(level): - 
"""Sets the module's verbosity, and returns the previous setting.""" - return _cpplint_state.SetVerboseLevel(level) + """Sets the module's verbosity, and returns the previous setting.""" + return _cpplint_state.SetVerboseLevel(level) def _SetCountingStyle(level): - """Sets the module's counting options.""" - _cpplint_state.SetCountingStyle(level) + """Sets the module's counting options.""" + _cpplint_state.SetCountingStyle(level) def _Filters(): - """Returns the module's list of output filters, as a list.""" - return _cpplint_state.filters + """Returns the module's list of output filters, as a list.""" + return _cpplint_state.filters def _SetFilters(filters): - """Sets the module's error-message filters. + """Sets the module's error-message filters. - These filters are applied when deciding whether to emit a given - error message. - - Args: - filters: A string of comma-separated filters (eg "whitespace/indent"). - Each filter should start with + or -; else we die. - """ - _cpplint_state.SetFilters(filters) - -def _AddFilters(filters): - """Adds more filter overrides. - - Unlike _SetFilters, this function does not reset the current list of filters - available. - - Args: - filters: A string of comma-separated filters (eg "whitespace/indent"). - Each filter should start with + or -; else we die. - """ - _cpplint_state.AddFilters(filters) - -def _BackupFilters(): - """ Saves the current filter list to backup storage.""" - _cpplint_state.BackupFilters() - -def _RestoreFilters(): - """ Restores filters previously backed up.""" - _cpplint_state.RestoreFilters() - -class _FunctionState(object): - """Tracks current function name and the number of lines in its body.""" - - _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. - _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. - - def __init__(self): - self.in_a_function = False - self.lines_in_function = 0 - self.current_function = '' - - def Begin(self, function_name): - """Start analyzing function body. 
+ These filters are applied when deciding whether to emit a given + error message. Args: - function_name: The name of the function being tracked. + filters: A string of comma-separated filters (eg "whitespace/indent"). + Each filter should start with + or -; else we die. """ - self.in_a_function = True - self.lines_in_function = 0 - self.current_function = function_name + _cpplint_state.SetFilters(filters) + - def Count(self): - """Count line in current function body.""" - if self.in_a_function: - self.lines_in_function += 1 +def _AddFilters(filters): + """Adds more filter overrides. - def Check(self, error, filename, linenum): - """Report if too many lines in function body. + Unlike _SetFilters, this function does not reset the current list of filters + available. Args: - error: The function to call with any errors found. - filename: The name of the current file. - linenum: The number of the line to check. + filters: A string of comma-separated filters (eg "whitespace/indent"). + Each filter should start with + or -; else we die. """ - if Match(r'T(EST|est)', self.current_function): - base_trigger = self._TEST_TRIGGER - else: - base_trigger = self._NORMAL_TRIGGER - trigger = base_trigger * 2**_VerboseLevel() + _cpplint_state.AddFilters(filters) - if self.lines_in_function > trigger: - error_level = int(math.log(self.lines_in_function / base_trigger, 2)) - # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... - if error_level > 5: - error_level = 5 - error(filename, linenum, 'readability/fn_size', error_level, - 'Small and focused functions are preferred:' - ' %s has %d non-comment lines' - ' (error triggered by exceeding %d lines).' 
% ( - self.current_function, self.lines_in_function, trigger)) - def End(self): - """Stop analyzing function body.""" - self.in_a_function = False +def _BackupFilters(): + """Saves the current filter list to backup storage.""" + _cpplint_state.BackupFilters() -class _IncludeError(Exception): - """Indicates a problem with the include order in a file.""" - pass +def _RestoreFilters(): + """Restores filters previously backed up.""" + _cpplint_state.RestoreFilters() -class FileInfo(object): - """Provides utility functions for filenames. +class _FunctionState(object): + """Tracks current function name and the number of lines in its body.""" + + _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. + _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. + + def __init__(self): + self.in_a_function = False + self.lines_in_function = 0 + self.current_function = "" + + def Begin(self, function_name): + """Start analyzing function body. + + Args: + function_name: The name of the function being tracked. + """ + self.in_a_function = True + self.lines_in_function = 0 + self.current_function = function_name + + def Count(self): + """Count line in current function body.""" + if self.in_a_function: + self.lines_in_function += 1 + + def Check(self, error, filename, linenum): + """Report if too many lines in function body. + + Args: + error: The function to call with any errors found. + filename: The name of the current file. + linenum: The number of the line to check. + """ + if Match(r"T(EST|est)", self.current_function): + base_trigger = self._TEST_TRIGGER + else: + base_trigger = self._NORMAL_TRIGGER + trigger = base_trigger * 2 ** _VerboseLevel() + + if self.lines_in_function > trigger: + error_level = int(math.log(self.lines_in_function / base_trigger, 2)) + # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... 
+ if error_level > 5: + error_level = 5 + error( + filename, + linenum, + "readability/fn_size", + error_level, + "Small and focused functions are preferred:" + " %s has %d non-comment lines" + " (error triggered by exceeding %d lines)." + % (self.current_function, self.lines_in_function, trigger), + ) + + def End(self): + """Stop analyzing function body.""" + self.in_a_function = False - FileInfo provides easy access to the components of a file's path - relative to the project root. - """ - def __init__(self, filename): - self._filename = filename +class _IncludeError(Exception): + """Indicates a problem with the include order in a file.""" - def FullName(self): - """Make Windows paths like Unix.""" - return os.path.abspath(self._filename).replace('\\', '/') + pass - def RepositoryName(self): - """FullName after removing the local path to the repository. - If we have a real absolute path name here we can try to do something smart: - detecting the root of the checkout and truncating /path/to/checkout from - the name so that we get header guards that don't include things like - "C:\Documents and Settings\..." or "/home/username/..." in them and thus - people on different computers who have checked the source out to different - locations won't see bogus errors. - """ - fullname = self.FullName() - - if os.path.exists(fullname): - project_dir = os.path.dirname(fullname) - - if os.path.exists(os.path.join(project_dir, ".svn")): - # If there's a .svn file in the current directory, we recursively look - # up the directory tree for the top of the SVN checkout - root_dir = project_dir - one_up_dir = os.path.dirname(root_dir) - while os.path.exists(os.path.join(one_up_dir, ".svn")): - root_dir = os.path.dirname(root_dir) - one_up_dir = os.path.dirname(one_up_dir) - - prefix = os.path.commonprefix([root_dir, project_dir]) - return fullname[len(prefix) + 1:] - - # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by - # searching up from the current path. 
- root_dir = os.path.dirname(fullname) - while (root_dir != os.path.dirname(root_dir) and - not os.path.exists(os.path.join(root_dir, ".git")) and - not os.path.exists(os.path.join(root_dir, ".hg")) and - not os.path.exists(os.path.join(root_dir, ".svn"))): - root_dir = os.path.dirname(root_dir) - - if (os.path.exists(os.path.join(root_dir, ".git")) or - os.path.exists(os.path.join(root_dir, ".hg")) or - os.path.exists(os.path.join(root_dir, ".svn"))): - prefix = os.path.commonprefix([root_dir, project_dir]) - return fullname[len(prefix) + 1:] - - # Don't know what to do; header guard warnings may be wrong... - return fullname - - def Split(self): - """Splits the file into the directory, basename, and extension. - - For 'chrome/browser/browser.cc', Split() would - return ('chrome/browser', 'browser', '.cc') +class FileInfo(object): + """Provides utility functions for filenames. - Returns: - A tuple of (directory, basename, extension). + FileInfo provides easy access to the components of a file's path + relative to the project root. """ - googlename = self.RepositoryName() - project, rest = os.path.split(googlename) - return (project,) + os.path.splitext(rest) - - def BaseName(self): - """File base name - text after the final slash, before the final period.""" - return self.Split()[1] + def __init__(self, filename): + self._filename = filename + + def FullName(self): + """Make Windows paths like Unix.""" + return os.path.abspath(self._filename).replace("\\", "/") + + def RepositoryName(self): + """FullName after removing the local path to the repository. + + If we have a real absolute path name here we can try to do something smart: + detecting the root of the checkout and truncating /path/to/checkout from + the name so that we get header guards that don't include things like + "C:\Documents and Settings\..." or "/home/username/..." in them and thus + people on different computers who have checked the source out to different + locations won't see bogus errors. 
+ """ + fullname = self.FullName() + + if os.path.exists(fullname): + project_dir = os.path.dirname(fullname) + + if os.path.exists(os.path.join(project_dir, ".svn")): + # If there's a .svn file in the current directory, we recursively look + # up the directory tree for the top of the SVN checkout + root_dir = project_dir + one_up_dir = os.path.dirname(root_dir) + while os.path.exists(os.path.join(one_up_dir, ".svn")): + root_dir = os.path.dirname(root_dir) + one_up_dir = os.path.dirname(one_up_dir) + + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1 :] + + # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by + # searching up from the current path. + root_dir = os.path.dirname(fullname) + while ( + root_dir != os.path.dirname(root_dir) + and not os.path.exists(os.path.join(root_dir, ".git")) + and not os.path.exists(os.path.join(root_dir, ".hg")) + and not os.path.exists(os.path.join(root_dir, ".svn")) + ): + root_dir = os.path.dirname(root_dir) + + if ( + os.path.exists(os.path.join(root_dir, ".git")) + or os.path.exists(os.path.join(root_dir, ".hg")) + or os.path.exists(os.path.join(root_dir, ".svn")) + ): + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1 :] + + # Don't know what to do; header guard warnings may be wrong... + return fullname + + def Split(self): + """Splits the file into the directory, basename, and extension. + + For 'chrome/browser/browser.cc', Split() would + return ('chrome/browser', 'browser', '.cc') + + Returns: + A tuple of (directory, basename, extension). 
+ """ + + googlename = self.RepositoryName() + project, rest = os.path.split(googlename) + return (project,) + os.path.splitext(rest) + + def BaseName(self): + """File base name - text after the final slash, before the final period.""" + return self.Split()[1] + + def Extension(self): + """File extension - text following the final period.""" + return self.Split()[2] + + def NoExtension(self): + """File has no source file extension.""" + return "/".join(self.Split()[0:2]) + + def IsSource(self): + """File has a source file extension.""" + return self.Extension()[1:] in ("c", "cc", "cpp", "cxx") - def Extension(self): - """File extension - text following the final period.""" - return self.Split()[2] - def NoExtension(self): - """File has no source file extension.""" - return '/'.join(self.Split()[0:2]) +def _ShouldPrintError(category, confidence, linenum): + """If confidence >= verbose, category passes filter and is not suppressed.""" - def IsSource(self): - """File has a source file extension.""" - return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') + # There are three ways we might decide not to print an error message: + # a "NOLINT(category)" comment appears in the source, + # the verbosity level isn't high enough, or the filters filter it out. + if IsErrorSuppressedByNolint(category, linenum): + return False + if confidence < _cpplint_state.verbose_level: + return False -def _ShouldPrintError(category, confidence, linenum): - """If confidence >= verbose, category passes filter and is not suppressed.""" + is_filtered = False + for one_filter in _Filters(): + if one_filter.startswith("-"): + if category.startswith(one_filter[1:]): + is_filtered = True + elif one_filter.startswith("+"): + if category.startswith(one_filter[1:]): + is_filtered = False + else: + assert False # should have been checked for in SetFilter. 
+ if is_filtered: + return False - # There are three ways we might decide not to print an error message: - # a "NOLINT(category)" comment appears in the source, - # the verbosity level isn't high enough, or the filters filter it out. - if IsErrorSuppressedByNolint(category, linenum): - return False + return True - if confidence < _cpplint_state.verbose_level: - return False - is_filtered = False - for one_filter in _Filters(): - if one_filter.startswith('-'): - if category.startswith(one_filter[1:]): - is_filtered = True - elif one_filter.startswith('+'): - if category.startswith(one_filter[1:]): - is_filtered = False - else: - assert False # should have been checked for in SetFilter. - if is_filtered: - return False +def Error(filename, linenum, category, confidence, message): + """Logs the fact we've found a lint error. - return True + We log where the error was found, and also our confidence in the error, + that is, how certain we are this is a legitimate style regression, and + not a misidentification or a use that's sometimes justified. + False positives can be suppressed by the use of + "cpplint(category)" comments on the offending line. These are + parsed into _error_suppressions. -def Error(filename, linenum, category, confidence, message): - """Logs the fact we've found a lint error. - - We log where the error was found, and also our confidence in the error, - that is, how certain we are this is a legitimate style regression, and - not a misidentification or a use that's sometimes justified. - - False positives can be suppressed by the use of - "cpplint(category)" comments on the offending line. These are - parsed into _error_suppressions. - - Args: - filename: The name of the file containing the error. - linenum: The number of the line containing the error. - category: A string used to describe the "category" this bug - falls under: "whitespace", say, or "runtime". Categories - may have a hierarchy separated by slashes: "whitespace/indent". 
- confidence: A number from 1-5 representing a confidence score for - the error, with 5 meaning that we are certain of the problem, - and 1 meaning that it could be a legitimate construct. - message: The error message. - """ - if _ShouldPrintError(category, confidence, linenum): - _cpplint_state.IncrementErrorCount(category) - if print_stdout: - if _cpplint_state.output_format == 'vs7': - sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - elif _cpplint_state.output_format == 'eclipse': - sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - else: - sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( - filename, linenum, message, category, confidence)) - else: - output.append((linenum, message)) + Args: + filename: The name of the file containing the error. + linenum: The number of the line containing the error. + category: A string used to describe the "category" this bug + falls under: "whitespace", say, or "runtime". Categories + may have a hierarchy separated by slashes: "whitespace/indent". + confidence: A number from 1-5 representing a confidence score for + the error, with 5 meaning that we are certain of the problem, + and 1 meaning that it could be a legitimate construct. + message: The error message. + """ + if _ShouldPrintError(category, confidence, linenum): + _cpplint_state.IncrementErrorCount(category) + if print_stdout: + if _cpplint_state.output_format == "vs7": + sys.stderr.write( + "%s(%s): %s [%s] [%d]\n" + % (filename, linenum, message, category, confidence) + ) + elif _cpplint_state.output_format == "eclipse": + sys.stderr.write( + "%s:%s: warning: %s [%s] [%d]\n" + % (filename, linenum, message, category, confidence) + ) + else: + sys.stderr.write( + "%s:%s: %s [%s] [%d]\n" + % (filename, linenum, message, category, confidence) + ) + else: + output.append((linenum, message)) # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. 
-_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( - r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') +_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') # Match a single C style comment on the same line. -_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/' +_RE_PATTERN_C_COMMENTS = r"/\*(?:[^*]|\*(?!/))*\*/" # Matches multi-line C style comments. # This RE is a little bit more complicated than one might expect, because we # have to take care of space removals tools so we can handle comments inside @@ -1137,702 +1191,768 @@ def Error(filename, linenum, category, confidence, message): # if this doesn't work we try on left side but only if there's a non-character # on the right. _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( - r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' + - _RE_PATTERN_C_COMMENTS + r'\s+|' + - r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' + - _RE_PATTERN_C_COMMENTS + r')') + r"(\s*" + + _RE_PATTERN_C_COMMENTS + + r"\s*$|" + + _RE_PATTERN_C_COMMENTS + + r"\s+|" + + r"\s+" + + _RE_PATTERN_C_COMMENTS + + r"(?=\W)|" + + _RE_PATTERN_C_COMMENTS + + r")" +) def IsCppString(line): - """Does line terminate so, that the next symbol is in string constant. + """Does line terminate so, that the next symbol is in string constant. - This function does not consider single-line nor multi-line comments. + This function does not consider single-line nor multi-line comments. - Args: - line: is a partial line of code starting from the 0..n. + Args: + line: is a partial line of code starting from the 0..n. - Returns: - True, if next character appended to 'line' is inside a - string constant. - """ + Returns: + True, if next character appended to 'line' is inside a + string constant. 
+ """ - line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" - return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 + line = line.replace(r"\\", "XX") # after this, \\" does not match to \" + return ((line.count('"') - line.count(r"\"") - line.count("'\"'")) & 1) == 1 def CleanseRawStrings(raw_lines): - """Removes C++11 raw strings from lines. - - Before: - static const char kData[] = R"( - multi-line string - )"; - - After: - static const char kData[] = "" - (replaced by blank line) - ""; - - Args: - raw_lines: list of raw lines. - - Returns: - list of lines with C++11 raw strings replaced by empty strings. - """ - - delimiter = None - lines_without_raw_strings = [] - for line in raw_lines: - if delimiter: - # Inside a raw string, look for the end - end = line.find(delimiter) - if end >= 0: - # Found the end of the string, match leading space for this - # line and resume copying the original lines, and also insert - # a "" on the last line. - leading_space = Match(r'^(\s*)\S', line) - line = leading_space.group(1) + '""' + line[end + len(delimiter):] - delimiter = None - else: - # Haven't found the end yet, append a blank line. - line = '""' - - # Look for beginning of a raw string, and replace them with - # empty strings. This is done in a loop to handle multiple raw - # strings on the same line. - while delimiter is None: - # Look for beginning of a raw string. - # See 2.14.15 [lex.string] for syntax. - matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) - if matched: - delimiter = ')' + matched.group(2) + '"' - - end = matched.group(3).find(delimiter) - if end >= 0: - # Raw string ended on same line - line = (matched.group(1) + '""' + - matched.group(3)[end + len(delimiter):]) - delimiter = None - else: - # Start of a multi-line raw string - line = matched.group(1) + '""' - else: - break + """Removes C++11 raw strings from lines. 
+ + Before: + static const char kData[] = R"( + multi-line string + )"; + + After: + static const char kData[] = "" + (replaced by blank line) + ""; + + Args: + raw_lines: list of raw lines. - lines_without_raw_strings.append(line) + Returns: + list of lines with C++11 raw strings replaced by empty strings. + """ - # TODO(unknown): if delimiter is not None here, we might want to - # emit a warning for unterminated string. - return lines_without_raw_strings + delimiter = None + lines_without_raw_strings = [] + for line in raw_lines: + if delimiter: + # Inside a raw string, look for the end + end = line.find(delimiter) + if end >= 0: + # Found the end of the string, match leading space for this + # line and resume copying the original lines, and also insert + # a "" on the last line. + leading_space = Match(r"^(\s*)\S", line) + line = leading_space.group(1) + '""' + line[end + len(delimiter) :] + delimiter = None + else: + # Haven't found the end yet, append a blank line. + line = '""' + + # Look for beginning of a raw string, and replace them with + # empty strings. This is done in a loop to handle multiple raw + # strings on the same line. + while delimiter is None: + # Look for beginning of a raw string. + # See 2.14.15 [lex.string] for syntax. + matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) + if matched: + delimiter = ")" + matched.group(2) + '"' + + end = matched.group(3).find(delimiter) + if end >= 0: + # Raw string ended on same line + line = ( + matched.group(1) + + '""' + + matched.group(3)[end + len(delimiter) :] + ) + delimiter = None + else: + # Start of a multi-line raw string + line = matched.group(1) + '""' + else: + break + + lines_without_raw_strings.append(line) + + # TODO(unknown): if delimiter is not None here, we might want to + # emit a warning for unterminated string. 
+ return lines_without_raw_strings def FindNextMultiLineCommentStart(lines, lineix): - """Find the beginning marker for a multiline comment.""" - while lineix < len(lines): - if lines[lineix].strip().startswith('/*'): - # Only return this marker if the comment goes beyond this line - if lines[lineix].strip().find('*/', 2) < 0: - return lineix - lineix += 1 - return len(lines) + """Find the beginning marker for a multiline comment.""" + while lineix < len(lines): + if lines[lineix].strip().startswith("/*"): + # Only return this marker if the comment goes beyond this line + if lines[lineix].strip().find("*/", 2) < 0: + return lineix + lineix += 1 + return len(lines) def FindNextMultiLineCommentEnd(lines, lineix): - """We are inside a comment, find the end marker.""" - while lineix < len(lines): - if lines[lineix].strip().endswith('*/'): - return lineix - lineix += 1 - return len(lines) + """We are inside a comment, find the end marker.""" + while lineix < len(lines): + if lines[lineix].strip().endswith("*/"): + return lineix + lineix += 1 + return len(lines) def RemoveMultiLineCommentsFromRange(lines, begin, end): - """Clears a range of lines for multi-line comments.""" - # Having // dummy comments makes the lines non-empty, so we will not get - # unnecessary blank line warnings later in the code. - for i in range(begin, end): - lines[i] = '// dummy' + """Clears a range of lines for multi-line comments.""" + # Having // dummy comments makes the lines non-empty, so we will not get + # unnecessary blank line warnings later in the code. 
+ for i in range(begin, end): + lines[i] = "// dummy" def RemoveMultiLineComments(filename, lines, error): - """Removes multiline (c-style) comments from lines.""" - lineix = 0 - while lineix < len(lines): - lineix_begin = FindNextMultiLineCommentStart(lines, lineix) - if lineix_begin >= len(lines): - return - lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) - if lineix_end >= len(lines): - error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, - 'Could not find end of multi-line comment') - return - RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) - lineix = lineix_end + 1 + """Removes multiline (c-style) comments from lines.""" + lineix = 0 + while lineix < len(lines): + lineix_begin = FindNextMultiLineCommentStart(lines, lineix) + if lineix_begin >= len(lines): + return + lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) + if lineix_end >= len(lines): + error( + filename, + lineix_begin + 1, + "readability/multiline_comment", + 5, + "Could not find end of multi-line comment", + ) + return + RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) + lineix = lineix_end + 1 def CleanseComments(line): - """Removes //-comments and single-line C-style /* */ comments. + """Removes //-comments and single-line C-style /* */ comments. - Args: - line: A line of C++ source. + Args: + line: A line of C++ source. - Returns: - The line with single-line comments removed. - """ - commentpos = line.find('//') - if commentpos != -1 and not IsCppString(line[:commentpos]): - line = line[:commentpos].rstrip() - # get rid of /* ... */ - return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) + Returns: + The line with single-line comments removed. + """ + commentpos = line.find("//") + if commentpos != -1 and not IsCppString(line[:commentpos]): + line = line[:commentpos].rstrip() + # get rid of /* ... 
*/ + return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub("", line) class CleansedLines(object): - """Holds 3 copies of all lines with different preprocessing applied to them. - - 1) elided member contains lines without strings and comments, - 2) lines member contains lines without comments, and - 3) raw_lines member contains all the lines without processing. - All these three members are of , and of the same length. - """ - - def __init__(self, lines): - self.elided = [] - self.lines = [] - self.raw_lines = lines - self.num_lines = len(lines) - self.lines_without_raw_strings = CleanseRawStrings(lines) - for linenum in range(len(self.lines_without_raw_strings)): - self.lines.append(CleanseComments( - self.lines_without_raw_strings[linenum])) - elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) - self.elided.append(CleanseComments(elided)) - - def NumLines(self): - """Returns the number of lines represented.""" - return self.num_lines - - @staticmethod - def _CollapseStrings(elided): - """Collapses strings and chars on a line to simple "" or '' blocks. - - We nix strings first so we're not fooled by text like '"http://"' + """Holds 3 copies of all lines with different preprocessing applied to them. + + 1) elided member contains lines without strings and comments, + 2) lines member contains lines without comments, and + 3) raw_lines member contains all the lines without processing. + All these three members are of , and of the same length. 
+ """ + + def __init__(self, lines): + self.elided = [] + self.lines = [] + self.raw_lines = lines + self.num_lines = len(lines) + self.lines_without_raw_strings = CleanseRawStrings(lines) + for linenum in range(len(self.lines_without_raw_strings)): + self.lines.append(CleanseComments(self.lines_without_raw_strings[linenum])) + elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) + self.elided.append(CleanseComments(elided)) + + def NumLines(self): + """Returns the number of lines represented.""" + return self.num_lines + + @staticmethod + def _CollapseStrings(elided): + """Collapses strings and chars on a line to simple "" or '' blocks. + + We nix strings first so we're not fooled by text like '"http://"' + + Args: + elided: The line being processed. + + Returns: + The line with collapsed strings. + """ + if _RE_PATTERN_INCLUDE.match(elided): + return elided + + # Remove escaped characters first to make quote/single quote collapsing + # basic. Things that look like escaped characters shouldn't occur + # outside of strings and chars. + elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub("", elided) + + # Replace quoted strings and digit separators. Both single quotes + # and double quotes are processed in the same loop, otherwise + # nested quotes wouldn't work. + collapsed = "" + while True: + # Find the first quote character + match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) + if not match: + collapsed += elided + break + head, quote, tail = match.groups() + + if quote == '"': + # Collapse double quoted strings + second_quote = tail.find('"') + if second_quote >= 0: + collapsed += head + '""' + elided = tail[second_quote + 1 :] + else: + # Unmatched double quote, don't bother processing the rest + # of the line since this is probably a multiline string. + collapsed += elided + break + else: + # Found single quote, check nearby text to eliminate digit separators. 
+ # + # There is no special handling for floating point here, because + # the integer/fractional/exponent parts would all be parsed + # correctly as long as there are digits on both sides of the + # separator. So we are fine as long as we don't see something + # like "0.'3" (gcc 4.9.0 will not allow this literal). + if Search(r"\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$", head): + match_literal = Match(r"^((?:\'?[0-9a-zA-Z_])*)(.*)$", "'" + tail) + collapsed += head + match_literal.group(1).replace("'", "") + elided = match_literal.group(2) + else: + second_quote = tail.find("'") + if second_quote >= 0: + collapsed += head + "''" + elided = tail[second_quote + 1 :] + else: + # Unmatched single quote + collapsed += elided + break + + return collapsed + + +def FindEndOfExpressionInLine(line, startpos, stack): + """Find the position just after the end of current parenthesized expression. Args: - elided: The line being processed. + line: a CleansedLines line. + startpos: start searching at this position. + stack: nesting stack at startpos. Returns: - The line with collapsed strings. + On finding matching end: (index just after matching end, None) + On finding an unclosed expression: (-1, None) + Otherwise: (-1, new stack at end of this line) """ - if _RE_PATTERN_INCLUDE.match(elided): - return elided - - # Remove escaped characters first to make quote/single quote collapsing - # basic. Things that look like escaped characters shouldn't occur - # outside of strings and chars. - elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) - - # Replace quoted strings and digit separators. Both single quotes - # and double quotes are processed in the same loop, otherwise - # nested quotes wouldn't work. 
- collapsed = '' - while True: - # Find the first quote character - match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) - if not match: - collapsed += elided - break - head, quote, tail = match.groups() - - if quote == '"': - # Collapse double quoted strings - second_quote = tail.find('"') - if second_quote >= 0: - collapsed += head + '""' - elided = tail[second_quote + 1:] - else: - # Unmatched double quote, don't bother processing the rest - # of the line since this is probably a multiline string. - collapsed += elided - break - else: - # Found single quote, check nearby text to eliminate digit separators. - # - # There is no special handling for floating point here, because - # the integer/fractional/exponent parts would all be parsed - # correctly as long as there are digits on both sides of the - # separator. So we are fine as long as we don't see something - # like "0.'3" (gcc 4.9.0 will not allow this literal). - if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): - match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) - collapsed += head + match_literal.group(1).replace("'", '') - elided = match_literal.group(2) - else: - second_quote = tail.find('\'') - if second_quote >= 0: - collapsed += head + "''" - elided = tail[second_quote + 1:] - else: - # Unmatched single quote - collapsed += elided - break + for i in range(startpos, len(line)): + char = line[i] + if char in "([{": + # Found start of parenthesized expression, push to expression stack + stack.append(char) + elif char == "<": + # Found potential start of template argument list + if i > 0 and line[i - 1] == "<": + # Left shift operator + if stack and stack[-1] == "<": + stack.pop() + if not stack: + return (-1, None) + elif i > 0 and Search(r"\boperator\s*$", line[0:i]): + # operator<, don't add to stack + continue + else: + # Tentative start of template argument list + stack.append("<") + elif char in ")]}": + # Found end of parenthesized expression. 
+ # + # If we are currently expecting a matching '>', the pending '<' + # must have been an operator. Remove them from expression stack. + while stack and stack[-1] == "<": + stack.pop() + if not stack: + return (-1, None) + if ( + (stack[-1] == "(" and char == ")") + or (stack[-1] == "[" and char == "]") + or (stack[-1] == "{" and char == "}") + ): + stack.pop() + if not stack: + return (i + 1, None) + else: + # Mismatched parentheses + return (-1, None) + elif char == ">": + # Found potential end of template argument list. + + # Ignore "->" and operator functions + if i > 0 and ( + line[i - 1] == "-" or Search(r"\boperator\s*$", line[0 : i - 1]) + ): + continue + + # Pop the stack if there is a matching '<'. Otherwise, ignore + # this '>' since it must be an operator. + if stack: + if stack[-1] == "<": + stack.pop() + if not stack: + return (i + 1, None) + elif char == ";": + # Found something that look like end of statements. If we are currently + # expecting a '>', the matching '<' must have been an operator, since + # template argument list should not contain statements. + while stack and stack[-1] == "<": + stack.pop() + if not stack: + return (-1, None) + + # Did not find end of expression or unbalanced parentheses on this line + return (-1, stack) - return collapsed +def CloseExpression(clean_lines, linenum, pos): + """If input points to ( or { or [ or <, finds the position that closes it. -def FindEndOfExpressionInLine(line, startpos, stack): - """Find the position just after the end of current parenthesized expression. - - Args: - line: a CleansedLines line. - startpos: start searching at this position. - stack: nesting stack at startpos. 
- - Returns: - On finding matching end: (index just after matching end, None) - On finding an unclosed expression: (-1, None) - Otherwise: (-1, new stack at end of this line) - """ - for i in range(startpos, len(line)): - char = line[i] - if char in '([{': - # Found start of parenthesized expression, push to expression stack - stack.append(char) - elif char == '<': - # Found potential start of template argument list - if i > 0 and line[i - 1] == '<': - # Left shift operator - if stack and stack[-1] == '<': - stack.pop() - if not stack: - return (-1, None) - elif i > 0 and Search(r'\boperator\s*$', line[0:i]): - # operator<, don't add to stack - continue - else: - # Tentative start of template argument list - stack.append('<') - elif char in ')]}': - # Found end of parenthesized expression. - # - # If we are currently expecting a matching '>', the pending '<' - # must have been an operator. Remove them from expression stack. - while stack and stack[-1] == '<': - stack.pop() - if not stack: - return (-1, None) - if ((stack[-1] == '(' and char == ')') or - (stack[-1] == '[' and char == ']') or - (stack[-1] == '{' and char == '}')): - stack.pop() - if not stack: - return (i + 1, None) - else: - # Mismatched parentheses - return (-1, None) - elif char == '>': - # Found potential end of template argument list. - - # Ignore "->" and operator functions - if (i > 0 and - (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): - continue - - # Pop the stack if there is a matching '<'. Otherwise, ignore - # this '>' since it must be an operator. - if stack: - if stack[-1] == '<': - stack.pop() - if not stack: - return (i + 1, None) - elif char == ';': - # Found something that look like end of statements. If we are currently - # expecting a '>', the matching '<' must have been an operator, since - # template argument list should not contain statements. 
- while stack and stack[-1] == '<': - stack.pop() - if not stack: - return (-1, None) - - # Did not find end of expression or unbalanced parentheses on this line - return (-1, stack) + If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the + linenum/pos that correspond to the closing of the expression. + TODO(unknown): cpplint spends a fair bit of time matching parentheses. + Ideally we would want to index all opening and closing parentheses once + and have CloseExpression be just a simple lookup, but due to preprocessor + tricks, this is not so easy. -def CloseExpression(clean_lines, linenum, pos): - """If input points to ( or { or [ or <, finds the position that closes it. - - If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the - linenum/pos that correspond to the closing of the expression. - - TODO(unknown): cpplint spends a fair bit of time matching parentheses. - Ideally we would want to index all opening and closing parentheses once - and have CloseExpression be just a simple lookup, but due to preprocessor - tricks, this is not so easy. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - pos: A position on the line. - - Returns: - A tuple (line, linenum, pos) pointer *past* the closing brace, or - (line, len(lines), -1) if we never find a close. Note we ignore - strings and comments when matching; and the line we return is the - 'cleansed' line at linenum. - """ - - line = clean_lines.elided[linenum] - if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): - return (line, clean_lines.NumLines(), -1) + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + pos: A position on the line. 
- # Check first line - (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) - if end_pos > -1: - return (line, linenum, end_pos) + Returns: + A tuple (line, linenum, pos) pointer *past* the closing brace, or + (line, len(lines), -1) if we never find a close. Note we ignore + strings and comments when matching; and the line we return is the + 'cleansed' line at linenum. + """ - # Continue scanning forward - while stack and linenum < clean_lines.NumLines() - 1: - linenum += 1 line = clean_lines.elided[linenum] - (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) + if (line[pos] not in "({[<") or Match(r"<[<=]", line[pos:]): + return (line, clean_lines.NumLines(), -1) + + # Check first line + (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) if end_pos > -1: - return (line, linenum, end_pos) + return (line, linenum, end_pos) + + # Continue scanning forward + while stack and linenum < clean_lines.NumLines() - 1: + linenum += 1 + line = clean_lines.elided[linenum] + (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) + if end_pos > -1: + return (line, linenum, end_pos) - # Did not find end of expression before end of file, give up - return (line, clean_lines.NumLines(), -1) + # Did not find end of expression before end of file, give up + return (line, clean_lines.NumLines(), -1) def FindStartOfExpressionInLine(line, endpos, stack): - """Find position at the matching start of current expression. - - This is almost the reverse of FindEndOfExpressionInLine, but note - that the input position and returned position differs by 1. - - Args: - line: a CleansedLines line. - endpos: start searching at this position. - stack: nesting stack at endpos. 
- - Returns: - On finding matching start: (index at matching start, None) - On finding an unclosed expression: (-1, None) - Otherwise: (-1, new stack at beginning of this line) - """ - i = endpos - while i >= 0: - char = line[i] - if char in ')]}': - # Found end of expression, push to expression stack - stack.append(char) - elif char == '>': - # Found potential end of template argument list. - # - # Ignore it if it's a "->" or ">=" or "operator>" - if (i > 0 and - (line[i - 1] == '-' or - Match(r'\s>=\s', line[i - 1:]) or - Search(r'\boperator\s*$', line[0:i]))): - i -= 1 - else: - stack.append('>') - elif char == '<': - # Found potential start of template argument list - if i > 0 and line[i - 1] == '<': - # Left shift operator + """Find position at the matching start of current expression. + + This is almost the reverse of FindEndOfExpressionInLine, but note + that the input position and returned position differs by 1. + + Args: + line: a CleansedLines line. + endpos: start searching at this position. + stack: nesting stack at endpos. + + Returns: + On finding matching start: (index at matching start, None) + On finding an unclosed expression: (-1, None) + Otherwise: (-1, new stack at beginning of this line) + """ + i = endpos + while i >= 0: + char = line[i] + if char in ")]}": + # Found end of expression, push to expression stack + stack.append(char) + elif char == ">": + # Found potential end of template argument list. + # + # Ignore it if it's a "->" or ">=" or "operator>" + if i > 0 and ( + line[i - 1] == "-" + or Match(r"\s>=\s", line[i - 1 :]) + or Search(r"\boperator\s*$", line[0:i]) + ): + i -= 1 + else: + stack.append(">") + elif char == "<": + # Found potential start of template argument list + if i > 0 and line[i - 1] == "<": + # Left shift operator + i -= 1 + else: + # If there is a matching '>', we can pop the expression stack. + # Otherwise, ignore this '<' since it must be an operator. 
+ if stack and stack[-1] == ">": + stack.pop() + if not stack: + return (i, None) + elif char in "([{": + # Found start of expression. + # + # If there are any unmatched '>' on the stack, they must be + # operators. Remove those. + while stack and stack[-1] == ">": + stack.pop() + if not stack: + return (-1, None) + if ( + (char == "(" and stack[-1] == ")") + or (char == "[" and stack[-1] == "]") + or (char == "{" and stack[-1] == "}") + ): + stack.pop() + if not stack: + return (i, None) + else: + # Mismatched parentheses + return (-1, None) + elif char == ";": + # Found something that look like end of statements. If we are currently + # expecting a '<', the matching '>' must have been an operator, since + # template argument list should not contain statements. + while stack and stack[-1] == ">": + stack.pop() + if not stack: + return (-1, None) + i -= 1 - else: - # If there is a matching '>', we can pop the expression stack. - # Otherwise, ignore this '<' since it must be an operator. - if stack and stack[-1] == '>': - stack.pop() - if not stack: - return (i, None) - elif char in '([{': - # Found start of expression. - # - # If there are any unmatched '>' on the stack, they must be - # operators. Remove those. - while stack and stack[-1] == '>': - stack.pop() - if not stack: - return (-1, None) - if ((char == '(' and stack[-1] == ')') or - (char == '[' and stack[-1] == ']') or - (char == '{' and stack[-1] == '}')): - stack.pop() - if not stack: - return (i, None) - else: - # Mismatched parentheses - return (-1, None) - elif char == ';': - # Found something that look like end of statements. If we are currently - # expecting a '<', the matching '>' must have been an operator, since - # template argument list should not contain statements. 
- while stack and stack[-1] == '>': - stack.pop() - if not stack: - return (-1, None) - - i -= 1 - - return (-1, stack) + + return (-1, stack) def ReverseCloseExpression(clean_lines, linenum, pos): - """If input points to ) or } or ] or >, finds the position that opens it. - - If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the - linenum/pos that correspond to the opening of the expression. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - pos: A position on the line. - - Returns: - A tuple (line, linenum, pos) pointer *at* the opening brace, or - (line, 0, -1) if we never find the matching opening brace. Note - we ignore strings and comments when matching; and the line we - return is the 'cleansed' line at linenum. - """ - line = clean_lines.elided[linenum] - if line[pos] not in ')}]>': - return (line, 0, -1) + """If input points to ) or } or ] or >, finds the position that opens it. - # Check last line - (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) - if start_pos > -1: - return (line, linenum, start_pos) + If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the + linenum/pos that correspond to the opening of the expression. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + pos: A position on the line. - # Continue scanning backward - while stack and linenum > 0: - linenum -= 1 + Returns: + A tuple (line, linenum, pos) pointer *at* the opening brace, or + (line, 0, -1) if we never find the matching opening brace. Note + we ignore strings and comments when matching; and the line we + return is the 'cleansed' line at linenum. 
+ """ line = clean_lines.elided[linenum] - (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) + if line[pos] not in ")}]>": + return (line, 0, -1) + + # Check last line + (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) if start_pos > -1: - return (line, linenum, start_pos) + return (line, linenum, start_pos) + + # Continue scanning backward + while stack and linenum > 0: + linenum -= 1 + line = clean_lines.elided[linenum] + (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) + if start_pos > -1: + return (line, linenum, start_pos) - # Did not find start of expression before beginning of file, give up - return (line, 0, -1) + # Did not find start of expression before beginning of file, give up + return (line, 0, -1) def CheckForCopyright(filename, lines, error): - """Logs an error if no Copyright message appears at the top of the file.""" + """Logs an error if no Copyright message appears at the top of the file.""" - # We'll say it should occur by line 10. Don't forget there's a - # dummy line at the front. - for line in range(1, min(len(lines), 11)): - if re.search(r'Copyright', lines[line], re.I): break - else: # means no copyright line was found - error(filename, 0, 'legal/copyright', 5, - 'No copyright message found. ' - 'You should have a line: "Copyright [year] "') + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. + for line in range(1, min(len(lines), 11)): + if re.search(r"Copyright", lines[line], re.I): + break + else: # means no copyright line was found + error( + filename, + 0, + "legal/copyright", + 5, + "No copyright message found. " + 'You should have a line: "Copyright [year] "', + ) def GetIndentLevel(line): - """Return the number of leading spaces in line. + """Return the number of leading spaces in line. - Args: - line: A string to check. + Args: + line: A string to check. - Returns: - An integer count of leading spaces, possibly zero. 
- """ - indent = Match(r'^( *)\S', line) - if indent: - return len(indent.group(1)) - else: - return 0 + Returns: + An integer count of leading spaces, possibly zero. + """ + indent = Match(r"^( *)\S", line) + if indent: + return len(indent.group(1)) + else: + return 0 def GetHeaderGuardCPPVariable(filename): - """Returns the CPP variable that should be used as a header guard. + """Returns the CPP variable that should be used as a header guard. - Args: - filename: The name of a C++ header file. + Args: + filename: The name of a C++ header file. - Returns: - The CPP variable that should be used as a header guard in the - named file. + Returns: + The CPP variable that should be used as a header guard in the + named file. - """ + """ - # Restores original filename in case that cpplint is invoked from Emacs's - # flymake. - filename = re.sub(r'_flymake\.h$', '.h', filename) - filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) + # Restores original filename in case that cpplint is invoked from Emacs's + # flymake. + filename = re.sub(r"_flymake\.h$", ".h", filename) + filename = re.sub(r"/\.flymake/([^/]*)$", r"/\1", filename) - fileinfo = FileInfo(filename) - file_path_from_root = fileinfo.RepositoryName() - if _root: - file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) - return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' + fileinfo = FileInfo(filename) + file_path_from_root = fileinfo.RepositoryName() + if _root: + file_path_from_root = re.sub("^" + _root + os.sep, "", file_path_from_root) + return re.sub(r"[-./\s]", "_", file_path_from_root).upper() + "_" def CheckForHeaderGuard(filename, lines, error): - """Checks that the file contains a header guard. - - Logs an error if no #ifndef header guard is present. For other - headers, checks that the full pathname is used. - - Args: - filename: The name of the C++ header file. - lines: An array of strings, each representing a line of the file. 
- error: The function to call with any errors found. - """ - - # Don't check for header guards if there are error suppression - # comments somewhere in this file. - # - # Because this is silencing a warning for a nonexistent line, we - # only support the very specific NOLINT(build/header_guard) syntax, - # and not the general NOLINT or NOLINT(*) syntax. - for i in lines: - if Search(r'//\s*NOLINT\(build/header_guard\)', i): - return - - cppvar = GetHeaderGuardCPPVariable(filename) - - ifndef = None - ifndef_linenum = 0 - define = None - endif = None - endif_linenum = 0 - for linenum, line in enumerate(lines): - linesplit = line.split() - if len(linesplit) >= 2: - # find the first occurrence of #ifndef and #define, save arg - if not ifndef and linesplit[0] == '#ifndef': - # set ifndef to the header guard presented on the #ifndef line. - ifndef = linesplit[1] - ifndef_linenum = linenum - if not define and linesplit[0] == '#define': - define = linesplit[1] - # find the last occurrence of #endif, save entire line - if line.startswith('#endif'): - endif = line - endif_linenum = linenum - - if not ifndef: - error(filename, 0, 'build/header_guard', 5, - 'No #ifndef header guard found, suggested CPP variable is: %s' % - cppvar) - return - - if not define: - error(filename, 0, 'build/header_guard', 5, - 'No #define header guard found, suggested CPP variable is: %s' % - cppvar) - return - - # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ - # for backward compatibility. 
- if ifndef != cppvar: - error_level = 0 - if ifndef != cppvar + '_': - error_level = 5 - - ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, - error) - error(filename, ifndef_linenum, 'build/header_guard', error_level, - '#ifndef header guard has wrong style, please use: %s' % cppvar) - - if define != ifndef: - error(filename, 0, 'build/header_guard', 5, - '#ifndef and #define don\'t match, suggested CPP variable is: %s' % - cppvar) - return - - if endif != ('#endif // %s' % cppvar): - error_level = 0 - if endif != ('#endif // %s' % (cppvar + '_')): - error_level = 5 - - ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, - error) - error(filename, endif_linenum, 'build/header_guard', error_level, - '#endif line should be "#endif // %s"' % cppvar) + """Checks that the file contains a header guard. + + Logs an error if no #ifndef header guard is present. For other + headers, checks that the full pathname is used. + + Args: + filename: The name of the C++ header file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + # Don't check for header guards if there are error suppression + # comments somewhere in this file. + # + # Because this is silencing a warning for a nonexistent line, we + # only support the very specific NOLINT(build/header_guard) syntax, + # and not the general NOLINT or NOLINT(*) syntax. + for i in lines: + if Search(r"//\s*NOLINT\(build/header_guard\)", i): + return + + cppvar = GetHeaderGuardCPPVariable(filename) + + ifndef = None + ifndef_linenum = 0 + define = None + endif = None + endif_linenum = 0 + for linenum, line in enumerate(lines): + linesplit = line.split() + if len(linesplit) >= 2: + # find the first occurrence of #ifndef and #define, save arg + if not ifndef and linesplit[0] == "#ifndef": + # set ifndef to the header guard presented on the #ifndef line. 
+ ifndef = linesplit[1] + ifndef_linenum = linenum + if not define and linesplit[0] == "#define": + define = linesplit[1] + # find the last occurrence of #endif, save entire line + if line.startswith("#endif"): + endif = line + endif_linenum = linenum + + if not ifndef: + error( + filename, + 0, + "build/header_guard", + 5, + "No #ifndef header guard found, suggested CPP variable is: %s" % cppvar, + ) + return + + if not define: + error( + filename, + 0, + "build/header_guard", + 5, + "No #define header guard found, suggested CPP variable is: %s" % cppvar, + ) + return + + # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ + # for backward compatibility. + if ifndef != cppvar: + error_level = 0 + if ifndef != cppvar + "_": + error_level = 5 + + ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, error) + error( + filename, + ifndef_linenum, + "build/header_guard", + error_level, + "#ifndef header guard has wrong style, please use: %s" % cppvar, + ) + + if define != ifndef: + error( + filename, + 0, + "build/header_guard", + 5, + "#ifndef and #define don't match, suggested CPP variable is: %s" % cppvar, + ) + return + + if endif != ("#endif // %s" % cppvar): + error_level = 0 + if endif != ("#endif // %s" % (cppvar + "_")): + error_level = 5 + + ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, error) + error( + filename, + endif_linenum, + "build/header_guard", + error_level, + '#endif line should be "#endif // %s"' % cppvar, + ) def CheckForBadCharacters(filename, lines, error): - """Logs an error for each line containing bad characters. + """Logs an error for each line containing bad characters. - Two kinds of bad characters: + Two kinds of bad characters: - 1. Unicode replacement characters: These indicate that either the file - contained invalid UTF-8 (likely) or Unicode replacement characters (which - it shouldn't). 
Note that it's possible for this to throw off line
-  numbering if the invalid UTF-8 occurred adjacent to a newline.
+    1. Unicode replacement characters: These indicate that either the file
+    contained invalid UTF-8 (likely) or Unicode replacement characters (which
+    it shouldn't). Note that it's possible for this to throw off line
+    numbering if the invalid UTF-8 occurred adjacent to a newline.
 
-  2. NUL bytes. These are problematic for some tools.
+    2. NUL bytes. These are problematic for some tools.
 
-  Args:
-    filename: The name of the current file.
-    lines: An array of strings, each representing a line of the file.
-    error: The function to call with any errors found.
-  """
-  for linenum, line in enumerate(lines):
-    if '\ufffd' in line:
-      error(filename, linenum, 'readability/utf8', 5,
-            'Line contains invalid UTF-8 (or Unicode replacement character).')
-    if '\0' in line:
-      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
+    Args:
+        filename: The name of the current file.
+        lines: An array of strings, each representing a line of the file.
+        error: The function to call with any errors found.
+    """
+    for linenum, line in enumerate(lines):
+        if "\ufffd" in line:
+            error(
+                filename,
+                linenum,
+                "readability/utf8",
+                5,
+                "Line contains invalid UTF-8 (or Unicode replacement character).",
+            )
+        if "\0" in line:
+            error(filename, linenum, "readability/nul", 5, "Line contains NUL byte.")
 
 
 def CheckForNewlineAtEOF(filename, lines, error):
-  """Logs an error if there is no newline char at the end of the file.
+    """Logs an error if there is no newline char at the end of the file.
 
-  Args:
-    filename: The name of the current file.
-    lines: An array of strings, each representing a line of the file.
-    error: The function to call with any errors found.
-  """
+    Args:
+        filename: The name of the current file.
+        lines: An array of strings, each representing a line of the file.
+        error: The function to call with any errors found.
+ """ - # The array lines() was created by adding two newlines to the - # original file (go figure), then splitting on \n. - # To verify that the file ends in \n, we just have to make sure the - # last-but-two element of lines() exists and is empty. - if len(lines) < 3 or lines[-2]: - error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, - 'Could not find a newline character at the end of the file.') + # The array lines() was created by adding two newlines to the + # original file (go figure), then splitting on \n. + # To verify that the file ends in \n, we just have to make sure the + # last-but-two element of lines() exists and is empty. + if len(lines) < 3 or lines[-2]: + error( + filename, + len(lines) - 2, + "whitespace/ending_newline", + 5, + "Could not find a newline character at the end of the file.", + ) def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): - """Logs an error if we see /* ... */ or "..." that extend past one line. - - /* ... */ comments are legit inside macros, for one line. - Otherwise, we prefer // comments, so it's ok to warn about the - other. Likewise, it's ok for strings to extend across multiple - lines, as long as a line continuation character (backslash) - terminates each line. Although not currently prohibited by the C++ - style guide, it's ugly and unnecessary. We don't do well with either - in this lint program, so we warn about both. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Remove all \\ (escaped backslashes) from the line. They are OK, and the - # second (escaped) slash may trigger later \" detection erroneously. 
- line = line.replace('\\\\', '') - - if line.count('/*') > line.count('*/'): - error(filename, linenum, 'readability/multiline_comment', 5, - 'Complex multi-line /*...*/-style comment found. ' - 'Lint may give bogus warnings. ' - 'Consider replacing these with //-style comments, ' - 'with #if 0...#endif, ' - 'or with more clearly structured multi-line comments.') - - if (line.count('"') - line.count('\\"')) % 2: - error(filename, linenum, 'readability/multiline_string', 5, - 'Multi-line string ("...") found. This lint script doesn\'t ' - 'do well with such strings, and may give bogus warnings. ' - 'Use C++11 raw strings or concatenation instead.') + """Logs an error if we see /* ... */ or "..." that extend past one line. + + /* ... */ comments are legit inside macros, for one line. + Otherwise, we prefer // comments, so it's ok to warn about the + other. Likewise, it's ok for strings to extend across multiple + lines, as long as a line continuation character (backslash) + terminates each line. Although not currently prohibited by the C++ + style guide, it's ugly and unnecessary. We don't do well with either + in this lint program, so we warn about both. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Remove all \\ (escaped backslashes) from the line. They are OK, and the + # second (escaped) slash may trigger later \" detection erroneously. + line = line.replace("\\\\", "") + + if line.count("/*") > line.count("*/"): + error( + filename, + linenum, + "readability/multiline_comment", + 5, + "Complex multi-line /*...*/-style comment found. " + "Lint may give bogus warnings. 
" + "Consider replacing these with //-style comments, " + "with #if 0...#endif, " + "or with more clearly structured multi-line comments.", + ) + + if (line.count('"') - line.count('\\"')) % 2: + error( + filename, + linenum, + "readability/multiline_string", + 5, + 'Multi-line string ("...") found. This lint script doesn\'t ' + "do well with such strings, and may give bogus warnings. " + "Use C++11 raw strings or concatenation instead.", + ) # (non-threadsafe name, thread-safe alternative, validation pattern) @@ -1847,4299 +1967,5097 @@ def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): # in some expression context on the same line by matching on some # operator before the function name. This eliminates constructors and # member function calls. -_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)' +_UNSAFE_FUNC_PREFIX = r"(?:[-+*/=%^&|(<]\s*|>\s+)" _THREADING_LIST = ( - ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'), - ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'), - ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'), - ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'), - ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'), - ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'), - ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'), - ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'), - ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'), - ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'), - ('strtok(', 'strtok_r(', - _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'), - ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'), - ) + ("asctime(", "asctime_r(", _UNSAFE_FUNC_PREFIX + r"asctime\([^)]+\)"), + ("ctime(", "ctime_r(", _UNSAFE_FUNC_PREFIX + r"ctime\([^)]+\)"), + ("getgrgid(", "getgrgid_r(", _UNSAFE_FUNC_PREFIX + r"getgrgid\([^)]+\)"), + 
("getgrnam(", "getgrnam_r(", _UNSAFE_FUNC_PREFIX + r"getgrnam\([^)]+\)"), + ("getlogin(", "getlogin_r(", _UNSAFE_FUNC_PREFIX + r"getlogin\(\)"), + ("getpwnam(", "getpwnam_r(", _UNSAFE_FUNC_PREFIX + r"getpwnam\([^)]+\)"), + ("getpwuid(", "getpwuid_r(", _UNSAFE_FUNC_PREFIX + r"getpwuid\([^)]+\)"), + ("gmtime(", "gmtime_r(", _UNSAFE_FUNC_PREFIX + r"gmtime\([^)]+\)"), + ("localtime(", "localtime_r(", _UNSAFE_FUNC_PREFIX + r"localtime\([^)]+\)"), + ("rand(", "rand_r(", _UNSAFE_FUNC_PREFIX + r"rand\(\)"), + ("strtok(", "strtok_r(", _UNSAFE_FUNC_PREFIX + r"strtok\([^)]+\)"), + ("ttyname(", "ttyname_r(", _UNSAFE_FUNC_PREFIX + r"ttyname\([^)]+\)"), +) def CheckPosixThreading(filename, clean_lines, linenum, error): - """Checks for calls to thread-unsafe functions. - - Much code has been originally written without consideration of - multi-threading. Also, engineers are relying on their old experience; - they have learned posix before threading extensions were added. These - tests guide the engineers to use thread-safe functions (when using - posix directly). - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: - # Additional pattern matching check to confirm that this is the - # function we are looking for - if Search(pattern, line): - error(filename, linenum, 'runtime/threadsafe_fn', 2, - 'Consider using ' + multithread_safe_func + - '...) instead of ' + single_thread_func + - '...) for improved thread safety.') + """Checks for calls to thread-unsafe functions. + + Much code has been originally written without consideration of + multi-threading. Also, engineers are relying on their old experience; + they have learned posix before threading extensions were added. 
These + tests guide the engineers to use thread-safe functions (when using + posix directly). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: + # Additional pattern matching check to confirm that this is the + # function we are looking for + if Search(pattern, line): + error( + filename, + linenum, + "runtime/threadsafe_fn", + 2, + "Consider using " + + multithread_safe_func + + "...) instead of " + + single_thread_func + + "...) for improved thread safety.", + ) def CheckVlogArguments(filename, clean_lines, linenum, error): - """Checks that VLOG() is only used for defining a logging level. - - For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and - VLOG(FATAL) are not. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): - error(filename, linenum, 'runtime/vlog', 5, - 'VLOG() should be used with numeric verbosity level. ' - 'Use LOG() if you want symbolic severity levels.') + """Checks that VLOG() is only used for defining a logging level. + + For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and + VLOG(FATAL) are not. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. 
+ """ + line = clean_lines.elided[linenum] + if Search(r"\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)", line): + error( + filename, + linenum, + "runtime/vlog", + 5, + "VLOG() should be used with numeric verbosity level. " + "Use LOG() if you want symbolic severity levels.", + ) + # Matches invalid increment: *count++, which moves pointer instead of # incrementing a value. -_RE_PATTERN_INVALID_INCREMENT = re.compile( - r'^\s*\*\w+(\+\+|--);') +_RE_PATTERN_INVALID_INCREMENT = re.compile(r"^\s*\*\w+(\+\+|--);") def CheckInvalidIncrement(filename, clean_lines, linenum, error): - """Checks for invalid increment *count++. - - For example following function: - void increment_counter(int* count) { - *count++; - } - is invalid, because it effectively does count++, moving pointer, and should - be replaced with ++*count, (*count)++ or *count += 1. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - if _RE_PATTERN_INVALID_INCREMENT.match(line): - error(filename, linenum, 'runtime/invalid_increment', 5, - 'Changing pointer instead of value (or unused value of operator*).') + """Checks for invalid increment *count++. + + For example following function: + void increment_counter(int* count) { + *count++; + } + is invalid, because it effectively does count++, moving pointer, and should + be replaced with ++*count, (*count)++ or *count += 1. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. 
+ """ + line = clean_lines.elided[linenum] + if _RE_PATTERN_INVALID_INCREMENT.match(line): + error( + filename, + linenum, + "runtime/invalid_increment", + 5, + "Changing pointer instead of value (or unused value of operator*).", + ) def IsMacroDefinition(clean_lines, linenum): - if Search(r'^#define', clean_lines[linenum]): - return True + if Search(r"^#define", clean_lines[linenum]): + return True - if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): - return True + if linenum > 0 and Search(r"\\$", clean_lines[linenum - 1]): + return True - return False + return False def IsForwardClassDeclaration(clean_lines, linenum): - return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) + return Match(r"^\s*(\btemplate\b)*.*class\s+\w+;\s*$", clean_lines[linenum]) class _BlockInfo(object): - """Stores information about a generic block of code.""" + """Stores information about a generic block of code.""" + + def __init__(self, seen_open_brace): + self.seen_open_brace = seen_open_brace + self.open_parentheses = 0 + self.inline_asm = _NO_ASM + self.check_namespace_indentation = False + + def CheckBegin(self, filename, clean_lines, linenum, error): + """Run checks that applies to text up to the opening brace. + + This is mostly for checking the text after the class identifier + and the "{", usually where the base class is specified. For other + blocks, there isn't much to check, so we always pass. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + pass - def __init__(self, seen_open_brace): - self.seen_open_brace = seen_open_brace - self.open_parentheses = 0 - self.inline_asm = _NO_ASM - self.check_namespace_indentation = False + def CheckEnd(self, filename, clean_lines, linenum, error): + """Run checks that applies to text after the closing brace. 
- def CheckBegin(self, filename, clean_lines, linenum, error): - """Run checks that applies to text up to the opening brace. + This is mostly used for checking end of namespace comments. - This is mostly for checking the text after the class identifier - and the "{", usually where the base class is specified. For other - blocks, there isn't much to check, so we always pass. + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + pass - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - pass - - def CheckEnd(self, filename, clean_lines, linenum, error): - """Run checks that applies to text after the closing brace. - - This is mostly used for checking end of namespace comments. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - pass + def IsBlockInfo(self): + """Returns true if this block is a _BlockInfo. - def IsBlockInfo(self): - """Returns true if this block is a _BlockInfo. + This is convenient for verifying that an object is an instance of + a _BlockInfo, but not an instance of any of the derived classes. - This is convenient for verifying that an object is an instance of - a _BlockInfo, but not an instance of any of the derived classes. - - Returns: - True for this class, False for derived classes. - """ - return self.__class__ == _BlockInfo + Returns: + True for this class, False for derived classes. 
+ """ + return self.__class__ == _BlockInfo class _ExternCInfo(_BlockInfo): - """Stores information about an 'extern "C"' block.""" + """Stores information about an 'extern "C"' block.""" - def __init__(self): - _BlockInfo.__init__(self, True) + def __init__(self): + _BlockInfo.__init__(self, True) class _ClassInfo(_BlockInfo): - """Stores information about a class.""" - - def __init__(self, name, class_or_struct, clean_lines, linenum): - _BlockInfo.__init__(self, False) - self.name = name - self.starting_linenum = linenum - self.is_derived = False - self.check_namespace_indentation = True - if class_or_struct == 'struct': - self.access = 'public' - self.is_struct = True - else: - self.access = 'private' - self.is_struct = False + """Stores information about a class.""" + + def __init__(self, name, class_or_struct, clean_lines, linenum): + _BlockInfo.__init__(self, False) + self.name = name + self.starting_linenum = linenum + self.is_derived = False + self.check_namespace_indentation = True + if class_or_struct == "struct": + self.access = "public" + self.is_struct = True + else: + self.access = "private" + self.is_struct = False - # Remember initial indentation level for this class. Using raw_lines here - # instead of elided to account for leading comments. - self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) + # Remember initial indentation level for this class. Using raw_lines here + # instead of elided to account for leading comments. + self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) - # Try to find the end of the class. This will be confused by things like: - # class A { - # } *x = { ... - # - # But it's still good enough for CheckSectionSpacing. 
- self.last_line = 0 - depth = 0 - for i in range(linenum, clean_lines.NumLines()): - line = clean_lines.elided[i] - depth += line.count('{') - line.count('}') - if not depth: - self.last_line = i - break - - def CheckBegin(self, filename, clean_lines, linenum, error): - # Look for a bare ':' - if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): - self.is_derived = True - - def CheckEnd(self, filename, clean_lines, linenum, error): - # Check that closing brace is aligned with beginning of the class. - # Only do this if the closing brace is indented by only whitespaces. - # This means we will not check single-line class definitions. - indent = Match(r'^( *)\}', clean_lines.elided[linenum]) - if indent and len(indent.group(1)) != self.class_indent: - if self.is_struct: - parent = 'struct ' + self.name - else: - parent = 'class ' + self.name - error(filename, linenum, 'whitespace/indent', 3, - 'Closing brace should be aligned with beginning of %s' % parent) + # Try to find the end of the class. This will be confused by things like: + # class A { + # } *x = { ... + # + # But it's still good enough for CheckSectionSpacing. + self.last_line = 0 + depth = 0 + for i in range(linenum, clean_lines.NumLines()): + line = clean_lines.elided[i] + depth += line.count("{") - line.count("}") + if not depth: + self.last_line = i + break + + def CheckBegin(self, filename, clean_lines, linenum, error): + # Look for a bare ':' + if Search("(^|[^:]):($|[^:])", clean_lines.elided[linenum]): + self.is_derived = True + + def CheckEnd(self, filename, clean_lines, linenum, error): + # Check that closing brace is aligned with beginning of the class. + # Only do this if the closing brace is indented by only whitespaces. + # This means we will not check single-line class definitions. 
+ indent = Match(r"^( *)\}", clean_lines.elided[linenum]) + if indent and len(indent.group(1)) != self.class_indent: + if self.is_struct: + parent = "struct " + self.name + else: + parent = "class " + self.name + error( + filename, + linenum, + "whitespace/indent", + 3, + "Closing brace should be aligned with beginning of %s" % parent, + ) class _NamespaceInfo(_BlockInfo): - """Stores information about a namespace.""" - - def __init__(self, name, linenum): - _BlockInfo.__init__(self, False) - self.name = name or '' - self.starting_linenum = linenum - self.check_namespace_indentation = True - - def CheckEnd(self, filename, clean_lines, linenum, error): - """Check end of namespace comments.""" - line = clean_lines.raw_lines[linenum] - - # Check how many lines is enclosed in this namespace. Don't issue - # warning for missing namespace comments if there aren't enough - # lines. However, do apply checks if there is already an end of - # namespace comment and it's incorrect. - # - # TODO(unknown): We always want to check end of namespace comments - # if a namespace is large, but sometimes we also want to apply the - # check if a short namespace contained nontrivial things (something - # other than forward declarations). There is currently no logic on - # deciding what these nontrivial things are, so this check is - # triggered by namespace size only, which works most of the time. - if (linenum - self.starting_linenum < 10 - and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)): - return - - # Look for matching comment at end of namespace. - # - # Note that we accept C style "/* */" comments for terminating - # namespaces, so that code that terminate namespaces inside - # preprocessor macros can be cpplint clean. - # - # We also accept stuff like "// end of namespace ." with the - # period at the end. - # - # Besides these, we don't accept anything else, otherwise we might - # get false negatives when existing comment is a substring of the - # expected namespace. 
- if self.name: - # Named namespace - if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + - r'[\*/\.\\\s]*$'), - line): - error(filename, linenum, 'readability/namespace', 5, - 'Namespace should be terminated with "// namespace %s"' % - self.name) - else: - # Anonymous namespace - if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): - # If "// namespace anonymous" or "// anonymous namespace (more text)", - # mention "// anonymous namespace" as an acceptable form - if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line): - error(filename, linenum, 'readability/namespace', 5, - 'Anonymous namespace should be terminated with "// namespace"' - ' or "// anonymous namespace"') + """Stores information about a namespace.""" + + def __init__(self, name, linenum): + _BlockInfo.__init__(self, False) + self.name = name or "" + self.starting_linenum = linenum + self.check_namespace_indentation = True + + def CheckEnd(self, filename, clean_lines, linenum, error): + """Check end of namespace comments.""" + line = clean_lines.raw_lines[linenum] + + # Check how many lines is enclosed in this namespace. Don't issue + # warning for missing namespace comments if there aren't enough + # lines. However, do apply checks if there is already an end of + # namespace comment and it's incorrect. + # + # TODO(unknown): We always want to check end of namespace comments + # if a namespace is large, but sometimes we also want to apply the + # check if a short namespace contained nontrivial things (something + # other than forward declarations). There is currently no logic on + # deciding what these nontrivial things are, so this check is + # triggered by namespace size only, which works most of the time. + if linenum - self.starting_linenum < 10 and not Match( + r"};*\s*(//|/\*).*\bnamespace\b", line + ): + return + + # Look for matching comment at end of namespace. 
+ # + # Note that we accept C style "/* */" comments for terminating + # namespaces, so that code that terminate namespaces inside + # preprocessor macros can be cpplint clean. + # + # We also accept stuff like "// end of namespace ." with the + # period at the end. + # + # Besides these, we don't accept anything else, otherwise we might + # get false negatives when existing comment is a substring of the + # expected namespace. + if self.name: + # Named namespace + if not Match( + ( + r"};*\s*(//|/\*).*\bnamespace\s+" + + re.escape(self.name) + + r"[\*/\.\\\s]*$" + ), + line, + ): + error( + filename, + linenum, + "readability/namespace", + 5, + 'Namespace should be terminated with "// namespace %s"' % self.name, + ) else: - error(filename, linenum, 'readability/namespace', 5, - 'Anonymous namespace should be terminated with "// namespace"') + # Anonymous namespace + if not Match(r"};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$", line): + # If "// namespace anonymous" or "// anonymous namespace (more text)", + # mention "// anonymous namespace" as an acceptable form + if Match(r"}.*\b(namespace anonymous|anonymous namespace)\b", line): + error( + filename, + linenum, + "readability/namespace", + 5, + 'Anonymous namespace should be terminated with "// namespace"' + ' or "// anonymous namespace"', + ) + else: + error( + filename, + linenum, + "readability/namespace", + 5, + 'Anonymous namespace should be terminated with "// namespace"', + ) class _PreprocessorInfo(object): - """Stores checkpoints of nesting stacks when #if/#else is seen.""" + """Stores checkpoints of nesting stacks when #if/#else is seen.""" - def __init__(self, stack_before_if): - # The entire nesting stack before #if - self.stack_before_if = stack_before_if + def __init__(self, stack_before_if): + # The entire nesting stack before #if + self.stack_before_if = stack_before_if - # The entire nesting stack up to #else - self.stack_before_else = [] + # The entire nesting stack up to #else + 
self.stack_before_else = [] - # Whether we have already seen #else or #elif - self.seen_else = False + # Whether we have already seen #else or #elif + self.seen_else = False class NestingState(object): - """Holds states related to parsing braces.""" - - def __init__(self): - # Stack for tracking all braces. An object is pushed whenever we - # see a "{", and popped when we see a "}". Only 3 types of - # objects are possible: - # - _ClassInfo: a class or struct. - # - _NamespaceInfo: a namespace. - # - _BlockInfo: some other type of block. - self.stack = [] - - # Top of the previous stack before each Update(). - # - # Because the nesting_stack is updated at the end of each line, we - # had to do some convoluted checks to find out what is the current - # scope at the beginning of the line. This check is simplified by - # saving the previous top of nesting stack. - # - # We could save the full stack, but we only need the top. Copying - # the full nesting stack would slow down cpplint by ~10%. - self.previous_stack_top = [] + """Holds states related to parsing braces.""" + + def __init__(self): + # Stack for tracking all braces. An object is pushed whenever we + # see a "{", and popped when we see a "}". Only 3 types of + # objects are possible: + # - _ClassInfo: a class or struct. + # - _NamespaceInfo: a namespace. + # - _BlockInfo: some other type of block. + self.stack = [] + + # Top of the previous stack before each Update(). + # + # Because the nesting_stack is updated at the end of each line, we + # had to do some convoluted checks to find out what is the current + # scope at the beginning of the line. This check is simplified by + # saving the previous top of nesting stack. + # + # We could save the full stack, but we only need the top. Copying + # the full nesting stack would slow down cpplint by ~10%. + self.previous_stack_top = [] + + # Stack of _PreprocessorInfo objects. 
+ self.pp_stack = [] + + def SeenOpenBrace(self): + """Check if we have seen the opening brace for the innermost block. + + Returns: + True if we have seen the opening brace, False if the innermost + block is still expecting an opening brace. + """ + return (not self.stack) or self.stack[-1].seen_open_brace + + def InNamespaceBody(self): + """Check if we are currently one level inside a namespace body. + + Returns: + True if top of the stack is a namespace block, False otherwise. + """ + return self.stack and isinstance(self.stack[-1], _NamespaceInfo) + + def InExternC(self): + """Check if we are currently one level inside an 'extern "C"' block. + + Returns: + True if top of the stack is an extern block, False otherwise. + """ + return self.stack and isinstance(self.stack[-1], _ExternCInfo) + + def InClassDeclaration(self): + """Check if we are currently one level inside a class or struct declaration. + + Returns: + True if top of the stack is a class/struct, False otherwise. + """ + return self.stack and isinstance(self.stack[-1], _ClassInfo) + + def InAsmBlock(self): + """Check if we are currently one level inside an inline ASM block. + + Returns: + True if the top of the stack is a block containing inline ASM. + """ + return self.stack and self.stack[-1].inline_asm != _NO_ASM + + def InTemplateArgumentList(self, clean_lines, linenum, pos): + """Check if current position is inside template argument list. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + pos: position just after the suspected template argument. + Returns: + True if (linenum, pos) is inside template arguments. 
+ """ + while linenum < clean_lines.NumLines(): + # Find the earliest character that might indicate a template argument + line = clean_lines.elided[linenum] + match = Match(r"^[^{};=\[\]\.<>]*(.)", line[pos:]) + if not match: + linenum += 1 + pos = 0 + continue + token = match.group(1) + pos += len(match.group(0)) + + # These things do not look like template argument list: + # class Suspect { + # class Suspect x; } + if token in ("{", "}", ";"): + return False - # Stack of _PreprocessorInfo objects. - self.pp_stack = [] + # These things look like template argument list: + # template + # template + # template + # template + if token in (">", "=", "[", "]", "."): + return True + + # Check if token is an unmatched '<'. + # If not, move on to the next character. + if token != "<": + pos += 1 + if pos >= len(line): + linenum += 1 + pos = 0 + continue + + # We can't be sure if we just find a single '<', and need to + # find the matching '>'. + (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) + if end_pos < 0: + # Not sure if template argument list or syntax error in file + return False + linenum = end_line + pos = end_pos + return False - def SeenOpenBrace(self): - """Check if we have seen the opening brace for the innermost block. + def UpdatePreprocessor(self, line): + """Update preprocessor stack. + + We need to handle preprocessors due to classes like this: + #ifdef SWIG + struct ResultDetailsPageElementExtensionPoint { + #else + struct ResultDetailsPageElementExtensionPoint : public Extension { + #endif + + We make the following assumptions (good enough for most files): + - Preprocessor condition evaluates to true from #if up to first + #else/#elif/#endif. + + - Preprocessor condition evaluates to false from #else/#elif up + to #endif. We still perform lint checks on these lines, but + these do not affect nesting stack. + + Args: + line: current line to check. 
+ """ + if Match(r"^\s*#\s*(if|ifdef|ifndef)\b", line): + # Beginning of #if block, save the nesting stack here. The saved + # stack will allow us to restore the parsing state in the #else case. + self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) + elif Match(r"^\s*#\s*(else|elif)\b", line): + # Beginning of #else block + if self.pp_stack: + if not self.pp_stack[-1].seen_else: + # This is the first #else or #elif block. Remember the + # whole nesting stack up to this point. This is what we + # keep after the #endif. + self.pp_stack[-1].seen_else = True + self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) + + # Restore the stack to how it was before the #if + self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) + else: + # TODO(unknown): unexpected #else, issue warning? + pass + elif Match(r"^\s*#\s*endif\b", line): + # End of #if or #else blocks. + if self.pp_stack: + # If we saw an #else, we will need to restore the nesting + # stack to its former state before the #else, otherwise we + # will just continue from where we left off. + if self.pp_stack[-1].seen_else: + # Here we can just use a shallow copy since we are the last + # reference to it. + self.stack = self.pp_stack[-1].stack_before_else + # Drop the corresponding #if + self.pp_stack.pop() + else: + # TODO(unknown): unexpected #endif, issue warning? + pass + + # TODO(unknown): Update() is too long, but we will refactor later. + def Update(self, filename, clean_lines, linenum, error): + """Update nesting state with current line. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Remember top of the previous nesting stack. + # + # The stack is always pushed/popped and not modified in place, so + # we can just do a shallow copy instead of copy.deepcopy. 
Using + # deepcopy would slow down cpplint by ~28%. + if self.stack: + self.previous_stack_top = self.stack[-1] + else: + self.previous_stack_top = None - Returns: - True if we have seen the opening brace, False if the innermost - block is still expecting an opening brace. - """ - return (not self.stack) or self.stack[-1].seen_open_brace + # Update pp_stack + self.UpdatePreprocessor(line) - def InNamespaceBody(self): - """Check if we are currently one level inside a namespace body. + # Count parentheses. This is to avoid adding struct arguments to + # the nesting stack. + if self.stack: + inner_block = self.stack[-1] + depth_change = line.count("(") - line.count(")") + inner_block.open_parentheses += depth_change + + # Also check if we are starting or ending an inline assembly block. + if inner_block.inline_asm in (_NO_ASM, _END_ASM): + if ( + depth_change != 0 + and inner_block.open_parentheses == 1 + and _MATCH_ASM.match(line) + ): + # Enter assembly block + inner_block.inline_asm = _INSIDE_ASM + else: + # Not entering assembly block. If previous line was _END_ASM, + # we will now shift to _NO_ASM state. + inner_block.inline_asm = _NO_ASM + elif ( + inner_block.inline_asm == _INSIDE_ASM + and inner_block.open_parentheses == 0 + ): + # Exit assembly block + inner_block.inline_asm = _END_ASM + + # Consume namespace declaration at the beginning of the line. Do + # this in a loop so that we catch same line declarations like this: + # namespace proto2 { namespace bridge { class MessageSet; } } + while True: + # Match start of namespace. The "\b\s*" below catches namespace + # declarations even if it weren't followed by a whitespace, this + # is so that we don't confuse our namespace checker. The + # missing spaces will be flagged by CheckSpacing. 
+ namespace_decl_match = Match(r"^\s*namespace\b\s*([:\w]+)?(.*)$", line) + if not namespace_decl_match: + break + + new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) + self.stack.append(new_namespace) + + line = namespace_decl_match.group(2) + if line.find("{") != -1: + new_namespace.seen_open_brace = True + line = line[line.find("{") + 1 :] + + # Look for a class declaration in whatever is left of the line + # after parsing namespaces. The regexp accounts for decorated classes + # such as in: + # class LOCKABLE API Object { + # }; + class_decl_match = Match( + r"^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?" + r"(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))" + r"(.*)$", + line, + ) + if class_decl_match and ( + not self.stack or self.stack[-1].open_parentheses == 0 + ): + # We do not want to accept classes that are actually template arguments: + # template , + # template class Ignore3> + # void Function() {}; + # + # To avoid template argument cases, we scan forward and look for + # an unmatched '>'. If we see one, assume we are inside a + # template argument list. + end_declaration = len(class_decl_match.group(1)) + if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): + self.stack.append( + _ClassInfo( + class_decl_match.group(3), + class_decl_match.group(2), + clean_lines, + linenum, + ) + ) + line = class_decl_match.group(4) + + # If we have not yet seen the opening brace for the innermost block, + # run checks here. + if not self.SeenOpenBrace(): + self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) + + # Update access control if we are inside a class/struct + if self.stack and isinstance(self.stack[-1], _ClassInfo): + classinfo = self.stack[-1] + access_match = Match( + r"^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?" + r":(?:[^:]|$)", + line, + ) + if access_match: + classinfo.access = access_match.group(2) + + # Check that access keywords are indented +1 space. 
Skip this + # check if the keywords are not preceded by whitespaces. + indent = access_match.group(1) + if len(indent) != classinfo.class_indent + 1 and Match( + r"^\s*$", indent + ): + if classinfo.is_struct: + parent = "struct " + classinfo.name + else: + parent = "class " + classinfo.name + slots = "" + if access_match.group(3): + slots = access_match.group(3) + error( + filename, + linenum, + "whitespace/indent", + 3, + "%s%s: should be indented +1 space inside %s" + % (access_match.group(2), slots, parent), + ) + + # Consume braces or semicolons from what's left of the line + while True: + # Match first brace, semicolon, or closed parenthesis. + matched = Match(r"^[^{;)}]*([{;)}])(.*)$", line) + if not matched: + break + + token = matched.group(1) + if token == "{": + # If namespace or class hasn't seen a opening brace yet, mark + # namespace/class head as complete. Push a new block onto the + # stack otherwise. + if not self.SeenOpenBrace(): + self.stack[-1].seen_open_brace = True + elif Match(r'^extern\s*"[^"]*"\s*\{', line): + self.stack.append(_ExternCInfo()) + else: + self.stack.append(_BlockInfo(True)) + if _MATCH_ASM.match(line): + self.stack[-1].inline_asm = _BLOCK_ASM + + elif token == ";" or token == ")": + # If we haven't seen an opening brace yet, but we already saw + # a semicolon, this is probably a forward declaration. Pop + # the stack for these. + # + # Similarly, if we haven't seen an opening brace yet, but we + # already saw a closing parenthesis, then these are probably + # function arguments with extra "class" or "struct" keywords. + # Also pop these stack for these. + if not self.SeenOpenBrace(): + self.stack.pop() + else: # token == '}' + # Perform end of block checks and pop the stack. + if self.stack: + self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) + self.stack.pop() + line = matched.group(2) + + def InnermostClass(self): + """Get class info on the top of the stack. 
+ + Returns: + A _ClassInfo object if we are inside a class, or None otherwise. + """ + for i in range(len(self.stack), 0, -1): + classinfo = self.stack[i - 1] + if isinstance(classinfo, _ClassInfo): + return classinfo + return None + + def CheckCompletedBlocks(self, filename, error): + """Checks that all classes and namespaces have been completely parsed. + + Call this when all lines in a file have been processed. + Args: + filename: The name of the current file. + error: The function to call with any errors found. + """ + # Note: This test can result in false positives if #ifdef constructs + # get in the way of brace matching. See the testBuildClass test in + # cpplint_unittest.py for an example of this. + for obj in self.stack: + if isinstance(obj, _ClassInfo): + error( + filename, + obj.starting_linenum, + "build/class", + 5, + "Failed to find complete declaration of class %s" % obj.name, + ) + elif isinstance(obj, _NamespaceInfo): + error( + filename, + obj.starting_linenum, + "build/namespaces", + 5, + "Failed to find complete declaration of namespace %s" % obj.name, + ) + + +def CheckForNonStandardConstructs(filename, clean_lines, linenum, nesting_state, error): + r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. + + Complain about several constructs which gcc-2 accepts, but which are + not standard C++. Warning about these in lint is one way to ease the + transition to new compilers. + - put storage class first (e.g. "static const" instead of "const static"). + - "%lld" instead of %qd" in printf-type functions. + - "%1$d" is non-standard in printf-type functions. + - "\%" is an undefined character escape sequence. + - text after #endif is not allowed. + - invalid inner-style forward declaration. + - >? and ?= and )\?=?\s*(\w+|[+-]?\d+)(\.\d*)?", line): + error( + filename, + linenum, + "build/deprecated", + 3, + ">? and ))?' 
+ # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' + error( + filename, + linenum, + "runtime/member_string_references", + 2, + "const string& members are dangerous. It is much better to use " + "alternatives, such as pointers or simple constants.", + ) + + # Everything else in this function operates on class declarations. + # Return early if the top of the nesting stack is not a class, or if + # the class head is not completed yet. + classinfo = nesting_state.InnermostClass() + if not classinfo or not classinfo.seen_open_brace: + return - def InClassDeclaration(self): - """Check if we are currently one level inside a class or struct declaration. + # The class may have been declared with namespace or classname qualifiers. + # The constructor and destructor will not have those qualifiers. + base_classname = classinfo.name.split("::")[-1] + + # Look for single-argument constructors that aren't marked explicit. + # Technically a valid construct, but against style. Also look for + # non-single-argument constructors which are also technically valid, but + # strongly suggest something is wrong. + explicit_constructor_match = Match( + r"\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*" + r"\(((?:[^()]|\([^()]*\))*)\)" % re.escape(base_classname), + line, + ) - Returns: - True if top of the stack is a class/struct, False otherwise. - """ - return self.stack and isinstance(self.stack[-1], _ClassInfo) + if explicit_constructor_match: + is_marked_explicit = explicit_constructor_match.group(1) - def InAsmBlock(self): - """Check if we are currently one level inside an inline ASM block. 
+ if not explicit_constructor_match.group(2): + constructor_args = [] + else: + constructor_args = explicit_constructor_match.group(2).split(",") + + # collapse arguments so that commas in template parameter lists and function + # argument parameter lists don't split arguments in two + i = 0 + while i < len(constructor_args): + constructor_arg = constructor_args[i] + while constructor_arg.count("<") > constructor_arg.count( + ">" + ) or constructor_arg.count("(") > constructor_arg.count(")"): + constructor_arg += "," + constructor_args[i + 1] + del constructor_args[i + 1] + constructor_args[i] = constructor_arg + i += 1 + + defaulted_args = [arg for arg in constructor_args if "=" in arg] + noarg_constructor = ( + not constructor_args + or # empty arg list + # 'void' arg specifier + (len(constructor_args) == 1 and constructor_args[0].strip() == "void") + ) + onearg_constructor = ( + (len(constructor_args) == 1 and not noarg_constructor) # exactly one arg + or + # all but at most one arg defaulted + ( + len(constructor_args) >= 1 + and not noarg_constructor + and len(defaulted_args) >= len(constructor_args) - 1 + ) + ) + initializer_list_constructor = bool( + onearg_constructor + and Search(r"\bstd\s*::\s*initializer_list\b", constructor_args[0]) + ) + copy_constructor = bool( + onearg_constructor + and Match( + r"(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&" + % re.escape(base_classname), + constructor_args[0].strip(), + ) + ) + + if ( + not is_marked_explicit + and onearg_constructor + and not initializer_list_constructor + and not copy_constructor + ): + if defaulted_args: + error( + filename, + linenum, + "runtime/explicit", + 5, + "Constructors callable with one argument " + "should be marked explicit.", + ) + else: + error( + filename, + linenum, + "runtime/explicit", + 5, + "Single-parameter constructors should be marked explicit.", + ) + elif is_marked_explicit and not onearg_constructor: + if noarg_constructor: + error( + filename, + linenum, + 
"runtime/explicit", + 5, + "Zero-parameter constructors should not be marked explicit.", + ) + else: + error( + filename, + linenum, + "runtime/explicit", + 0, + "Constructors that require multiple arguments " + "should not be marked explicit.", + ) - Returns: - True if the top of the stack is a block containing inline ASM. - """ - return self.stack and self.stack[-1].inline_asm != _NO_ASM - def InTemplateArgumentList(self, clean_lines, linenum, pos): - """Check if current position is inside template argument list. +def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): + """Checks for the correctness of various spacing around function calls. Args: + filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. - pos: position just after the suspected template argument. - Returns: - True if (linenum, pos) is inside template arguments. + error: The function to call with any errors found. """ - while linenum < clean_lines.NumLines(): - # Find the earliest character that might indicate a template argument - line = clean_lines.elided[linenum] - match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) - if not match: - linenum += 1 - pos = 0 - continue - token = match.group(1) - pos += len(match.group(0)) - - # These things do not look like template argument list: - # class Suspect { - # class Suspect x; } - if token in ('{', '}', ';'): return False - - # These things look like template argument list: - # template - # template - # template - # template - if token in ('>', '=', '[', ']', '.'): return True - - # Check if token is an unmatched '<'. - # If not, move on to the next character. - if token != '<': - pos += 1 - if pos >= len(line): - linenum += 1 - pos = 0 - continue - - # We can't be sure if we just find a single '<', and need to - # find the matching '>'. 
- (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) - if end_pos < 0: - # Not sure if template argument list or syntax error in file - return False - linenum = end_line - pos = end_pos - return False + line = clean_lines.elided[linenum] + + # Since function calls often occur inside if/for/while/switch + # expressions - which have their own, more liberal conventions - we + # first see if we should be looking inside such an expression for a + # function call, to which we can apply more strict standards. + fncall = line # if there's no control flow construct, look at whole line + for pattern in ( + r"\bif\s*\((.*)\)\s*{", + r"\bfor\s*\((.*)\)\s*{", + r"\bwhile\s*\((.*)\)\s*[{;]", + r"\bswitch\s*\((.*)\)\s*{", + ): + match = Search(pattern, line) + if match: + fncall = match.group(1) # look inside the parens for function calls + break - def UpdatePreprocessor(self, line): - """Update preprocessor stack. + # Except in if/for/while/switch, there should never be space + # immediately inside parens (eg "f( 3, 4 )"). We make an exception + # for nested parens ( (a+b) + c ). Likewise, there should never be + # a space before a ( when it's a function argument. I assume it's a + # function argument when the char before the whitespace is legal in + # a function name (alnum + _) and we're not starting a macro. Also ignore + # pointers and references to arrays and functions coz they're too tricky: + # we use a very simple way to recognize these: + # " (something)(maybe-something)" or + # " (something)(maybe-something," or + # " (something)[something]" + # Note that we assume the contents of [] to be short enough that + # they'll never need to wrap. + if ( # Ignore control structures. + not Search(r"\b(if|for|while|switch|return|new|delete|catch|sizeof)\b", fncall) + and + # Ignore pointers/references to functions. + not Search(r" \([^)]+\)\([^)]*(\)|,$)", fncall) + and + # Ignore pointers/references to arrays. 
+ not Search(r" \([^)]+\)\[[^\]]+\]", fncall) + ): + if Search(r"\w\s*\(\s(?!\s*\\$)", fncall): # a ( used for a fn call + error( + filename, + linenum, + "whitespace/parens", + 4, + "Extra space after ( in function call", + ) + elif Search(r"\(\s+(?!(\s*\\)|\()", fncall): + error(filename, linenum, "whitespace/parens", 2, "Extra space after (") + if ( + Search(r"\w\s+\(", fncall) + and not Search(r"#\s*define|typedef|using\s+\w+\s*=", fncall) + and not Search(r"\w\s+\((\w+::)*\*\w+\)\(", fncall) + ): + # TODO(unknown): Space after an operator function seem to be a common + # error, silence those for now by restricting them to highest verbosity. + if Search(r"\boperator_*\b", line): + error( + filename, + linenum, + "whitespace/parens", + 0, + "Extra space before ( in function call", + ) + else: + error( + filename, + linenum, + "whitespace/parens", + 4, + "Extra space before ( in function call", + ) + # If the ) is followed only by a newline or a { + newline, assume it's + # part of a control statement (if/while/etc), and don't complain + if Search(r"[^)]\s+\)\s*[^{\s]", fncall): + # If the closing parenthesis is preceded by only whitespaces, + # try to give a more descriptive error message. + if Search(r"^\s+\)", fncall): + error( + filename, + linenum, + "whitespace/parens", + 2, + "Closing ) should be moved to the previous line", + ) + else: + error(filename, linenum, "whitespace/parens", 2, "Extra space before )") - We need to handle preprocessors due to classes like this: - #ifdef SWIG - struct ResultDetailsPageElementExtensionPoint { - #else - struct ResultDetailsPageElementExtensionPoint : public Extension { - #endif - We make the following assumptions (good enough for most files): - - Preprocessor condition evaluates to true from #if up to first - #else/#elif/#endif. +def IsBlankLine(line): + """Returns true if the given line is blank. - - Preprocessor condition evaluates to false from #else/#elif up - to #endif. 
We still perform lint checks on these lines, but - these do not affect nesting stack. + We consider a line to be blank if the line is empty or consists of + only white spaces. Args: - line: current line to check. + line: A line of a string. + + Returns: + True, if the given line is blank. """ - if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): - # Beginning of #if block, save the nesting stack here. The saved - # stack will allow us to restore the parsing state in the #else case. - self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) - elif Match(r'^\s*#\s*(else|elif)\b', line): - # Beginning of #else block - if self.pp_stack: - if not self.pp_stack[-1].seen_else: - # This is the first #else or #elif block. Remember the - # whole nesting stack up to this point. This is what we - # keep after the #endif. - self.pp_stack[-1].seen_else = True - self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) - - # Restore the stack to how it was before the #if - self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) - else: - # TODO(unknown): unexpected #else, issue warning? - pass - elif Match(r'^\s*#\s*endif\b', line): - # End of #if or #else blocks. - if self.pp_stack: - # If we saw an #else, we will need to restore the nesting - # stack to its former state before the #else, otherwise we - # will just continue from where we left off. - if self.pp_stack[-1].seen_else: - # Here we can just use a shallow copy since we are the last - # reference to it. - self.stack = self.pp_stack[-1].stack_before_else - # Drop the corresponding #if - self.pp_stack.pop() - else: - # TODO(unknown): unexpected #endif, issue warning? 
- pass + return not line or line.isspace() + + +def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error): + is_namespace_indent_item = ( + len(nesting_state.stack) > 1 + and nesting_state.stack[-1].check_namespace_indentation + and isinstance(nesting_state.previous_stack_top, _NamespaceInfo) + and nesting_state.previous_stack_top == nesting_state.stack[-2] + ) + + if ShouldCheckNamespaceIndentation( + nesting_state, is_namespace_indent_item, clean_lines.elided, line + ): + CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error) + + +def CheckForFunctionLengths(filename, clean_lines, linenum, function_state, error): + """Reports for long function bodies. - # TODO(unknown): Update() is too long, but we will refactor later. - def Update(self, filename, clean_lines, linenum, error): - """Update nesting state with current line. + For an overview why this is done, see: + http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions + + Uses a simplistic algorithm assuming other style guidelines + (especially spacing) are followed. + Only checks unindented functions, so class members are unchecked. + Trivial bodies are unchecked, so constructors with huge initializer lists + may be missed. + Blank/comment lines are not counted so as to avoid encouraging the removal + of vertical space and comments just to get through a lint check. + NOLINT *on the last line of a function* disables this check. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. + function_state: Current function name and lines in body so far. error: The function to call with any errors found. """ - line = clean_lines.elided[linenum] - - # Remember top of the previous nesting stack. - # - # The stack is always pushed/popped and not modified in place, so - # we can just do a shallow copy instead of copy.deepcopy. 
Using - # deepcopy would slow down cpplint by ~28%. - if self.stack: - self.previous_stack_top = self.stack[-1] - else: - self.previous_stack_top = None - - # Update pp_stack - self.UpdatePreprocessor(line) - - # Count parentheses. This is to avoid adding struct arguments to - # the nesting stack. - if self.stack: - inner_block = self.stack[-1] - depth_change = line.count('(') - line.count(')') - inner_block.open_parentheses += depth_change - - # Also check if we are starting or ending an inline assembly block. - if inner_block.inline_asm in (_NO_ASM, _END_ASM): - if (depth_change != 0 and - inner_block.open_parentheses == 1 and - _MATCH_ASM.match(line)): - # Enter assembly block - inner_block.inline_asm = _INSIDE_ASM - else: - # Not entering assembly block. If previous line was _END_ASM, - # we will now shift to _NO_ASM state. - inner_block.inline_asm = _NO_ASM - elif (inner_block.inline_asm == _INSIDE_ASM and - inner_block.open_parentheses == 0): - # Exit assembly block - inner_block.inline_asm = _END_ASM - - # Consume namespace declaration at the beginning of the line. Do - # this in a loop so that we catch same line declarations like this: - # namespace proto2 { namespace bridge { class MessageSet; } } - while True: - # Match start of namespace. The "\b\s*" below catches namespace - # declarations even if it weren't followed by a whitespace, this - # is so that we don't confuse our namespace checker. The - # missing spaces will be flagged by CheckSpacing. - namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) - if not namespace_decl_match: - break - - new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) - self.stack.append(new_namespace) - - line = namespace_decl_match.group(2) - if line.find('{') != -1: - new_namespace.seen_open_brace = True - line = line[line.find('{') + 1:] - - # Look for a class declaration in whatever is left of the line - # after parsing namespaces. 
The regexp accounts for decorated classes - # such as in: - # class LOCKABLE API Object { - # }; - class_decl_match = Match( - r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?' - r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' - r'(.*)$', line) - if (class_decl_match and - (not self.stack or self.stack[-1].open_parentheses == 0)): - # We do not want to accept classes that are actually template arguments: - # template , - # template class Ignore3> - # void Function() {}; - # - # To avoid template argument cases, we scan forward and look for - # an unmatched '>'. If we see one, assume we are inside a - # template argument list. - end_declaration = len(class_decl_match.group(1)) - if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): - self.stack.append(_ClassInfo( - class_decl_match.group(3), class_decl_match.group(2), - clean_lines, linenum)) - line = class_decl_match.group(4) - - # If we have not yet seen the opening brace for the innermost block, - # run checks here. - if not self.SeenOpenBrace(): - self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) - - # Update access control if we are inside a class/struct - if self.stack and isinstance(self.stack[-1], _ClassInfo): - classinfo = self.stack[-1] - access_match = Match( - r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' - r':(?:[^:]|$)', - line) - if access_match: - classinfo.access = access_match.group(2) - - # Check that access keywords are indented +1 space. Skip this - # check if the keywords are not preceded by whitespaces. 
- indent = access_match.group(1) - if (len(indent) != classinfo.class_indent + 1 and - Match(r'^\s*$', indent)): - if classinfo.is_struct: - parent = 'struct ' + classinfo.name - else: - parent = 'class ' + classinfo.name - slots = '' - if access_match.group(3): - slots = access_match.group(3) - error(filename, linenum, 'whitespace/indent', 3, - '%s%s: should be indented +1 space inside %s' % ( - access_match.group(2), slots, parent)) - - # Consume braces or semicolons from what's left of the line - while True: - # Match first brace, semicolon, or closed parenthesis. - matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) - if not matched: - break - - token = matched.group(1) - if token == '{': - # If namespace or class hasn't seen a opening brace yet, mark - # namespace/class head as complete. Push a new block onto the - # stack otherwise. - if not self.SeenOpenBrace(): - self.stack[-1].seen_open_brace = True - elif Match(r'^extern\s*"[^"]*"\s*\{', line): - self.stack.append(_ExternCInfo()) - else: - self.stack.append(_BlockInfo(True)) - if _MATCH_ASM.match(line): - self.stack[-1].inline_asm = _BLOCK_ASM - - elif token == ';' or token == ')': - # If we haven't seen an opening brace yet, but we already saw - # a semicolon, this is probably a forward declaration. Pop - # the stack for these. - # - # Similarly, if we haven't seen an opening brace yet, but we - # already saw a closing parenthesis, then these are probably - # function arguments with extra "class" or "struct" keywords. - # Also pop these stack for these. - if not self.SeenOpenBrace(): - self.stack.pop() - else: # token == '}' - # Perform end of block checks and pop the stack. - if self.stack: - self.stack[-1].CheckEnd(filename, clean_lines, linenum, error) - self.stack.pop() - line = matched.group(2) - - def InnermostClass(self): - """Get class info on the top of the stack. 
+ lines = clean_lines.lines + line = lines[linenum] + joined_line = "" + + starting_func = False + regexp = r"(\w(\w|::|\*|\&|\s)*)\(" # decls * & space::name( ... + match_result = Match(regexp, line) + if match_result: + # If the name is all caps and underscores, figure it's a macro and + # ignore it, unless it's TEST or TEST_F. + function_name = match_result.group(1).split()[-1] + if ( + function_name == "TEST" + or function_name == "TEST_F" + or (not Match(r"[A-Z_]+$", function_name)) + ): + starting_func = True + + if starting_func: + body_found = False + for start_linenum in range(linenum, clean_lines.NumLines()): + start_line = lines[start_linenum] + joined_line += " " + start_line.lstrip() + if Search(r"(;|})", start_line): # Declarations and trivial functions + body_found = True + break # ... ignore + elif Search(r"{", start_line): + body_found = True + function = Search(r"((\w|:)*)\(", line).group(1) + if Match(r"TEST", function): # Handle TEST... macros + parameter_regexp = Search(r"(\(.*\))", joined_line) + if parameter_regexp: # Ignore bad syntax + function += parameter_regexp.group(1) + else: + function += "()" + function_state.Begin(function) + break + if not body_found: + # No body for the function (or evidence of a non-function) was found. + error( + filename, + linenum, + "readability/fn_size", + 5, + "Lint failed to find start of function body.", + ) + elif Match(r"^\}\s*$", line): # function end + function_state.Check(error, filename, linenum) + function_state.End() + elif not Match(r"^\s*$", line): + function_state.Count() # Count non-blank/non-comment lines. + + +_RE_PATTERN_TODO = re.compile(r"^//(\s*)TODO(\(.+?\))?:?(\s|$)?") - Returns: - A _ClassInfo object if we are inside a class, or None otherwise. 
- """ - for i in range(len(self.stack), 0, -1): - classinfo = self.stack[i - 1] - if isinstance(classinfo, _ClassInfo): - return classinfo - return None - def CheckCompletedBlocks(self, filename, error): - """Checks that all classes and namespaces have been completely parsed. +def CheckComment(line, filename, linenum, next_line_start, error): + """Checks for common mistakes in comments. - Call this when all lines in a file have been processed. Args: + line: The line in question. filename: The name of the current file. + linenum: The number of the line to check. + next_line_start: The first non-whitespace column of the next line. error: The function to call with any errors found. """ - # Note: This test can result in false positives if #ifdef constructs - # get in the way of brace matching. See the testBuildClass test in - # cpplint_unittest.py for an example of this. - for obj in self.stack: - if isinstance(obj, _ClassInfo): - error(filename, obj.starting_linenum, 'build/class', 5, - 'Failed to find complete declaration of class %s' % - obj.name) - elif isinstance(obj, _NamespaceInfo): - error(filename, obj.starting_linenum, 'build/namespaces', 5, - 'Failed to find complete declaration of namespace %s' % - obj.name) - - -def CheckForNonStandardConstructs(filename, clean_lines, linenum, - nesting_state, error): - r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2. - - Complain about several constructs which gcc-2 accepts, but which are - not standard C++. Warning about these in lint is one way to ease the - transition to new compilers. - - put storage class first (e.g. "static const" instead of "const static"). - - "%lld" instead of %qd" in printf-type functions. - - "%1$d" is non-standard in printf-type functions. - - "\%" is an undefined character escape sequence. - - text after #endif is not allowed. - - invalid inner-style forward declaration. - - >? 
and ?= and )\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', - line): - error(filename, linenum, 'build/deprecated', 3, - '>? and ))?' - # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' - error(filename, linenum, 'runtime/member_string_references', 2, - 'const string& members are dangerous. It is much better to use ' - 'alternatives, such as pointers or simple constants.') - - # Everything else in this function operates on class declarations. - # Return early if the top of the nesting stack is not a class, or if - # the class head is not completed yet. - classinfo = nesting_state.InnermostClass() - if not classinfo or not classinfo.seen_open_brace: - return - - # The class may have been declared with namespace or classname qualifiers. - # The constructor and destructor will not have those qualifiers. - base_classname = classinfo.name.split('::')[-1] - - # Look for single-argument constructors that aren't marked explicit. - # Technically a valid construct, but against style. Also look for - # non-single-argument constructors which are also technically valid, but - # strongly suggest something is wrong. 
- explicit_constructor_match = Match( - r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' - r'\(((?:[^()]|\([^()]*\))*)\)' - % re.escape(base_classname), - line) - - if explicit_constructor_match: - is_marked_explicit = explicit_constructor_match.group(1) - - if not explicit_constructor_match.group(2): - constructor_args = [] - else: - constructor_args = explicit_constructor_match.group(2).split(',') - - # collapse arguments so that commas in template parameter lists and function - # argument parameter lists don't split arguments in two - i = 0 - while i < len(constructor_args): - constructor_arg = constructor_args[i] - while (constructor_arg.count('<') > constructor_arg.count('>') or - constructor_arg.count('(') > constructor_arg.count(')')): - constructor_arg += ',' + constructor_args[i + 1] - del constructor_args[i + 1] - constructor_args[i] = constructor_arg - i += 1 - - defaulted_args = [arg for arg in constructor_args if '=' in arg] - noarg_constructor = (not constructor_args or # empty arg list - # 'void' arg specifier - (len(constructor_args) == 1 and - constructor_args[0].strip() == 'void')) - onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg - not noarg_constructor) or - # all but at most one arg defaulted - (len(constructor_args) >= 1 and - not noarg_constructor and - len(defaulted_args) >= len(constructor_args) - 1)) - initializer_list_constructor = bool( - onearg_constructor and - Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) - copy_constructor = bool( - onearg_constructor and - Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' - % re.escape(base_classname), constructor_args[0].strip())) - - if (not is_marked_explicit and - onearg_constructor and - not initializer_list_constructor and - not copy_constructor): - if defaulted_args: - error(filename, linenum, 'runtime/explicit', 5, - 'Constructors callable with one argument ' - 'should be marked explicit.') - else: - error(filename, linenum, 
'runtime/explicit', 5, - 'Single-parameter constructors should be marked explicit.') - elif is_marked_explicit and not onearg_constructor: - if noarg_constructor: - error(filename, linenum, 'runtime/explicit', 5, - 'Zero-parameter constructors should not be marked explicit.') - else: - error(filename, linenum, 'runtime/explicit', 0, - 'Constructors that require multiple arguments ' - 'should not be marked explicit.') + commentpos = line.find("//") + if commentpos != -1: + # Check if the // may be in quotes. If so, ignore it + # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison + if ( + line.count('"', 0, commentpos) - line.count('\\"', 0, commentpos) + ) % 2 == 0: # not in quotes + # Allow one space for new scopes, two spaces otherwise: + if not (Match(r"^.*{ *//", line) and next_line_start == commentpos) and ( + (commentpos >= 1 and line[commentpos - 1] not in string.whitespace) + or (commentpos >= 2 and line[commentpos - 2] not in string.whitespace) + ): + error( + filename, + linenum, + "whitespace/comments", + 2, + "At least two spaces is best between code and comments", + ) + + # Checks for common mistakes in TODO comments. + comment = line[commentpos:] + match = _RE_PATTERN_TODO.match(comment) + if match: + # One whitespace is correct; zero whitespace is handled elsewhere. 
+ leading_whitespace = match.group(1) + if len(leading_whitespace) > 1: + error( + filename, + linenum, + "whitespace/todo", + 2, + "Too many spaces before TODO", + ) + + username = match.group(2) + if not username: + error( + filename, + linenum, + "readability/todo", + 2, + "Missing username in TODO; it should look like " + '"// TODO(my_username): Stuff."', + ) + + middle_whitespace = match.group(3) + # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison + if middle_whitespace != " " and middle_whitespace != "": + error( + filename, + linenum, + "whitespace/todo", + 2, + "TODO(my_username) should be followed by a space", + ) + + # If the comment contains an alphanumeric character, there + # should be a space somewhere between it and the //. + if Match(r"//[^ ]*\w", comment): + error( + filename, + linenum, + "whitespace/comments", + 4, + "Should have a space between // and comment", + ) -def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): - """Checks for the correctness of various spacing around function calls. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Since function calls often occur inside if/for/while/switch - # expressions - which have their own, more liberal conventions - we - # first see if we should be looking inside such an expression for a - # function call, to which we can apply more strict standards. 
- fncall = line # if there's no control flow construct, look at whole line - for pattern in (r'\bif\s*\((.*)\)\s*{', - r'\bfor\s*\((.*)\)\s*{', - r'\bwhile\s*\((.*)\)\s*[{;]', - r'\bswitch\s*\((.*)\)\s*{'): - match = Search(pattern, line) - if match: - fncall = match.group(1) # look inside the parens for function calls - break - - # Except in if/for/while/switch, there should never be space - # immediately inside parens (eg "f( 3, 4 )"). We make an exception - # for nested parens ( (a+b) + c ). Likewise, there should never be - # a space before a ( when it's a function argument. I assume it's a - # function argument when the char before the whitespace is legal in - # a function name (alnum + _) and we're not starting a macro. Also ignore - # pointers and references to arrays and functions coz they're too tricky: - # we use a very simple way to recognize these: - # " (something)(maybe-something)" or - # " (something)(maybe-something," or - # " (something)[something]" - # Note that we assume the contents of [] to be short enough that - # they'll never need to wrap. - if ( # Ignore control structures. - not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', - fncall) and - # Ignore pointers/references to functions. - not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and - # Ignore pointers/references to arrays. - not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): - if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call - error(filename, linenum, 'whitespace/parens', 4, - 'Extra space after ( in function call') - elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): - error(filename, linenum, 'whitespace/parens', 2, - 'Extra space after (') - if (Search(r'\w\s+\(', fncall) and - not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and - not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)): - # TODO(unknown): Space after an operator function seem to be a common - # error, silence those for now by restricting them to highest verbosity. 
- if Search(r'\boperator_*\b', line): - error(filename, linenum, 'whitespace/parens', 0, - 'Extra space before ( in function call') - else: - error(filename, linenum, 'whitespace/parens', 4, - 'Extra space before ( in function call') - # If the ) is followed only by a newline or a { + newline, assume it's - # part of a control statement (if/while/etc), and don't complain - if Search(r'[^)]\s+\)\s*[^{\s]', fncall): - # If the closing parenthesis is preceded by only whitespaces, - # try to give a more descriptive error message. - if Search(r'^\s+\)', fncall): - error(filename, linenum, 'whitespace/parens', 2, - 'Closing ) should be moved to the previous line') - else: - error(filename, linenum, 'whitespace/parens', 2, - 'Extra space before )') +def CheckAccess(filename, clean_lines, linenum, nesting_state, error): + """Checks for improper use of DISALLOW* macros. + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] # get rid of comments and strings -def IsBlankLine(line): - """Returns true if the given line is blank. - - We consider a line to be blank if the line is empty or consists of - only white spaces. - - Args: - line: A line of a string. - - Returns: - True, if the given line is blank. 
- """ - return not line or line.isspace() - - -def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, - error): - is_namespace_indent_item = ( - len(nesting_state.stack) > 1 and - nesting_state.stack[-1].check_namespace_indentation and - isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and - nesting_state.previous_stack_top == nesting_state.stack[-2]) - - if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, - clean_lines.elided, line): - CheckItemIndentationInNamespace(filename, clean_lines.elided, - line, error) - - -def CheckForFunctionLengths(filename, clean_lines, linenum, - function_state, error): - """Reports for long function bodies. - - For an overview why this is done, see: - http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions - - Uses a simplistic algorithm assuming other style guidelines - (especially spacing) are followed. - Only checks unindented functions, so class members are unchecked. - Trivial bodies are unchecked, so constructors with huge initializer lists - may be missed. - Blank/comment lines are not counted so as to avoid encouraging the removal - of vertical space and comments just to get through a lint check. - NOLINT *on the last line of a function* disables this check. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - function_state: Current function name and lines in body so far. - error: The function to call with any errors found. - """ - lines = clean_lines.lines - line = lines[linenum] - joined_line = '' - - starting_func = False - regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... - match_result = Match(regexp, line) - if match_result: - # If the name is all caps and underscores, figure it's a macro and - # ignore it, unless it's TEST or TEST_F. 
- function_name = match_result.group(1).split()[-1] - if function_name == 'TEST' or function_name == 'TEST_F' or ( - not Match(r'[A-Z_]+$', function_name)): - starting_func = True - - if starting_func: - body_found = False - for start_linenum in range(linenum, clean_lines.NumLines()): - start_line = lines[start_linenum] - joined_line += ' ' + start_line.lstrip() - if Search(r'(;|})', start_line): # Declarations and trivial functions - body_found = True - break # ... ignore - elif Search(r'{', start_line): - body_found = True - function = Search(r'((\w|:)*)\(', line).group(1) - if Match(r'TEST', function): # Handle TEST... macros - parameter_regexp = Search(r'(\(.*\))', joined_line) - if parameter_regexp: # Ignore bad syntax - function += parameter_regexp.group(1) - else: - function += '()' - function_state.Begin(function) - break - if not body_found: - # No body for the function (or evidence of a non-function) was found. - error(filename, linenum, 'readability/fn_size', 5, - 'Lint failed to find start of function body.') - elif Match(r'^\}\s*$', line): # function end - function_state.Check(error, filename, linenum) - function_state.End() - elif not Match(r'^\s*$', line): - function_state.Count() # Count non-blank/non-comment lines. + matched = Match( + (r"\s*(DISALLOW_COPY_AND_ASSIGN|" r"DISALLOW_IMPLICIT_CONSTRUCTORS)"), line + ) + if not matched: + return + if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): + if nesting_state.stack[-1].access != "private": + error( + filename, + linenum, + "readability/constructors", + 3, + "%s must be in the private: section" % matched.group(1), + ) + else: + # Found DISALLOW* macro outside a class declaration, or perhaps it + # was used inside a function when it should have been part of the + # class declaration. We could issue a warning here, but it + # probably resulted in a compiler error already. 
+ pass -_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') +def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): + """Checks for the correctness of various spacing issues in the code. -def CheckComment(line, filename, linenum, next_line_start, error): - """Checks for common mistakes in comments. - - Args: - line: The line in question. - filename: The name of the current file. - linenum: The number of the line to check. - next_line_start: The first non-whitespace column of the next line. - error: The function to call with any errors found. - """ - commentpos = line.find('//') - if commentpos != -1: - # Check if the // may be in quotes. If so, ignore it - # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison - if (line.count('"', 0, commentpos) - - line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes - # Allow one space for new scopes, two spaces otherwise: - if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and - ((commentpos >= 1 and - line[commentpos-1] not in string.whitespace) or - (commentpos >= 2 and - line[commentpos-2] not in string.whitespace))): - error(filename, linenum, 'whitespace/comments', 2, - 'At least two spaces is best between code and comments') - - # Checks for common mistakes in TODO comments. - comment = line[commentpos:] - match = _RE_PATTERN_TODO.match(comment) - if match: - # One whitespace is correct; zero whitespace is handled elsewhere. 
- leading_whitespace = match.group(1) - if len(leading_whitespace) > 1: - error(filename, linenum, 'whitespace/todo', 2, - 'Too many spaces before TODO') - - username = match.group(2) - if not username: - error(filename, linenum, 'readability/todo', 2, - 'Missing username in TODO; it should look like ' - '"// TODO(my_username): Stuff."') - - middle_whitespace = match.group(3) - # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison - if middle_whitespace != ' ' and middle_whitespace != '': - error(filename, linenum, 'whitespace/todo', 2, - 'TODO(my_username) should be followed by a space') - - # If the comment contains an alphanumeric character, there - # should be a space somewhere between it and the //. - if Match(r'//[^ ]*\w', comment): - error(filename, linenum, 'whitespace/comments', 4, - 'Should have a space between // and comment') + Things we check for: spaces around operators, spaces after + if/for/while/switch, no spaces around parens in function calls, two + spaces between code and comment, don't start a block with a blank + line, don't end a function with a blank line, don't add a blank line + after public/protected/private, don't have too many blank lines in a row. -def CheckAccess(filename, clean_lines, linenum, nesting_state, error): - """Checks for improper use of DISALLOW* macros. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. 
- """ - line = clean_lines.elided[linenum] # get rid of comments and strings - - matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' - r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) - if not matched: - return - if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): - if nesting_state.stack[-1].access != 'private': - error(filename, linenum, 'readability/constructors', 3, - '%s must be in the private: section' % matched.group(1)) - - else: - # Found DISALLOW* macro outside a class declaration, or perhaps it - # was used inside a function when it should have been part of the - # class declaration. We could issue a warning here, but it - # probably resulted in a compiler error already. - pass + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + # Don't use "elided" lines here, otherwise we can't check commented lines. + # Don't want to use "raw" either, because we don't want to check inside C++11 + # raw strings, + raw = clean_lines.lines_without_raw_strings + line = raw[linenum] -def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): - """Checks for the correctness of various spacing issues in the code. - - Things we check for: spaces around operators, spaces after - if/for/while/switch, no spaces around parens in function calls, two - spaces between code and comment, don't start a block with a blank - line, don't end a function with a blank line, don't add a blank line - after public/protected/private, don't have too many blank lines in a row. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. 
- nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - - # Don't use "elided" lines here, otherwise we can't check commented lines. - # Don't want to use "raw" either, because we don't want to check inside C++11 - # raw strings, - raw = clean_lines.lines_without_raw_strings - line = raw[linenum] - - # Before nixing comments, check if the line is blank for no good - # reason. This includes the first line after a block is opened, and - # blank lines at the end of a function (ie, right before a line like '}' - # - # Skip all the blank line checks if we are immediately inside a - # namespace body. In other words, don't issue blank line warnings - # for this block: - # namespace { - # - # } - # - # A warning about missing end of namespace comments will be issued instead. - # - # Also skip blank line checks for 'extern "C"' blocks, which are formatted - # like namespaces. - if (IsBlankLine(line) and - not nesting_state.InNamespaceBody() and - not nesting_state.InExternC()): - elided = clean_lines.elided - prev_line = elided[linenum - 1] - prevbrace = prev_line.rfind('{') - # TODO(unknown): Don't complain if line before blank line, and line after, - # both start with alnums and are indented the same amount. - # This ignores whitespace at the start of a namespace block - # because those are not usually indented. - if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: - # OK, we have a blank line at the start of a code block. Before we - # complain, we check if it is an exception to the rule: The previous - # non-empty line has the parameters of a function header that are indented - # 4 spaces (because they did not fit in a 80 column line when placed on - # the same line as the function name). 
We also check for the case where - # the previous line is indented 6 spaces, which may happen when the - # initializers of a constructor do not fit into a 80 column line. - exception = False - if Match(r' {6}\w', prev_line): # Initializer list? - # We are looking for the opening column of initializer list, which - # should be indented 4 spaces to cause 6 space indentation afterwards. - search_position = linenum-2 - while (search_position >= 0 - and Match(r' {6}\w', elided[search_position])): - search_position -= 1 - exception = (search_position >= 0 - and elided[search_position][:5] == ' :') - else: - # Search for the function arguments or an initializer list. We use a - # simple heuristic here: If the line is indented 4 spaces; and we have a - # closing paren, without the opening paren, followed by an opening brace - # or colon (for initializer lists) we assume that it is the last line of - # a function header. If we have a colon indented 4 spaces, it is an - # initializer list. - exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', - prev_line) - or Match(r' {4}:', prev_line)) - - if not exception: - error(filename, linenum, 'whitespace/blank_line', 2, - 'Redundant blank line at the start of a code block ' - 'should be deleted.') - # Ignore blank lines at the end of a block in a long if-else - # chain, like this: - # if (condition1) { - # // Something followed by a blank line + # Before nixing comments, check if the line is blank for no good + # reason. This includes the first line after a block is opened, and + # blank lines at the end of a function (ie, right before a line like '}' + # + # Skip all the blank line checks if we are immediately inside a + # namespace body. In other words, don't issue blank line warnings + # for this block: + # namespace { # - # } else if (condition2) { - # // Something else # } + # + # A warning about missing end of namespace comments will be issued instead. 
+ # + # Also skip blank line checks for 'extern "C"' blocks, which are formatted + # like namespaces. + if ( + IsBlankLine(line) + and not nesting_state.InNamespaceBody() + and not nesting_state.InExternC() + ): + elided = clean_lines.elided + prev_line = elided[linenum - 1] + prevbrace = prev_line.rfind("{") + # TODO(unknown): Don't complain if line before blank line, and line after, + # both start with alnums and are indented the same amount. + # This ignores whitespace at the start of a namespace block + # because those are not usually indented. + if prevbrace != -1 and prev_line[prevbrace:].find("}") == -1: + # OK, we have a blank line at the start of a code block. Before we + # complain, we check if it is an exception to the rule: The previous + # non-empty line has the parameters of a function header that are indented + # 4 spaces (because they did not fit in a 80 column line when placed on + # the same line as the function name). We also check for the case where + # the previous line is indented 6 spaces, which may happen when the + # initializers of a constructor do not fit into a 80 column line. + exception = False + if Match(r" {6}\w", prev_line): # Initializer list? + # We are looking for the opening column of initializer list, which + # should be indented 4 spaces to cause 6 space indentation afterwards. + search_position = linenum - 2 + while search_position >= 0 and Match( + r" {6}\w", elided[search_position] + ): + search_position -= 1 + exception = ( + search_position >= 0 and elided[search_position][:5] == " :" + ) + else: + # Search for the function arguments or an initializer list. We use a + # simple heuristic here: If the line is indented 4 spaces; and we have a + # closing paren, without the opening paren, followed by an opening brace + # or colon (for initializer lists) we assume that it is the last line of + # a function header. If we have a colon indented 4 spaces, it is an + # initializer list. 
+ exception = Match( + r" {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)", prev_line + ) or Match(r" {4}:", prev_line) + + if not exception: + error( + filename, + linenum, + "whitespace/blank_line", + 2, + "Redundant blank line at the start of a code block " + "should be deleted.", + ) + # Ignore blank lines at the end of a block in a long if-else + # chain, like this: + # if (condition1) { + # // Something followed by a blank line + # + # } else if (condition2) { + # // Something else + # } + if linenum + 1 < clean_lines.NumLines(): + next_line = raw[linenum + 1] + if ( + next_line + and Match(r"\s*}", next_line) + and next_line.find("} else ") == -1 + ): + error( + filename, + linenum, + "whitespace/blank_line", + 3, + "Redundant blank line at the end of a code block " + "should be deleted.", + ) + + matched = Match(r"\s*(public|protected|private):", prev_line) + if matched: + error( + filename, + linenum, + "whitespace/blank_line", + 3, + 'Do not leave a blank line after "%s:"' % matched.group(1), + ) + + # Next, check comments + next_line_start = 0 if linenum + 1 < clean_lines.NumLines(): - next_line = raw[linenum + 1] - if (next_line - and Match(r'\s*}', next_line) - and next_line.find('} else ') == -1): - error(filename, linenum, 'whitespace/blank_line', 3, - 'Redundant blank line at the end of a code block ' - 'should be deleted.') - - matched = Match(r'\s*(public|protected|private):', prev_line) - if matched: - error(filename, linenum, 'whitespace/blank_line', 3, - 'Do not leave a blank line after "%s:"' % matched.group(1)) - - # Next, check comments - next_line_start = 0 - if linenum + 1 < clean_lines.NumLines(): - next_line = raw[linenum + 1] - next_line_start = len(next_line) - len(next_line.lstrip()) - CheckComment(line, filename, linenum, next_line_start, error) + next_line = raw[linenum + 1] + next_line_start = len(next_line) - len(next_line.lstrip()) + CheckComment(line, filename, linenum, next_line_start, error) - # get rid of comments and strings - line = 
clean_lines.elided[linenum] + # get rid of comments and strings + line = clean_lines.elided[linenum] - # You shouldn't have spaces before your brackets, except maybe after - # 'delete []' or 'return []() {};' - if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line): - error(filename, linenum, 'whitespace/braces', 5, - 'Extra space before [') + # You shouldn't have spaces before your brackets, except maybe after + # 'delete []' or 'return []() {};' + if Search(r"\w\s+\[", line) and not Search(r"(?:delete|return)\s+\[", line): + error(filename, linenum, "whitespace/braces", 5, "Extra space before [") - # In range-based for, we wanted spaces before and after the colon, but - # not around "::" tokens that might appear. - if (Search(r'for *\(.*[^:]:[^: ]', line) or - Search(r'for *\(.*[^: ]:[^:]', line)): - error(filename, linenum, 'whitespace/forcolon', 2, - 'Missing space around colon in range-based for loop') + # In range-based for, we wanted spaces before and after the colon, but + # not around "::" tokens that might appear. + if Search(r"for *\(.*[^:]:[^: ]", line) or Search(r"for *\(.*[^: ]:[^:]", line): + error( + filename, + linenum, + "whitespace/forcolon", + 2, + "Missing space around colon in range-based for loop", + ) def CheckOperatorSpacing(filename, clean_lines, linenum, error): - """Checks for horizontal spacing around operators. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Don't try to do spacing checks for operator methods. Do this by - # replacing the troublesome characters with something else, - # preserving column position for all other characters. - # - # The replacement is done repeatedly to avoid false positives from - # operators that call operators. 
- while True: - match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) - if match: - line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) - else: - break - - # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". - # Otherwise not. Note we only check for non-spaces on *both* sides; - # sometimes people put non-spaces on one side when aligning ='s among - # many lines (not that this is behavior that I approve of...) - if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line): - error(filename, linenum, 'whitespace/operators', 4, - 'Missing spaces around =') - - # It's ok not to have spaces around binary operators like + - * /, but if - # there's too little whitespace, we get concerned. It's hard to tell, - # though, so we punt on this one for now. TODO. - - # You should always have whitespace around binary operators. - # - # Check <= and >= first to avoid false positives with < and >, then - # check non-include lines for spacing around < and >. - # - # If the operator is followed by a comma, assume it's be used in a - # macro context and don't do any checks. This avoids false - # positives. - # - # Note that && is not included here. Those are checked separately - # in CheckRValueReference - match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line) - if match: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around %s' % match.group(1)) - elif not Match(r'#.*include', line): - # Look for < that is not surrounded by spaces. This is only - # triggered if both sides are missing spaces, even though - # technically should should flag if at least one side is missing a - # space. This is done to avoid some false positives with shifts. 
- match = Match(r'^(.*[^\s<])<[^\s=<,]', line) - if match: - (_, _, end_pos) = CloseExpression( - clean_lines, linenum, len(match.group(1))) - if end_pos <= -1: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around <') - - # Look for > that is not surrounded by spaces. Similar to the - # above, we only trigger if both sides are missing spaces to avoid - # false positives with shifts. - match = Match(r'^(.*[^-\s>])>[^\s=>,]', line) - if match: - (_, _, start_pos) = ReverseCloseExpression( - clean_lines, linenum, len(match.group(1))) - if start_pos <= -1: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around >') - - # We allow no-spaces around << when used like this: 10<<20, but - # not otherwise (particularly, not when used as streams) - # - # We also allow operators following an opening parenthesis, since - # those tend to be macros that deal with operators. - match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line) - if (match and match.group(1) != '(' and - not (match.group(1).isdigit() and match.group(2).isdigit()) and - not (match.group(1) == 'operator' and match.group(2) == ';')): - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around <<') - - # We allow no-spaces around >> for almost anything. This is because - # C++11 allows ">>" to close nested templates, which accounts for - # most cases when ">>" is not followed by a space. - # - # We still warn on ">>" followed by alpha character, because that is - # likely due to ">>" being used for right shifts, e.g.: - # value >> alpha - # - # When ">>" is used to close templates, the alphanumeric letter that - # follows would be part of an identifier, and there should still be - # a space separating the template type and the identifier. 
- # type> alpha - match = Search(r'>>[a-zA-Z_]', line) - if match: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around >>') - - # There shouldn't be space around unary operators - match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) - if match: - error(filename, linenum, 'whitespace/operators', 4, - 'Extra space for operator %s' % match.group(1)) - + """Checks for horizontal spacing around operators. -def CheckParenthesisSpacing(filename, clean_lines, linenum, error): - """Checks for horizontal spacing around parentheses. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # No spaces after an if, while, switch, or for - match = Search(r' (if\(|for\(|while\(|switch\()', line) - if match: - error(filename, linenum, 'whitespace/parens', 5, - 'Missing space before ( in %s' % match.group(1)) - - # For if/for/while/switch, the left and right parens should be - # consistent about how many spaces are inside the parens, and - # there should either be zero or one spaces inside the parens. - # We don't want: "if ( foo)" or "if ( foo )". - # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. - match = Search(r'\b(if|for|while|switch)\s*' - r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', - line) - if match: - if len(match.group(2)) != len(match.group(4)): - if not (match.group(3) == ';' and - len(match.group(2)) == 1 + len(match.group(4)) or - not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): - error(filename, linenum, 'whitespace/parens', 5, - 'Mismatching spaces inside () in %s' % match.group(1)) - if len(match.group(2)) not in [0, 1]: - error(filename, linenum, 'whitespace/parens', 5, - 'Should have zero or one spaces inside ( and ) in %s' % - match.group(1)) + Args: + filename: The name of the current file. 
+ clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + # Don't try to do spacing checks for operator methods. Do this by + # replacing the troublesome characters with something else, + # preserving column position for all other characters. + # + # The replacement is done repeatedly to avoid false positives from + # operators that call operators. + while True: + match = Match(r"^(.*\boperator\b)(\S+)(\s*\(.*)$", line) + if match: + line = match.group(1) + ("_" * len(match.group(2))) + match.group(3) + else: + break -def CheckCommaSpacing(filename, clean_lines, linenum, error): - """Checks for horizontal spacing near commas and semicolons. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - raw = clean_lines.lines_without_raw_strings - line = clean_lines.elided[linenum] - - # You should always have a space after a comma (either as fn arg or operator) - # - # This does not apply when the non-space character following the - # comma is another comma, since the only time when that happens is - # for empty macro arguments. - # - # We run this check in two passes: first pass on elided lines to - # verify that lines contain missing whitespaces, second pass on raw - # lines to confirm that those missing whitespaces are not due to - # elided comments. 
- if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and - Search(r',[^,\s]', raw[linenum])): - error(filename, linenum, 'whitespace/comma', 3, - 'Missing space after ,') - - # You should always have a space after a semicolon - # except for few corner cases - # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more - # space after ; - if Search(r';[^\s};\\)/]', line): - error(filename, linenum, 'whitespace/semicolon', 3, - 'Missing space after ;') + # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". + # Otherwise not. Note we only check for non-spaces on *both* sides; + # sometimes people put non-spaces on one side when aligning ='s among + # many lines (not that this is behavior that I approve of...) + if Search(r"[\w.]=[\w.]", line) and not Search(r"\b(if|while) ", line): + error(filename, linenum, "whitespace/operators", 4, "Missing spaces around =") + # It's ok not to have spaces around binary operators like + - * /, but if + # there's too little whitespace, we get concerned. It's hard to tell, + # though, so we punt on this one for now. TODO. -def CheckBracesSpacing(filename, clean_lines, linenum, error): - """Checks for horizontal spacing near commas. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Except after an opening paren, or after another opening brace (in case of - # an initializer list, for instance), you should have spaces before your - # braces. And since you should never have braces at the beginning of a line, - # this is an easy test. - match = Match(r'^(.*[^ ({]){', line) - if match: - # Try a bit harder to check for brace initialization. This - # happens in one of the following forms: - # Constructor() : initializer_list_{} { ... 
} - # Constructor{}.MemberFunction() - # Type variable{}; - # FunctionCall(type{}, ...); - # LastArgument(..., type{}); - # LOG(INFO) << type{} << " ..."; - # map_of_type[{...}] = ...; - # ternary = expr ? new type{} : nullptr; - # OuterTemplate{}> + # You should always have whitespace around binary operators. # - # We check for the character following the closing brace, and - # silence the warning if it's one of those listed above, i.e. - # "{.;,)<>]:". + # Check <= and >= first to avoid false positives with < and >, then + # check non-include lines for spacing around < and >. # - # To account for nested initializer list, we allow any number of - # closing braces up to "{;,)<". We can't simply silence the - # warning on first sight of closing brace, because that would - # cause false negatives for things that are not initializer lists. - # Silence this: But not this: - # Outer{ if (...) { - # Inner{...} if (...){ // Missing space before { - # }; } + # If the operator is followed by a comma, assume it's be used in a + # macro context and don't do any checks. This avoids false + # positives. # - # There is a false negative with this approach if people inserted - # spurious semicolons, e.g. "if (cond){};", but we will catch the - # spurious semicolon with a separate check. - (endline, endlinenum, endpos) = CloseExpression( - clean_lines, linenum, len(match.group(1))) - trailing_text = '' - if endpos > -1: - trailing_text = endline[endpos:] - for offset in range(endlinenum + 1, - min(endlinenum + 3, clean_lines.NumLines() - 1)): - trailing_text += clean_lines.elided[offset] - if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text): - error(filename, linenum, 'whitespace/braces', 5, - 'Missing space before {') - - # Make sure '} else {' has spaces. - if Search(r'}else', line): - error(filename, linenum, 'whitespace/braces', 5, - 'Missing space before else') - - # You shouldn't have a space before a semicolon at the end of the line. 
- # There's a special case for "for" since the style guide allows space before - # the semicolon there. - if Search(r':\s*;\s*$', line): - error(filename, linenum, 'whitespace/semicolon', 5, - 'Semicolon defining empty statement. Use {} instead.') - elif Search(r'^\s*;\s*$', line): - error(filename, linenum, 'whitespace/semicolon', 5, - 'Line contains only semicolon. If this should be an empty statement, ' - 'use {} instead.') - elif (Search(r'\s+;\s*$', line) and - not Search(r'\bfor\b', line)): - error(filename, linenum, 'whitespace/semicolon', 5, - 'Extra space before last semicolon. If this should be an empty ' - 'statement, use {} instead.') + # Note that && is not included here. Those are checked separately + # in CheckRValueReference + match = Search(r"[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]", line) + if match: + error( + filename, + linenum, + "whitespace/operators", + 3, + "Missing spaces around %s" % match.group(1), + ) + elif not Match(r"#.*include", line): + # Look for < that is not surrounded by spaces. This is only + # triggered if both sides are missing spaces, even though + # technically should should flag if at least one side is missing a + # space. This is done to avoid some false positives with shifts. + match = Match(r"^(.*[^\s<])<[^\s=<,]", line) + if match: + (_, _, end_pos) = CloseExpression(clean_lines, linenum, len(match.group(1))) + if end_pos <= -1: + error( + filename, + linenum, + "whitespace/operators", + 3, + "Missing spaces around <", + ) + + # Look for > that is not surrounded by spaces. Similar to the + # above, we only trigger if both sides are missing spaces to avoid + # false positives with shifts. 
+ match = Match(r"^(.*[^-\s>])>[^\s=>,]", line) + if match: + (_, _, start_pos) = ReverseCloseExpression( + clean_lines, linenum, len(match.group(1)) + ) + if start_pos <= -1: + error( + filename, + linenum, + "whitespace/operators", + 3, + "Missing spaces around >", + ) + + # We allow no-spaces around << when used like this: 10<<20, but + # not otherwise (particularly, not when used as streams) + # + # We also allow operators following an opening parenthesis, since + # those tend to be macros that deal with operators. + match = Search(r"(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])", line) + if ( + match + and match.group(1) != "(" + and not (match.group(1).isdigit() and match.group(2).isdigit()) + and not (match.group(1) == "operator" and match.group(2) == ";") + ): + error(filename, linenum, "whitespace/operators", 3, "Missing spaces around <<") + + # We allow no-spaces around >> for almost anything. This is because + # C++11 allows ">>" to close nested templates, which accounts for + # most cases when ">>" is not followed by a space. + # + # We still warn on ">>" followed by alpha character, because that is + # likely due to ">>" being used for right shifts, e.g.: + # value >> alpha + # + # When ">>" is used to close templates, the alphanumeric letter that + # follows would be part of an identifier, and there should still be + # a space separating the template type and the identifier. + # type> alpha + match = Search(r">>[a-zA-Z_]", line) + if match: + error(filename, linenum, "whitespace/operators", 3, "Missing spaces around >>") + # There shouldn't be space around unary operators + match = Search(r"(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])", line) + if match: + error( + filename, + linenum, + "whitespace/operators", + 4, + "Extra space for operator %s" % match.group(1), + ) -def IsDecltype(clean_lines, linenum, column): - """Check if the token ending on (linenum, column) is decltype(). - - Args: - clean_lines: A CleansedLines instance containing the file. 
- linenum: the number of the line to check. - column: end column of the token to check. - Returns: - True if this token is decltype() expression, False otherwise. - """ - (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) - if start_col < 0: - return False - if Search(r'\bdecltype\s*$', text[0:start_col]): - return True - return False +def CheckParenthesisSpacing(filename, clean_lines, linenum, error): + """Checks for horizontal spacing around parentheses. -def IsTemplateParameterList(clean_lines, linenum, column): - """Check if the token ending on (linenum, column) is the end of template<>. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: the number of the line to check. - column: end column of the token to check. - Returns: - True if this token is end of a template parameter list, False otherwise. - """ - (_, startline, startpos) = ReverseCloseExpression( - clean_lines, linenum, column) - if (startpos > -1 and - Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])): - return True - return False + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + # No spaces after an if, while, switch, or for + match = Search(r" (if\(|for\(|while\(|switch\()", line) + if match: + error( + filename, + linenum, + "whitespace/parens", + 5, + "Missing space before ( in %s" % match.group(1), + ) + + # For if/for/while/switch, the left and right parens should be + # consistent about how many spaces are inside the parens, and + # there should either be zero or one spaces inside the parens. + # We don't want: "if ( foo)" or "if ( foo )". + # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. 
+ match = Search( + r"\b(if|for|while|switch)\s*" r"\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$", line + ) + if match: + if len(match.group(2)) != len(match.group(4)): + if not ( + match.group(3) == ";" + and len(match.group(2)) == 1 + len(match.group(4)) + or not match.group(2) + and Search(r"\bfor\s*\(.*; \)", line) + ): + error( + filename, + linenum, + "whitespace/parens", + 5, + "Mismatching spaces inside () in %s" % match.group(1), + ) + if len(match.group(2)) not in [0, 1]: + error( + filename, + linenum, + "whitespace/parens", + 5, + "Should have zero or one spaces inside ( and ) in %s" % match.group(1), + ) -def IsRValueType(clean_lines, nesting_state, linenum, column): - """Check if the token ending on (linenum, column) is a type. - - Assumes that text to the right of the column is "&&" or a function - name. - - Args: - clean_lines: A CleansedLines instance containing the file. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - linenum: the number of the line to check. - column: end column of the token to check. - Returns: - True if this token is a type, False if we are not sure. - """ - prefix = clean_lines.elided[linenum][0:column] - - # Get one word to the left. If we failed to do so, this is most - # likely not a type, since it's unlikely that the type name and "&&" - # would be split across multiple lines. - match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix) - if not match: - return False - # Check text following the token. If it's "&&>" or "&&," or "&&...", it's - # most likely a rvalue reference used inside a template. - suffix = clean_lines.elided[linenum][column:] - if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix): - return True +def CheckCommaSpacing(filename, clean_lines, linenum, error): + """Checks for horizontal spacing near commas and semicolons. 
- # Check for simple type and end of templates: - # int&& variable - # vector&& variable - # - # Because this function is called recursively, we also need to - # recognize pointer and reference types: - # int* Function() - # int& Function() - if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool', - 'short', 'int', 'long', 'signed', 'unsigned', - 'float', 'double', 'void', 'auto', '>', '*', '&']: - return True + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + raw = clean_lines.lines_without_raw_strings + line = clean_lines.elided[linenum] - # If we see a close parenthesis, look for decltype on the other side. - # decltype would unambiguously identify a type, anything else is - # probably a parenthesized expression and not a type. - if match.group(2) == ')': - return IsDecltype( - clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1) - - # Check for casts and cv-qualifiers. - # match.group(1) remainder - # -------------- --------- - # const_cast< type&& - # const type&& - # type const&& - if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|' - r'reinterpret_cast\s*<|\w+\s)\s*$', - match.group(1)): - return True + # You should always have a space after a comma (either as fn arg or operator) + # + # This does not apply when the non-space character following the + # comma is another comma, since the only time when that happens is + # for empty macro arguments. + # + # We run this check in two passes: first pass on elided lines to + # verify that lines contain missing whitespaces, second pass on raw + # lines to confirm that those missing whitespaces are not due to + # elided comments. 
+ if Search(r",[^,\s]", ReplaceAll(r"\boperator\s*,\s*\(", "F(", line)) and Search( + r",[^,\s]", raw[linenum] + ): + error(filename, linenum, "whitespace/comma", 3, "Missing space after ,") + + # You should always have a space after a semicolon + # except for few corner cases + # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more + # space after ; + if Search(r";[^\s};\\)/]", line): + error(filename, linenum, "whitespace/semicolon", 3, "Missing space after ;") - # Look for a preceding symbol that might help differentiate the context. - # These are the cases that would be ambiguous: - # match.group(1) remainder - # -------------- --------- - # Call ( expression && - # Declaration ( type&& - # sizeof ( type&& - # if ( expression && - # while ( expression && - # for ( type&& - # for( ; expression && - # statement ; type&& - # block { type&& - # constructor { expression && - start = linenum - line = match.group(1) - match_symbol = None - while start >= 0: - # We want to skip over identifiers and commas to get to a symbol. - # Commas are skipped so that we can find the opening parenthesis - # for function parameter lists. - match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line) - if match_symbol: - break - start -= 1 - line = clean_lines.elided[start] - - if not match_symbol: - # Probably the first statement in the file is an rvalue reference - return True - if match_symbol.group(2) == '}': - # Found closing brace, probably an indicate of this: - # block{} type&& - return True +def CheckBracesSpacing(filename, clean_lines, linenum, error): + """Checks for horizontal spacing near commas. - if match_symbol.group(2) == ';': - # Found semicolon, probably one of these: - # for(; expression && - # statement; type&& - - # Look for the previous 'for(' in the previous lines. 
- before_text = match_symbol.group(1) - for i in range(start - 1, max(start - 6, 0), -1): - before_text = clean_lines.elided[i] + before_text - if Search(r'for\s*\([^{};]*$', before_text): - # This is the condition inside a for-loop - return False - - # Did not find a for-init-statement before this semicolon, so this - # is probably a new statement and not a condition. - return True + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] - if match_symbol.group(2) == '{': - # Found opening brace, probably one of these: - # block{ type&& = ... ; } - # constructor{ expression && expression } + # Except after an opening paren, or after another opening brace (in case of + # an initializer list, for instance), you should have spaces before your + # braces. And since you should never have braces at the beginning of a line, + # this is an easy test. + match = Match(r"^(.*[^ ({]){", line) + if match: + # Try a bit harder to check for brace initialization. This + # happens in one of the following forms: + # Constructor() : initializer_list_{} { ... } + # Constructor{}.MemberFunction() + # Type variable{}; + # FunctionCall(type{}, ...); + # LastArgument(..., type{}); + # LOG(INFO) << type{} << " ..."; + # map_of_type[{...}] = ...; + # ternary = expr ? new type{} : nullptr; + # OuterTemplate{}> + # + # We check for the character following the closing brace, and + # silence the warning if it's one of those listed above, i.e. + # "{.;,)<>]:". + # + # To account for nested initializer list, we allow any number of + # closing braces up to "{;,)<". We can't simply silence the + # warning on first sight of closing brace, because that would + # cause false negatives for things that are not initializer lists. + # Silence this: But not this: + # Outer{ if (...) 
{ + # Inner{...} if (...){ // Missing space before { + # }; } + # + # There is a false negative with this approach if people inserted + # spurious semicolons, e.g. "if (cond){};", but we will catch the + # spurious semicolon with a separate check. + (endline, endlinenum, endpos) = CloseExpression( + clean_lines, linenum, len(match.group(1)) + ) + trailing_text = "" + if endpos > -1: + trailing_text = endline[endpos:] + for offset in range( + endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1) + ): + trailing_text += clean_lines.elided[offset] + if not Match(r"^[\s}]*[{.;,)<>\]:]", trailing_text): + error(filename, linenum, "whitespace/braces", 5, "Missing space before {") + + # Make sure '} else {' has spaces. + if Search(r"}else", line): + error(filename, linenum, "whitespace/braces", 5, "Missing space before else") + + # You shouldn't have a space before a semicolon at the end of the line. + # There's a special case for "for" since the style guide allows space before + # the semicolon there. + if Search(r":\s*;\s*$", line): + error( + filename, + linenum, + "whitespace/semicolon", + 5, + "Semicolon defining empty statement. Use {} instead.", + ) + elif Search(r"^\s*;\s*$", line): + error( + filename, + linenum, + "whitespace/semicolon", + 5, + "Line contains only semicolon. If this should be an empty statement, " + "use {} instead.", + ) + elif Search(r"\s+;\s*$", line) and not Search(r"\bfor\b", line): + error( + filename, + linenum, + "whitespace/semicolon", + 5, + "Extra space before last semicolon. If this should be an empty " + "statement, use {} instead.", + ) - # Look for a closing brace or a semicolon. If we see a semicolon - # first, this is probably a rvalue reference. 
- line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1] - end = start - depth = 1 - while True: - for ch in line: - if ch == ';': - return True - elif ch == '{': - depth += 1 - elif ch == '}': - depth -= 1 - if depth == 0: - return False - end += 1 - if end >= clean_lines.NumLines(): - break - line = clean_lines.elided[end] - # Incomplete program? - return False - if match_symbol.group(2) == '(': - # Opening parenthesis. Need to check what's to the left of the - # parenthesis. Look back one extra line for additional context. - before_text = match_symbol.group(1) - if linenum > 1: - before_text = clean_lines.elided[linenum - 1] + before_text - before_text = match_symbol.group(1) - - # Patterns that are likely to be types: - # [](type&& - # for (type&& - # sizeof(type&& - # operator=(type&& - # - if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text): - return True - - # Patterns that are likely to be expressions: - # if (expression && - # while (expression && - # : initializer(expression && - # , initializer(expression && - # ( FunctionCall(expression && - # + FunctionCall(expression && - # + (expression && - # - # The last '+' represents operators such as '+' and '-'. - if Search(r'(?:\bif|\bwhile|[-+=%^(]*>)?\s*$', - match_symbol.group(1)) - if match_func: - # Check for constructors, which don't have return types. - if Search(r'\b(?:explicit|inline)$', match_func.group(1)): - return True - implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix) - if (implicit_constructor and - implicit_constructor.group(1) == implicit_constructor.group(2)): +def IsDecltype(clean_lines, linenum, column): + """Check if the token ending on (linenum, column) is decltype(). + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: the number of the line to check. + column: end column of the token to check. + Returns: + True if this token is decltype() expression, False otherwise. 
+ """ + (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) + if start_col < 0: + return False + if Search(r"\bdecltype\s*$", text[0:start_col]): return True - return IsRValueType(clean_lines, nesting_state, linenum, - len(match_func.group(1))) + return False + - # Nothing before the function name. If this is inside a block scope, - # this is probably a function call. - return not (nesting_state.previous_stack_top and - nesting_state.previous_stack_top.IsBlockInfo()) +def IsTemplateParameterList(clean_lines, linenum, column): + """Check if the token ending on (linenum, column) is the end of template<>. - if match_symbol.group(2) == '>': - # Possibly a closing bracket, check that what's on the other side - # looks like the start of a template. - return IsTemplateParameterList( - clean_lines, start, len(match_symbol.group(1))) + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: the number of the line to check. + column: end column of the token to check. + Returns: + True if this token is end of a template parameter list, False otherwise. + """ + (_, startline, startpos) = ReverseCloseExpression(clean_lines, linenum, column) + if startpos > -1 and Search( + r"\btemplate\s*$", clean_lines.elided[startline][0:startpos] + ): + return True + return False - # Some other symbol, usually something like "a=b&&c". This is most - # likely not a type. - return False +def IsRValueType(clean_lines, nesting_state, linenum, column): + """Check if the token ending on (linenum, column) is a type. -def IsDeletedOrDefault(clean_lines, linenum): - """Check if current constructor or operator is deleted or default. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if this is a deleted or default constructor. 
- """ - open_paren = clean_lines.elided[linenum].find('(') - if open_paren < 0: - return False - (close_line, _, close_paren) = CloseExpression( - clean_lines, linenum, open_paren) - if close_paren < 0: - return False - return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:]) + Assumes that text to the right of the column is "&&" or a function + name. + Args: + clean_lines: A CleansedLines instance containing the file. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + linenum: the number of the line to check. + column: end column of the token to check. + Returns: + True if this token is a type, False if we are not sure. + """ + prefix = clean_lines.elided[linenum][0:column] -def IsRValueAllowed(clean_lines, linenum): - """Check if RValue reference is allowed on a particular line. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if line is within the region where RValue references are allowed. - """ - # Allow region marked by PUSH/POP macros - for i in range(linenum, 0, -1): - line = clean_lines.elided[i] - if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): - if not line.endswith('PUSH'): + # Get one word to the left. If we failed to do so, this is most + # likely not a type, since it's unlikely that the type name and "&&" + # would be split across multiple lines. 
+ match = Match(r"^(.*)(\b\w+|[>*)&])\s*$", prefix) + if not match: return False - for j in range(linenum, clean_lines.NumLines(), 1): - line = clean_lines.elided[j] - if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line): - return line.endswith('POP') - - # Allow operator= - line = clean_lines.elided[linenum] - if Search(r'\boperator\s*=\s*\(', line): - return IsDeletedOrDefault(clean_lines, linenum) - - # Allow constructors - match = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line) - if match and match.group(1) == match.group(2): - return IsDeletedOrDefault(clean_lines, linenum) - if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line): - return IsDeletedOrDefault(clean_lines, linenum) - - if Match(r'\s*[\w<>]+\s*\(', line): - previous_line = 'ReturnType' - if linenum > 0: - previous_line = clean_lines.elided[linenum - 1] - if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line): - return IsDeletedOrDefault(clean_lines, linenum) - return False + # Check text following the token. If it's "&&>" or "&&," or "&&...", it's + # most likely a rvalue reference used inside a template. + suffix = clean_lines.elided[linenum][column:] + if Match(r"&&\s*(?:[>,]|\.\.\.)", suffix): + return True + # Check for simple type and end of templates: + # int&& variable + # vector&& variable + # + # Because this function is called recursively, we also need to + # recognize pointer and reference types: + # int* Function() + # int& Function() + if match.group(2) in [ + "char", + "char16_t", + "char32_t", + "wchar_t", + "bool", + "short", + "int", + "long", + "signed", + "unsigned", + "float", + "double", + "void", + "auto", + ">", + "*", + "&", + ]: + return True -def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): - """Check for rvalue references. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. 
- nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # Find lines missing spaces around &&. - # TODO(unknown): currently we don't check for rvalue references - # with spaces surrounding the && to avoid false positives with - # boolean expressions. - line = clean_lines.elided[linenum] - match = Match(r'^(.*\S)&&', line) - if not match: - match = Match(r'(.*)&&\S', line) - if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)): - return - - # Either poorly formed && or an rvalue reference, check the context - # to get a more accurate error message. Mostly we want to determine - # if what's to the left of "&&" is a type or not. - and_pos = len(match.group(1)) - if IsRValueType(clean_lines, nesting_state, linenum, and_pos): - if not IsRValueAllowed(clean_lines, linenum): - error(filename, linenum, 'build/c++11', 3, - 'RValue references are an unapproved C++ feature.') - else: - error(filename, linenum, 'whitespace/operators', 3, - 'Missing spaces around &&') + # If we see a close parenthesis, look for decltype on the other side. + # decltype would unambiguously identify a type, anything else is + # probably a parenthesized expression and not a type. + if match.group(2) == ")": + return IsDecltype( + clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1 + ) + + # Check for casts and cv-qualifiers. + # match.group(1) remainder + # -------------- --------- + # const_cast< type&& + # const type&& + # type const&& + if Search( + r"\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|" + r"reinterpret_cast\s*<|\w+\s)\s*$", + match.group(1), + ): + return True + # Look for a preceding symbol that might help differentiate the context. 
+ # These are the cases that would be ambiguous: + # match.group(1) remainder + # -------------- --------- + # Call ( expression && + # Declaration ( type&& + # sizeof ( type&& + # if ( expression && + # while ( expression && + # for ( type&& + # for( ; expression && + # statement ; type&& + # block { type&& + # constructor { expression && + start = linenum + line = match.group(1) + match_symbol = None + while start >= 0: + # We want to skip over identifiers and commas to get to a symbol. + # Commas are skipped so that we can find the opening parenthesis + # for function parameter lists. + match_symbol = Match(r"^(.*)([^\w\s,])[\w\s,]*$", line) + if match_symbol: + break + start -= 1 + line = clean_lines.elided[start] -def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): - """Checks for additional blank line issues related to sections. - - Currently the only thing checked here is blank line before protected/private. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - class_info: A _ClassInfo objects. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - # Skip checks if the class is small, where small means 25 lines or less. - # 25 lines seems like a good cutoff since that's the usual height of - # terminals, and any class that can't fit in one screen can't really - # be considered "small". - # - # Also skip checks if we are on the first line. This accounts for - # classes that look like - # class Foo { public: ... }; - # - # If we didn't find the end of the class, last_line would be zero, - # and the check will be skipped by the first condition. 
- if (class_info.last_line - class_info.starting_linenum <= 24 or - linenum <= class_info.starting_linenum): - return - - matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) - if matched: - # Issue warning if the line before public/protected/private was - # not a blank line, but don't do this if the previous line contains - # "class" or "struct". This can happen two ways: - # - We are at the beginning of the class. - # - We are forward-declaring an inner class that is semantically - # private, but needed to be public for implementation reasons. - # Also ignores cases where the previous line ends with a backslash as can be - # common when defining classes in C macros. - prev_line = clean_lines.lines[linenum - 1] - if (not IsBlankLine(prev_line) and - not Search(r'\b(class|struct)\b', prev_line) and - not Search(r'\\$', prev_line)): - # Try a bit harder to find the beginning of the class. This is to - # account for multi-line base-specifier lists, e.g.: - # class Derived - # : public Base { - end_class_head = class_info.starting_linenum - for i in range(class_info.starting_linenum, linenum): - if Search(r'\{\s*$', clean_lines.lines[i]): - end_class_head = i - break - if end_class_head < linenum - 1: - error(filename, linenum, 'whitespace/blank_line', 3, - '"%s:" should be preceded by a blank line' % matched.group(1)) + if not match_symbol: + # Probably the first statement in the file is an rvalue reference + return True + if match_symbol.group(2) == "}": + # Found closing brace, probably an indicate of this: + # block{} type&& + return True -def GetPreviousNonBlankLine(clean_lines, linenum): - """Return the most recent non-blank line and its line number. + if match_symbol.group(2) == ";": + # Found semicolon, probably one of these: + # for(; expression && + # statement; type&& + + # Look for the previous 'for(' in the previous lines. 
+ before_text = match_symbol.group(1) + for i in range(start - 1, max(start - 6, 0), -1): + before_text = clean_lines.elided[i] + before_text + if Search(r"for\s*\([^{};]*$", before_text): + # This is the condition inside a for-loop + return False - Args: - clean_lines: A CleansedLines instance containing the file contents. - linenum: The number of the line to check. + # Did not find a for-init-statement before this semicolon, so this + # is probably a new statement and not a condition. + return True - Returns: - A tuple with two elements. The first element is the contents of the last - non-blank line before the current line, or the empty string if this is the - first non-blank line. The second is the line number of that line, or -1 - if this is the first non-blank line. - """ + if match_symbol.group(2) == "{": + # Found opening brace, probably one of these: + # block{ type&& = ... ; } + # constructor{ expression && expression } + + # Look for a closing brace or a semicolon. If we see a semicolon + # first, this is probably a rvalue reference. + line = clean_lines.elided[start][0 : len(match_symbol.group(1)) + 1] + end = start + depth = 1 + while True: + for ch in line: + if ch == ";": + return True + elif ch == "{": + depth += 1 + elif ch == "}": + depth -= 1 + if depth == 0: + return False + end += 1 + if end >= clean_lines.NumLines(): + break + line = clean_lines.elided[end] + # Incomplete program? + return False - prevlinenum = linenum - 1 - while prevlinenum >= 0: - prevline = clean_lines.elided[prevlinenum] - if not IsBlankLine(prevline): # if not a blank line... - return (prevline, prevlinenum) - prevlinenum -= 1 - return ('', -1) + if match_symbol.group(2) == "(": + # Opening parenthesis. Need to check what's to the left of the + # parenthesis. Look back one extra line for additional context. 
+        before_text = match_symbol.group(1)
+        if linenum > 1:
+            before_text = clean_lines.elided[linenum - 1] + before_text
+        before_text = match_symbol.group(1)
+
+        # Patterns that are likely to be types:
+        #   [](type&&
+        #   for (type&&
+        #   sizeof(type&&
+        #   operator=(type&&
+        #
+        if Search(r"(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$", before_text):
+            return True
+
+        # Patterns that are likely to be expressions:
+        #   if (expression &&
+        #   while (expression &&
+        #   : initializer(expression &&
+        #   , initializer(expression &&
+        #   ( FunctionCall(expression &&
+        #   + FunctionCall(expression &&
+        #   + (expression &&
+        #
+        # The last '+' represents operators such as '+' and '-'.
+        if Search(
+            r"(?:\bif|\bwhile|[-+=%^(<!?:,&*]|(?:\bfor\s*\(.*;))\s*$", before_text
+        ):
+            return False
+
+        # Something else.  Check that tokens to the left look like
+        #   return_type function_name
+        match_func = Match(
+            r"^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$", match_symbol.group(1)
+        )
+        if match_func:
+            # Check for constructors, which don't have return types.
+            if Search(r"\b(?:explicit|inline)$", match_func.group(1)):
+                return True
+            implicit_constructor = Match(r"\s*(\w+)\((?:const\s+)?(\w+)", prefix)
+            if implicit_constructor and implicit_constructor.group(
+                1
+            ) == implicit_constructor.group(2):
+                return True
+            return IsRValueType(
+                clean_lines, nesting_state, linenum, len(match_func.group(1))
+            )
+
+        # Nothing before the function name.  If this is inside a block scope,
+        # this is probably a function call.
+        return not (
+            nesting_state.previous_stack_top
+            and nesting_state.previous_stack_top.IsBlockInfo()
+        )
+
+    if match_symbol.group(2) == ">":
+        # Possibly a closing bracket, check that what's on the other side
+        # looks like the start of a template.
+        return IsTemplateParameterList(clean_lines, start, len(match_symbol.group(1)))
+
+    # Some other symbol, usually something like "a=b&&c".  This is most
+    # likely not a type.
+    return False
-def CheckBraces(filename, clean_lines, linenum, error):
-  """Looks for misplaced braces (e.g. at the end of line).
-
-  Args:
-    filename: The name of the current file.
-    clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - line = clean_lines.elided[linenum] # get rid of comments and strings - - if Match(r'\s*{\s*$', line): - # We allow an open brace to start a line in the case where someone is using - # braces in a block to explicitly create a new scope, which is commonly used - # to control the lifetime of stack-allocated variables. Braces are also - # used for brace initializers inside function calls. We don't detect this - # perfectly: we just don't complain if the last non-whitespace character on - # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the - # previous line starts a preprocessor block. - prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if (not Search(r'[,;:}{(]\s*$', prevline) and - not Match(r'\s*#', prevline)): - error(filename, linenum, 'whitespace/braces', 4, - '{ should almost always be at the end of the previous line') - - # An else clause should be on the same line as the preceding closing brace. - if Match(r'\s*else\b\s*(?:if\b|\{|$)', line): - prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if Match(r'\s*}\s*$', prevline): - error(filename, linenum, 'whitespace/newline', 4, - 'An else should appear on the same line as the preceding }') - - # If braces come on one side of an else, they should be on both. - # However, we have to worry about "else if" that spans multiple lines! 
- if Search(r'else if\s*\(', line): # could be multi-line if - brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) - # find the ( after the if - pos = line.find('else if') - pos = line.find('(', pos) - if pos > 0: - (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) - brace_on_right = endline[endpos:].find('{') != -1 - if brace_on_left != brace_on_right: # must be brace after if - error(filename, linenum, 'readability/braces', 5, - 'If an else has a brace on one side, it should have it on both') - elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): - error(filename, linenum, 'readability/braces', 5, - 'If an else has a brace on one side, it should have it on both') - - # Likewise, an else should never have the else clause on the same line - if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): - error(filename, linenum, 'whitespace/newline', 4, - 'Else clause should never be on same line as else (use 2 lines)') - - # In the same way, a do/while should never be on one line - if Match(r'\s*do [^\s{]', line): - error(filename, linenum, 'whitespace/newline', 4, - 'do/while clauses should not be on a single line') - - # Check single-line if/else bodies. The style guide says 'curly braces are not - # required for single-line statements'. We additionally allow multi-line, - # single statements, but we reject anything with more than one semicolon in - # it. This means that the first semicolon after the if should be at the end of - # its line, and the line after that should have an indent level equal to or - # lower than the if. We also check for ambiguous if/else nesting without - # braces. - if_else_match = Search(r'\b(if\s*\(|else\b)', line) - if if_else_match and not Match(r'\s*#', line): - if_indent = GetIndentLevel(line) - endline, endlinenum, endpos = line, linenum, if_else_match.end() - if_match = Search(r'\bif\s*\(', line) - if if_match: - # This could be a multiline if condition, so find the end first. 
- pos = if_match.end() - 1 - (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) - # Check for an opening brace, either directly after the if or on the next - # line. If found, this isn't a single-statement conditional. - if (not Match(r'\s*{', endline[endpos:]) - and not (Match(r'\s*$', endline[endpos:]) - and endlinenum < (len(clean_lines.elided) - 1) - and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): - while (endlinenum < len(clean_lines.elided) - and ';' not in clean_lines.elided[endlinenum][endpos:]): - endlinenum += 1 - endpos = 0 - if endlinenum < len(clean_lines.elided): - endline = clean_lines.elided[endlinenum] - # We allow a mix of whitespace and closing braces (e.g. for one-liner - # methods) and a single \ after the semicolon (for macros) - endpos = endline.find(';') - if not Match(r';[\s}]*(\\?)$', endline[endpos:]): - # Semicolon isn't the last character, there's something trailing. - # Output a warning if the semicolon is not contained inside - # a lambda expression. - if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', - endline): - error(filename, linenum, 'readability/braces', 4, - 'If/else bodies with multiple statements require braces') - elif endlinenum < len(clean_lines.elided) - 1: - # Make sure the next line is dedented - next_line = clean_lines.elided[endlinenum + 1] - next_indent = GetIndentLevel(next_line) - # With ambiguous nested if statements, this will error out on the - # if that *doesn't* match the else, regardless of whether it's the - # inner one or outer one. - if (if_match and Match(r'\s*else\b', next_line) - and next_indent != if_indent): - error(filename, linenum, 'readability/braces', 4, - 'Else clause should be indented at the same level as if. 
' - 'Ambiguous nested if/else chains require braces.') - elif next_indent > if_indent: - error(filename, linenum, 'readability/braces', 4, - 'If/else bodies with multiple statements require braces') + +def IsDeletedOrDefault(clean_lines, linenum): + """Check if current constructor or operator is deleted or default. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + Returns: + True if this is a deleted or default constructor. + """ + open_paren = clean_lines.elided[linenum].find("(") + if open_paren < 0: + return False + (close_line, _, close_paren) = CloseExpression(clean_lines, linenum, open_paren) + if close_paren < 0: + return False + return Match(r"\s*=\s*(?:delete|default)\b", close_line[close_paren:]) + + +def IsRValueAllowed(clean_lines, linenum): + """Check if RValue reference is allowed on a particular line. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + Returns: + True if line is within the region where RValue references are allowed. 
+ """ + # Allow region marked by PUSH/POP macros + for i in range(linenum, 0, -1): + line = clean_lines.elided[i] + if Match(r"GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)", line): + if not line.endswith("PUSH"): + return False + for j in range(linenum, clean_lines.NumLines(), 1): + line = clean_lines.elided[j] + if Match(r"GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)", line): + return line.endswith("POP") + + # Allow operator= + line = clean_lines.elided[linenum] + if Search(r"\boperator\s*=\s*\(", line): + return IsDeletedOrDefault(clean_lines, linenum) + + # Allow constructors + match = Match(r"\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(", line) + if match and match.group(1) == match.group(2): + return IsDeletedOrDefault(clean_lines, linenum) + if Search(r"\b(?:explicit|inline)\s+[\w<>]+\s*\(", line): + return IsDeletedOrDefault(clean_lines, linenum) + + if Match(r"\s*[\w<>]+\s*\(", line): + previous_line = "ReturnType" + if linenum > 0: + previous_line = clean_lines.elided[linenum - 1] + if Match(r"^\s*$", previous_line) or Search(r"[{}:;]\s*$", previous_line): + return IsDeletedOrDefault(clean_lines, linenum) + + return False + + +def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error): + """Check for rvalue references. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + # Find lines missing spaces around &&. + # TODO(unknown): currently we don't check for rvalue references + # with spaces surrounding the && to avoid false positives with + # boolean expressions. 
+ line = clean_lines.elided[linenum] + match = Match(r"^(.*\S)&&", line) + if not match: + match = Match(r"(.*)&&\S", line) + if (not match) or "(&&)" in line or Search(r"\boperator\s*$", match.group(1)): + return + + # Either poorly formed && or an rvalue reference, check the context + # to get a more accurate error message. Mostly we want to determine + # if what's to the left of "&&" is a type or not. + and_pos = len(match.group(1)) + if IsRValueType(clean_lines, nesting_state, linenum, and_pos): + if not IsRValueAllowed(clean_lines, linenum): + error( + filename, + linenum, + "build/c++11", + 3, + "RValue references are an unapproved C++ feature.", + ) + else: + error(filename, linenum, "whitespace/operators", 3, "Missing spaces around &&") + + +def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): + """Checks for additional blank line issues related to sections. + + Currently the only thing checked here is blank line before protected/private. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + class_info: A _ClassInfo objects. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + # Skip checks if the class is small, where small means 25 lines or less. + # 25 lines seems like a good cutoff since that's the usual height of + # terminals, and any class that can't fit in one screen can't really + # be considered "small". + # + # Also skip checks if we are on the first line. This accounts for + # classes that look like + # class Foo { public: ... }; + # + # If we didn't find the end of the class, last_line would be zero, + # and the check will be skipped by the first condition. 
+ if ( + class_info.last_line - class_info.starting_linenum <= 24 + or linenum <= class_info.starting_linenum + ): + return + + matched = Match(r"\s*(public|protected|private):", clean_lines.lines[linenum]) + if matched: + # Issue warning if the line before public/protected/private was + # not a blank line, but don't do this if the previous line contains + # "class" or "struct". This can happen two ways: + # - We are at the beginning of the class. + # - We are forward-declaring an inner class that is semantically + # private, but needed to be public for implementation reasons. + # Also ignores cases where the previous line ends with a backslash as can be + # common when defining classes in C macros. + prev_line = clean_lines.lines[linenum - 1] + if ( + not IsBlankLine(prev_line) + and not Search(r"\b(class|struct)\b", prev_line) + and not Search(r"\\$", prev_line) + ): + # Try a bit harder to find the beginning of the class. This is to + # account for multi-line base-specifier lists, e.g.: + # class Derived + # : public Base { + end_class_head = class_info.starting_linenum + for i in range(class_info.starting_linenum, linenum): + if Search(r"\{\s*$", clean_lines.lines[i]): + end_class_head = i + break + if end_class_head < linenum - 1: + error( + filename, + linenum, + "whitespace/blank_line", + 3, + '"%s:" should be preceded by a blank line' % matched.group(1), + ) + + +def GetPreviousNonBlankLine(clean_lines, linenum): + """Return the most recent non-blank line and its line number. + + Args: + clean_lines: A CleansedLines instance containing the file contents. + linenum: The number of the line to check. + + Returns: + A tuple with two elements. The first element is the contents of the last + non-blank line before the current line, or the empty string if this is the + first non-blank line. The second is the line number of that line, or -1 + if this is the first non-blank line. 
+ """ + + prevlinenum = linenum - 1 + while prevlinenum >= 0: + prevline = clean_lines.elided[prevlinenum] + if not IsBlankLine(prevline): # if not a blank line... + return (prevline, prevlinenum) + prevlinenum -= 1 + return ("", -1) + + +def CheckBraces(filename, clean_lines, linenum, error): + """Looks for misplaced braces (e.g. at the end of line). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[linenum] # get rid of comments and strings + + if Match(r"\s*{\s*$", line): + # We allow an open brace to start a line in the case where someone is using + # braces in a block to explicitly create a new scope, which is commonly used + # to control the lifetime of stack-allocated variables. Braces are also + # used for brace initializers inside function calls. We don't detect this + # perfectly: we just don't complain if the last non-whitespace character on + # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the + # previous line starts a preprocessor block. + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if not Search(r"[,;:}{(]\s*$", prevline) and not Match(r"\s*#", prevline): + error( + filename, + linenum, + "whitespace/braces", + 4, + "{ should almost always be at the end of the previous line", + ) + + # An else clause should be on the same line as the preceding closing brace. + if Match(r"\s*else\b\s*(?:if\b|\{|$)", line): + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if Match(r"\s*}\s*$", prevline): + error( + filename, + linenum, + "whitespace/newline", + 4, + "An else should appear on the same line as the preceding }", + ) + + # If braces come on one side of an else, they should be on both. + # However, we have to worry about "else if" that spans multiple lines! 
+ if Search(r"else if\s*\(", line): # could be multi-line if + brace_on_left = bool(Search(r"}\s*else if\s*\(", line)) + # find the ( after the if + pos = line.find("else if") + pos = line.find("(", pos) + if pos > 0: + (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) + brace_on_right = endline[endpos:].find("{") != -1 + if brace_on_left != brace_on_right: # must be brace after if + error( + filename, + linenum, + "readability/braces", + 5, + "If an else has a brace on one side, it should have it on both", + ) + elif Search(r"}\s*else[^{]*$", line) or Match(r"[^}]*else\s*{", line): + error( + filename, + linenum, + "readability/braces", + 5, + "If an else has a brace on one side, it should have it on both", + ) + + # Likewise, an else should never have the else clause on the same line + if Search(r"\belse [^\s{]", line) and not Search(r"\belse if\b", line): + error( + filename, + linenum, + "whitespace/newline", + 4, + "Else clause should never be on same line as else (use 2 lines)", + ) + + # In the same way, a do/while should never be on one line + if Match(r"\s*do [^\s{]", line): + error( + filename, + linenum, + "whitespace/newline", + 4, + "do/while clauses should not be on a single line", + ) + + # Check single-line if/else bodies. The style guide says 'curly braces are not + # required for single-line statements'. We additionally allow multi-line, + # single statements, but we reject anything with more than one semicolon in + # it. This means that the first semicolon after the if should be at the end of + # its line, and the line after that should have an indent level equal to or + # lower than the if. We also check for ambiguous if/else nesting without + # braces. 
+ if_else_match = Search(r"\b(if\s*\(|else\b)", line) + if if_else_match and not Match(r"\s*#", line): + if_indent = GetIndentLevel(line) + endline, endlinenum, endpos = line, linenum, if_else_match.end() + if_match = Search(r"\bif\s*\(", line) + if if_match: + # This could be a multiline if condition, so find the end first. + pos = if_match.end() - 1 + (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) + # Check for an opening brace, either directly after the if or on the next + # line. If found, this isn't a single-statement conditional. + if not Match(r"\s*{", endline[endpos:]) and not ( + Match(r"\s*$", endline[endpos:]) + and endlinenum < (len(clean_lines.elided) - 1) + and Match(r"\s*{", clean_lines.elided[endlinenum + 1]) + ): + while ( + endlinenum < len(clean_lines.elided) + and ";" not in clean_lines.elided[endlinenum][endpos:] + ): + endlinenum += 1 + endpos = 0 + if endlinenum < len(clean_lines.elided): + endline = clean_lines.elided[endlinenum] + # We allow a mix of whitespace and closing braces (e.g. for one-liner + # methods) and a single \ after the semicolon (for macros) + endpos = endline.find(";") + if not Match(r";[\s}]*(\\?)$", endline[endpos:]): + # Semicolon isn't the last character, there's something trailing. + # Output a warning if the semicolon is not contained inside + # a lambda expression. + if not Match( + r"^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$", endline + ): + error( + filename, + linenum, + "readability/braces", + 4, + "If/else bodies with multiple statements require braces", + ) + elif endlinenum < len(clean_lines.elided) - 1: + # Make sure the next line is dedented + next_line = clean_lines.elided[endlinenum + 1] + next_indent = GetIndentLevel(next_line) + # With ambiguous nested if statements, this will error out on the + # if that *doesn't* match the else, regardless of whether it's the + # inner one or outer one. 
+ if ( + if_match + and Match(r"\s*else\b", next_line) + and next_indent != if_indent + ): + error( + filename, + linenum, + "readability/braces", + 4, + "Else clause should be indented at the same level as if. " + "Ambiguous nested if/else chains require braces.", + ) + elif next_indent > if_indent: + error( + filename, + linenum, + "readability/braces", + 4, + "If/else bodies with multiple statements require braces", + ) def CheckTrailingSemicolon(filename, clean_lines, linenum, error): - """Looks for redundant trailing semicolon. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - line = clean_lines.elided[linenum] - - # Block bodies should not be followed by a semicolon. Due to C++11 - # brace initialization, there are more places where semicolons are - # required than not, so we use a whitelist approach to check these - # rather than a blacklist. These are the places where "};" should - # be replaced by just "}": - # 1. Some flavor of block following closing parenthesis: - # for (;;) {}; - # while (...) {}; - # switch (...) {}; - # Function(...) {}; - # if (...) {}; - # if (...) else if (...) {}; - # - # 2. else block: - # if (...) else {}; - # - # 3. const member function: - # Function(...) const {}; - # - # 4. Block following some statement: - # x = 42; - # {}; - # - # 5. Block at the beginning of a function: - # Function(...) { - # {}; - # } - # - # Note that naively checking for the preceding "{" will also match - # braces inside multi-dimensional arrays, but this is fine since - # that expression will not contain semicolons. - # - # 6. Block following another block: - # while (true) {} - # {}; - # - # 7. 
End of namespaces: - # namespace {}; - # - # These semicolons seems far more common than other kinds of - # redundant semicolons, possibly due to people converting classes - # to namespaces. For now we do not warn for this case. - # - # Try matching case 1 first. - match = Match(r'^(.*\)\s*)\{', line) - if match: - # Matched closing parenthesis (case 1). Check the token before the - # matching opening parenthesis, and don't warn if it looks like a - # macro. This avoids these false positives: - # - macro that defines a base class - # - multi-line macro that defines a base class - # - macro that defines the whole class-head + """Looks for redundant trailing semicolon. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[linenum] + + # Block bodies should not be followed by a semicolon. Due to C++11 + # brace initialization, there are more places where semicolons are + # required than not, so we use a whitelist approach to check these + # rather than a blacklist. These are the places where "};" should + # be replaced by just "}": + # 1. Some flavor of block following closing parenthesis: + # for (;;) {}; + # while (...) {}; + # switch (...) {}; + # Function(...) {}; + # if (...) {}; + # if (...) else if (...) {}; + # + # 2. else block: + # if (...) else {}; + # + # 3. const member function: + # Function(...) const {}; + # + # 4. Block following some statement: + # x = 42; + # {}; + # + # 5. Block at the beginning of a function: + # Function(...) { + # {}; + # } + # + # Note that naively checking for the preceding "{" will also match + # braces inside multi-dimensional arrays, but this is fine since + # that expression will not contain semicolons. + # + # 6. Block following another block: + # while (true) {} + # {}; + # + # 7. 
End of namespaces: + # namespace {}; + # + # These semicolons seems far more common than other kinds of + # redundant semicolons, possibly due to people converting classes + # to namespaces. For now we do not warn for this case. + # + # Try matching case 1 first. + match = Match(r"^(.*\)\s*)\{", line) + if match: + # Matched closing parenthesis (case 1). Check the token before the + # matching opening parenthesis, and don't warn if it looks like a + # macro. This avoids these false positives: + # - macro that defines a base class + # - multi-line macro that defines a base class + # - macro that defines the whole class-head + # + # But we still issue warnings for macros that we know are safe to + # warn, specifically: + # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P + # - TYPED_TEST + # - INTERFACE_DEF + # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: + # + # We implement a whitelist of safe macros instead of a blacklist of + # unsafe macros, even though the latter appears less frequently in + # google code and would have been easier to implement. This is because + # the downside for getting the whitelist wrong means some extra + # semicolons, while the downside for getting the blacklist wrong + # would result in compile errors. + # + # In addition to macros, we also don't want to warn on compound + # literals and lambdas. 
+ closing_brace_pos = match.group(1).rfind(")") + opening_parenthesis = ReverseCloseExpression( + clean_lines, linenum, closing_brace_pos + ) + if opening_parenthesis[2] > -1: + line_prefix = opening_parenthesis[0][0 : opening_parenthesis[2]] + macro = Search(r"\b([A-Z_]+)\s*$", line_prefix) + func = Match(r"^(.*\])\s*$", line_prefix) + if ( + ( + macro + and macro.group(1) + not in ( + "TEST", + "TEST_F", + "MATCHER", + "MATCHER_P", + "TYPED_TEST", + "EXCLUSIVE_LOCKS_REQUIRED", + "SHARED_LOCKS_REQUIRED", + "LOCKS_EXCLUDED", + "INTERFACE_DEF", + ) + ) + or (func and not Search(r"\boperator\s*\[\s*\]", func.group(1))) + or Search(r"\s+=\s*$", line_prefix) + ): + match = None + if ( + match + and opening_parenthesis[1] > 1 + and Search(r"\]\s*$", clean_lines.elided[opening_parenthesis[1] - 1]) + ): + # Multi-line lambda-expression + match = None + + else: + # Try matching cases 2-3. + match = Match(r"^(.*(?:else|\)\s*const)\s*)\{", line) + if not match: + # Try matching cases 4-6. These are always matched on separate lines. + # + # Note that we can't simply concatenate the previous line to the + # current line and do a single match, otherwise we may output + # duplicate warnings for the blank line case: + # if (cond) { + # // blank line + # } + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if prevline and Search(r"[;{}]\s*$", prevline): + match = Match(r"^(\s*)\{", line) + + # Check matching closing brace + if match: + (endline, endlinenum, endpos) = CloseExpression( + clean_lines, linenum, len(match.group(1)) + ) + if endpos > -1 and Match(r"^\s*;", endline[endpos:]): + # Current {} pair is eligible for semicolon check, and we have found + # the redundant semicolon, output warning here. + # + # Note: because we are scanning forward for opening braces, and + # outputting warnings for the matching closing brace, if there are + # nested blocks with trailing semicolons, we will get the error + # messages in reversed order. 
+ error( + filename, + endlinenum, + "readability/braces", + 4, + "You don't need a ; after a }", + ) + + +def CheckEmptyBlockBody(filename, clean_lines, linenum, error): + """Look for empty loop/conditional body with only a single semicolon. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + # Search for loop keywords at the beginning of the line. Because only + # whitespaces are allowed before the keywords, this will also ignore most + # do-while-loops, since those lines should start with closing brace. + # + # We also check "if" blocks here, since an empty conditional block + # is likely an error. + line = clean_lines.elided[linenum] + matched = Match(r"\s*(for|while|if)\s*\(", line) + if matched: + # Find the end of the conditional expression + (end_line, end_linenum, end_pos) = CloseExpression( + clean_lines, linenum, line.find("(") + ) + + # Output warning if what follows the condition expression is a semicolon. + # No warning for all other cases, including whitespace or newline, since we + # have a separate check for semicolons preceded by whitespace. + if end_pos >= 0 and Match(r";", end_line[end_pos:]): + if matched.group(1) == "if": + error( + filename, + end_linenum, + "whitespace/empty_conditional_body", + 5, + "Empty conditional bodies should use {}", + ) + else: + error( + filename, + end_linenum, + "whitespace/empty_loop_body", + 5, + "Empty loop bodies should use {} or continue", + ) + + +def FindCheckMacro(line): + """Find a replaceable CHECK-like macro. + + Args: + line: line to search on. + Returns: + (macro name, start position), or (None, -1) if no replaceable + macro is found. + """ + for macro in _CHECK_MACROS: + i = line.find(macro) + if i >= 0: + # Find opening parenthesis. 
Do a regular expression match here + # to make sure that we are matching the expected CHECK macro, as + # opposed to some other macro that happens to contain the CHECK + # substring. + matched = Match(r"^(.*\b" + macro + r"\s*)\(", line) + if not matched: + continue + return (macro, len(matched.group(1))) + return (None, -1) + + +def CheckCheck(filename, clean_lines, linenum, error): + """Checks the use of CHECK and EXPECT macros. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + # Decide the set of replacement macros that should be suggested + lines = clean_lines.elided + (check_macro, start_pos) = FindCheckMacro(lines[linenum]) + if not check_macro: + return + + # Find end of the boolean expression by matching parentheses + (last_line, end_line, end_pos) = CloseExpression(clean_lines, linenum, start_pos) + if end_pos < 0: + return + + # If the check macro is followed by something other than a + # semicolon, assume users will log their own custom error messages + # and don't suggest any replacements. + if not Match(r"\s*;", last_line[end_pos:]): + return + + if linenum == end_line: + expression = lines[linenum][start_pos + 1 : end_pos - 1] + else: + expression = lines[linenum][start_pos + 1 :] + for i in range(linenum + 1, end_line): + expression += lines[i] + expression += last_line[0 : end_pos - 1] + + # Parse expression so that we can take parentheses into account. + # This avoids false positives for inputs like "CHECK((a < 4) == b)", + # which is not replaceable by CHECK_LE. 
+ lhs = "" + rhs = "" + operator = None + while expression: + matched = Match( + r"^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||" r"==|!=|>=|>|<=|<|\()(.*)$", + expression, + ) + if matched: + token = matched.group(1) + if token == "(": + # Parenthesized operand + expression = matched.group(2) + (end, _) = FindEndOfExpressionInLine(expression, 0, ["("]) + if end < 0: + return # Unmatched parenthesis + lhs += "(" + expression[0:end] + expression = expression[end:] + elif token in ("&&", "||"): + # Logical and/or operators. This means the expression + # contains more than one term, for example: + # CHECK(42 < a && a < b); + # + # These are not replaceable with CHECK_LE, so bail out early. + return + elif token in ("<<", "<<=", ">>", ">>=", "->*", "->"): + # Non-relational operator + lhs += token + expression = matched.group(2) + else: + # Relational operator + operator = token + rhs = matched.group(2) + break + else: + # Unparenthesized operand. Instead of appending to lhs one character + # at a time, we do another regular expression match to consume several + # characters at once if possible. Trivial benchmark shows that this + # is more efficient when the operands are longer than a single + # character, which is generally the case. + matched = Match(r"^([^-=!<>()&|]+)(.*)$", expression) + if not matched: + matched = Match(r"^(\s*\S)(.*)$", expression) + if not matched: + break + lhs += matched.group(1) + expression = matched.group(2) + + # Only apply checks if we got all parts of the boolean expression + if not (lhs and operator and rhs): + return + + # Check that rhs do not contain logical operators. We already know + # that lhs is fine since the loop above parses out && and ||. + if rhs.find("&&") > -1 or rhs.find("||") > -1: + return + + # At least one of the operands must be a constant literal. 
This is + # to avoid suggesting replacements for unprintable things like + # CHECK(variable != iterator) + # + # The following pattern matches decimal, hex integers, strings, and + # characters (in that order). + lhs = lhs.strip() + rhs = rhs.strip() + match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' + if Match(match_constant, lhs) or Match(match_constant, rhs): + # Note: since we know both lhs and rhs, we can provide a more + # descriptive error message like: + # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) + # Instead of: + # Consider using CHECK_EQ instead of CHECK(a == b) + # + # We are still keeping the less descriptive message because if lhs + # or rhs gets long, the error message might become unreadable. + error( + filename, + linenum, + "readability/check", + 2, + "Consider using %s instead of %s(a %s b)" + % (_CHECK_REPLACEMENT[check_macro][operator], check_macro, operator), + ) + + +def CheckAltTokens(filename, clean_lines, linenum, error): + """Check alternative keywords being used in boolean expressions. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Avoid preprocessor lines + if Match(r"^\s*#", line): + return + + # Last ditch effort to avoid multi-line comments. This will not help + # if the comment started before the current line or ended after the + # current line, but it catches most of the false positives. At least, + # it provides a way to workaround this warning for people who use + # multi-line comments in preprocessor macros. + # + # TODO(unknown): remove this once cpplint has better support for + # multi-line comments. 
+ if line.find("/*") >= 0 or line.find("*/") >= 0: + return + + for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): + error( + filename, + linenum, + "readability/alt_tokens", + 2, + "Use operator %s instead of %s" + % (_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)), + ) + + +def GetLineWidth(line): + """Determines the width of the line in column positions. + + Args: + line: A string, which may be a Unicode string. + + Returns: + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. + """ + if isinstance(line, str): + width = 0 + for uc in unicodedata.normalize("NFC", line): + if unicodedata.east_asian_width(uc) in ("W", "F"): + width += 2 + elif not unicodedata.combining(uc): + width += 1 + return width + else: + return len(line) + + +def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): + """Checks rules from the 'C++ style rules' section of cppguide.html. + + Most of these rules are hard to test (naming, comment style), but we + do what we can. In particular we check for 2-space indents, line lengths, + tab usage, spaces inside code, etc. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + file_extension: The extension (without the dot) of the filename. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + + # Don't use "elided" lines here, otherwise we can't check commented lines. 
+ # Don't want to use "raw" either, because we don't want to check inside C++11 + # raw strings, + raw_lines = clean_lines.lines_without_raw_strings + line = raw_lines[linenum] + + if line.find("\t") != -1: + error(filename, linenum, "whitespace/tab", 1, "Tab found; better to use spaces") + + # One or three blank spaces at the beginning of the line is weird; it's + # hard to reconcile that with 2-space indents. + # NOTE: here are the conditions rob pike used for his tests. Mine aren't + # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces + # if(RLENGTH > 20) complain = 0; + # if(match($0, " +(error|private|public|protected):")) complain = 0; + # if(match(prev, "&& *$")) complain = 0; + # if(match(prev, "\\|\\| *$")) complain = 0; + # if(match(prev, "[\",=><] *$")) complain = 0; + # if(match($0, " <<")) complain = 0; + # if(match(prev, " +for \\(")) complain = 0; + # if(prevodd && match(prevprev, " +for \\(")) complain = 0; + scope_or_label_pattern = r"\s*\w+\s*:\s*\\?$" + classinfo = nesting_state.InnermostClass() + initial_spaces = 0 + cleansed_line = clean_lines.elided[linenum] + while initial_spaces < len(line) and line[initial_spaces] == " ": + initial_spaces += 1 + if line and line[-1].isspace(): + error( + filename, + linenum, + "whitespace/end_of_line", + 4, + "Line ends in whitespace. Consider deleting these extra spaces.", + ) + # There are certain situations we allow one space, notably for + # section labels, and also lines containing multi-line raw strings. + elif ( + (initial_spaces == 1 or initial_spaces == 3) + and not Match(scope_or_label_pattern, cleansed_line) + and not (clean_lines.raw_lines[linenum] != line and Match(r'^\s*""', line)) + ): + error( + filename, + linenum, + "whitespace/indent", + 3, + "Weird number of spaces at line-start. " "Are you using a 2-space indent?", + ) + + # Check if the line is a header guard. 
+ is_header_guard = False + if file_extension == "h": + cppvar = GetHeaderGuardCPPVariable(filename) + if ( + line.startswith("#ifndef %s" % cppvar) + or line.startswith("#define %s" % cppvar) + or line.startswith("#endif // %s" % cppvar) + ): + is_header_guard = True + # #include lines and header guards can be long, since there's no clean way to + # split them. + # + # URLs can be long too. It's possible to split these, but it makes them + # harder to cut&paste. + # + # The "$Id:...$" comment may also get very long without it being the + # developers fault. + if ( + not line.startswith("#include") + and not is_header_guard + and not Match(r"^\s*//.*http(s?)://\S*$", line) + and not Match(r"^// \$Id:.*#[0-9]+ \$$", line) + ): + line_width = GetLineWidth(line) + extended_length = int((_line_length * 1.25)) + if line_width > extended_length: + error( + filename, + linenum, + "whitespace/line_length", + 4, + "Lines should very rarely be longer than %i characters" + % extended_length, + ) + elif line_width > _line_length: + error( + filename, + linenum, + "whitespace/line_length", + 2, + "Lines should be <= %i characters long" % _line_length, + ) + + if ( + cleansed_line.count(";") > 1 + and + # for loops are allowed two ;'s (and may run over two lines). 
+ cleansed_line.find("for") == -1 + and ( + GetPreviousNonBlankLine(clean_lines, linenum)[0].find("for") == -1 + or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(";") != -1 + ) + and + # It's ok to have many commands in a switch case that fits in 1 line + not ( + (cleansed_line.find("case ") != -1 or cleansed_line.find("default:") != -1) + and cleansed_line.find("break;") != -1 + ) + ): + error( + filename, + linenum, + "whitespace/newline", + 0, + "More than one command on the same line", + ) + + # Some more style checks + CheckBraces(filename, clean_lines, linenum, error) + CheckTrailingSemicolon(filename, clean_lines, linenum, error) + CheckEmptyBlockBody(filename, clean_lines, linenum, error) + CheckAccess(filename, clean_lines, linenum, nesting_state, error) + CheckSpacing(filename, clean_lines, linenum, nesting_state, error) + CheckOperatorSpacing(filename, clean_lines, linenum, error) + CheckParenthesisSpacing(filename, clean_lines, linenum, error) + CheckCommaSpacing(filename, clean_lines, linenum, error) + CheckBracesSpacing(filename, clean_lines, linenum, error) + CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) + CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) + CheckCheck(filename, clean_lines, linenum, error) + CheckAltTokens(filename, clean_lines, linenum, error) + classinfo = nesting_state.InnermostClass() + if classinfo: + CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) + + +_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') +# Matches the first component of a filename delimited by -s and _s. 
That is: +# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' +_RE_FIRST_COMPONENT = re.compile(r"^[^-_.]+") + + +def _DropCommonSuffixes(filename): + """Drops common suffixes like _test.cc or -inl.h from filename. + + For example: + >>> _DropCommonSuffixes('foo/foo-inl.h') + 'foo/foo' + >>> _DropCommonSuffixes('foo/bar/foo.cc') + 'foo/bar/foo' + >>> _DropCommonSuffixes('foo/foo_internal.h') + 'foo/foo' + >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') + 'foo/foo_unusualinternal' + + Args: + filename: The input filename. + + Returns: + The filename with the common suffix removed. + """ + for suffix in ( + "test.cc", + "regtest.cc", + "unittest.cc", + "inl.h", + "impl.h", + "internal.h", + ): + if ( + filename.endswith(suffix) + and len(filename) > len(suffix) + and filename[-len(suffix) - 1] in ("-", "_") + ): + return filename[: -len(suffix) - 1] + return os.path.splitext(filename)[0] + + +def _IsTestFilename(filename): + """Determines if the given filename has a suffix that identifies it as a test. + + Args: + filename: The input filename. + + Returns: + True if 'filename' looks like a test, False otherwise. + """ + if ( + filename.endswith("_test.cc") + or filename.endswith("_unittest.cc") + or filename.endswith("_regtest.cc") + ): + return True + else: + return False + + +def _ClassifyInclude(fileinfo, include, is_system): + """Figures out what kind of header 'include' is. + + Args: + fileinfo: The current file cpplint is running over. A FileInfo instance. + include: The path to a #included file. + is_system: True if the #include used <> rather than "". + + Returns: + One of the _XXX_HEADER constants. 
+ + For example: + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) + _C_SYS_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) + _CPP_SYS_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) + _LIKELY_MY_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), + ... 'bar/foo_other_ext.h', False) + _POSSIBLE_MY_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) + _OTHER_HEADER + """ + # This is a list of all standard c++ header files, except + # those already checked for above. + is_cpp_h = include in _CPP_HEADERS + + if is_system: + if is_cpp_h: + return _CPP_SYS_HEADER + else: + return _C_SYS_HEADER + + # If the target file and the include we're checking share a + # basename when we drop common extensions, and the include + # lives in . , then it's likely to be owned by the target file. + target_dir, target_base = os.path.split( + _DropCommonSuffixes(fileinfo.RepositoryName()) + ) + include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) + if target_base == include_base and ( + include_dir == target_dir + or include_dir == os.path.normpath(target_dir + "/../public") + ): + return _LIKELY_MY_HEADER + + # If the target and include share some initial basename + # component, it's possible the target is implementing the + # include, so it's allowed to be first, but we'll never + # complain if it's not there. + target_first_component = _RE_FIRST_COMPONENT.match(target_base) + include_first_component = _RE_FIRST_COMPONENT.match(include_base) + if ( + target_first_component + and include_first_component + and target_first_component.group(0) == include_first_component.group(0) + ): + return _POSSIBLE_MY_HEADER + + return _OTHER_HEADER + + +def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): + """Check rules that are applicable to #include lines. + + Strings on #include lines are NOT removed from elided line, to make + certain tasks easier. 
However, to prevent false positives, checks + applicable to #include lines in CheckLanguage must be put here. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + include_state: An _IncludeState instance in which the headers are inserted. + error: The function to call with any errors found. + """ + fileinfo = FileInfo(filename) + line = clean_lines.lines[linenum] + + # "include" should use the new style "foo/bar.h" instead of just "bar.h" + # Only do this check if the included header follows google naming + # conventions. If not, assume that it's a 3rd party API that + # requires special include conventions. # - # But we still issue warnings for macros that we know are safe to - # warn, specifically: - # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P - # - TYPED_TEST - # - INTERFACE_DEF - # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: + # We also make an exception for Lua headers, which follow google + # naming convention but not the include convention. + match = Match(r'#include\s*"([^/]+\.h)"', line) + if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): + error( + filename, + linenum, + "build/include", + 4, + "Include the directory when naming .h files", + ) + + # we shouldn't include a file more than once. actually, there are a + # handful of instances where doing so is okay, but in general it's + # not. 
+ match = _RE_PATTERN_INCLUDE.search(line) + if match: + include = match.group(2) + is_system = match.group(1) == "<" + duplicate_line = include_state.FindHeader(include) + if duplicate_line >= 0: + error( + filename, + linenum, + "build/include", + 4, + '"%s" already included at %s:%s' % (include, filename, duplicate_line), + ) + elif not _THIRD_PARTY_HEADERS_PATTERN.match(include): + include_state.include_list[-1].append((include, linenum)) + + # We want to ensure that headers appear in the right order: + # 1) for foo.cc, foo.h (preferred location) + # 2) c system files + # 3) cpp system files + # 4) for foo.cc, foo.h (deprecated location) + # 5) other google headers + # + # We classify each include statement as one of those 5 types + # using a number of techniques. The include_state object keeps + # track of the highest type seen, and complains if we see a + # lower type after that. + error_message = include_state.CheckNextIncludeOrder( + _ClassifyInclude(fileinfo, include, is_system) + ) + if error_message: + error( + filename, + linenum, + "build/include_order", + 4, + "%s. Should be: %s.h, c system, c++ system, other." + % (error_message, fileinfo.BaseName()), + ) + canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) + if not include_state.IsInAlphabeticalOrder( + clean_lines, linenum, canonical_include + ): + error( + filename, + linenum, + "build/include_alpha", + 4, + 'Include "%s" not in alphabetical order' % include, + ) + include_state.SetLastHeader(canonical_include) + + # Look for any of the stream classes that are part of standard C++. + match = _RE_PATTERN_INCLUDE.match(line) + if match: + include = match.group(2) + if Match(r"(f|ind|io|i|o|parse|pf|stdio|str|)?stream$", include): + # Many unit tests use cout, so we exempt them. 
+ if not _IsTestFilename(filename): + # Suggest a different header for ostream + if include == "ostream": + error( + filename, + linenum, + "readability/streams", + 3, + 'For logging, include "base/logging.h" instead of .', + ) + else: + error( + filename, + linenum, + "readability/streams", + 3, + "Streams are highly discouraged.", + ) + + +def _GetTextInside(text, start_pattern): + r"""Retrieves all the text between matching open and close parentheses. + + Given a string of lines and a regular expression string, retrieve all the text + following the expression and between opening punctuation symbols like + (, [, or {, and the matching close-punctuation symbol. This properly nested + occurrences of the punctuations, so for the text like + printf(a(), b(c())); + a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. + start_pattern must match string having an open punctuation symbol at the end. + + Args: + text: The lines to extract text. Its comments and strings must be elided. + It can be single line and can span multiple lines. + start_pattern: The regexp string indicating where to start extracting + the text. + Returns: + The extracted text. + None if either the opening string or ending punctuation could not be found. + """ + # TODO(unknown): Audit cpplint.py to see what places could be profitably + # rewritten to use _GetTextInside (and use inferior regexp matching today). + + # Give opening punctuations to get the matching close-punctuations. + matching_punctuation = {"(": ")", "{": "}", "[": "]"} + closing_punctuation = set(matching_punctuation.values()) + + # Find the position to start extracting text. + match = re.search(start_pattern, text, re.M) + if not match: # start_pattern not found in text. + return None + start_position = match.end(0) + + assert start_position > 0, "start_pattern must ends with an opening punctuation." 
+ assert ( + text[start_position - 1] in matching_punctuation + ), "start_pattern must ends with an opening punctuation." + # Stack of closing punctuations we expect to have in text after position. + punctuation_stack = [matching_punctuation[text[start_position - 1]]] + position = start_position + while punctuation_stack and position < len(text): + if text[position] == punctuation_stack[-1]: + punctuation_stack.pop() + elif text[position] in closing_punctuation: + # A closing punctuation without matching opening punctuations. + return None + elif text[position] in matching_punctuation: + punctuation_stack.append(matching_punctuation[text[position]]) + position += 1 + if punctuation_stack: + # Opening punctuations left without matching close-punctuations. + return None + # punctuations match. + return text[start_position : position - 1] + + +# Patterns for matching call-by-reference parameters. +# +# Supports nested templates up to 2 levels deep using this messy pattern: +# < (?: < (?: < [^<>]* +# > +# | [^<>] )* +# > +# | [^<>] )* +# > +_RE_PATTERN_IDENT = r"[_a-zA-Z]\w*" # =~ [[:alpha:]][[:alnum:]]* +_RE_PATTERN_TYPE = ( + r"(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?" + r"(?:\w|" + r"\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|" + r"::)+" +) +# A call-by-reference parameter ends with '& identifier'. +_RE_PATTERN_REF_PARAM = re.compile( + r"(" + _RE_PATTERN_TYPE + r"(?:\s*(?:\bconst\b|[*]))*\s*" + r"&\s*" + _RE_PATTERN_IDENT + r")\s*(?:=[^,()]+)?[,)]" +) +# A call-by-const-reference parameter either ends with 'const& identifier' +# or looks like 'const type& identifier' when 'type' is atomic. +_RE_PATTERN_CONST_REF_PARAM = ( + r"(?:.*\s*\bconst\s*&\s*" + + _RE_PATTERN_IDENT + + r"|const\s+" + + _RE_PATTERN_TYPE + + r"\s*&\s*" + + _RE_PATTERN_IDENT + + r")" +) + + +def CheckLanguage( + filename, clean_lines, linenum, file_extension, include_state, nesting_state, error +): + """Checks rules from the 'C++ language rules' section of cppguide.html. 
+ + Some of these rules are hard to test (function overloading, using + uint32 inappropriately), but we do the best we can. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + file_extension: The extension (without the dot) of the filename. + include_state: An _IncludeState instance in which the headers are inserted. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + # If the line is empty or consists of entirely a comment, no need to + # check it. + line = clean_lines.elided[linenum] + if not line: + return + + match = _RE_PATTERN_INCLUDE.search(line) + if match: + CheckIncludeLine(filename, clean_lines, linenum, include_state, error) + return + + # Reset include state across preprocessor directives. This is meant + # to silence warnings for conditional includes. + match = Match(r"^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b", line) + if match: + include_state.ResetSection(match.group(1)) + + # Make Windows paths like Unix. + fullname = os.path.abspath(filename).replace("\\", "/") + + # Perform other checks now that we are sure that this is not an include line + CheckCasts(filename, clean_lines, linenum, error) + CheckGlobalStatic(filename, clean_lines, linenum, error) + CheckPrintf(filename, clean_lines, linenum, error) + + if file_extension == "h": + # TODO(unknown): check that 1-arg constructors are explicit. + # How to tell it's a constructor? + # (handled in CheckForNonStandardConstructs for now) + # TODO(unknown): check that classes declare or disable copy/assign + # (level 1 error) + pass + + # Check if people are using the verboten C basic types. The only exception + # we regularly allow is "unsigned short port" for port. 
+ if Search(r"\bshort port\b", line): + if not Search(r"\bunsigned short port\b", line): + error( + filename, + linenum, + "runtime/int", + 4, + 'Use "unsigned short" for ports, not "short"', + ) + else: + match = Search(r"\b(short|long(?! +double)|long long)\b", line) + if match: + error( + filename, + linenum, + "runtime/int", + 4, + "Use int16/int64/etc, rather than the C type %s" % match.group(1), + ) + + # Check if some verboten operator overloading is going on + # TODO(unknown): catch out-of-line unary operator&: + # class X {}; + # int operator&(const X& x) { return 42; } // unary operator& + # The trick is it's hard to tell apart from binary operator&: + # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& + if Search(r"\boperator\s*&\s*\(\s*\)", line): + error( + filename, + linenum, + "runtime/operator", + 4, + "Unary operator& is dangerous. Do not use it.", + ) + + # Check for suspicious usage of "if" like + # } if (a == b) { + if Search(r"\}\s*if\s*\(", line): + error( + filename, + linenum, + "readability/braces", + 4, + 'Did you mean "else if"? If not, start a new line for "if".', + ) + + # Check for potential format string bugs like printf(foo). + # We constrain the pattern not to pick things like DocidForPrintf(foo). + # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) + # TODO(unknown): Catch the following case. Need to change the calling + # convention of the whole function to process multiple line to handle it. + # printf( + # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); + printf_args = _GetTextInside(line, r"(?i)\b(string)?printf\s*\(") + if printf_args: + match = Match(r"([\w.\->()]+)$", printf_args) + if match and match.group(1) != "__VA_ARGS__": + function_name = re.search(r"\b((?:string)?printf)\s*\(", line, re.I).group( + 1 + ) + error( + filename, + linenum, + "runtime/printf", + 4, + 'Potential format string bug. Do %s("%%s", %s) instead.' 
+ % (function_name, match.group(1)), + ) + + # Check for potential memset bugs like memset(buf, sizeof(buf), 0). + match = Search(r"memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)", line) + if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): + error( + filename, + linenum, + "runtime/memset", + 4, + 'Did you mean "memset(%s, 0, %s)"?' % (match.group(1), match.group(2)), + ) + + if Search(r"\busing namespace\b", line): + error( + filename, + linenum, + "build/namespaces", + 5, + "Do not use namespace using-directives. " + "Use using-declarations instead.", + ) + + # Detect variable-length arrays. + match = Match(r"\s*(.+::)?(\w+) [a-z]\w*\[(.+)];", line) + if ( + match + and match.group(2) != "return" + and match.group(2) != "delete" + and match.group(3).find("]") == -1 + ): + # Split the size using space and arithmetic operators as delimiters. + # If any of the resulting tokens are not compile time constants then + # report the error. + tokens = re.split(r"\s|\+|\-|\*|\/|<<|>>]", match.group(3)) + is_const = True + skip_next = False + for tok in tokens: + if skip_next: + skip_next = False + continue + + if Search(r"sizeof\(.+\)", tok): + continue + if Search(r"arraysize\(\w+\)", tok): + continue + + tok = tok.lstrip("(") + tok = tok.rstrip(")") + if not tok: + continue + if Match(r"\d+", tok): + continue + if Match(r"0[xX][0-9a-fA-F]+", tok): + continue + if Match(r"k[A-Z0-9]\w*", tok): + continue + if Match(r"(.+::)?k[A-Z0-9]\w*", tok): + continue + if Match(r"(.+::)?[A-Z][A-Z0-9_]*", tok): + continue + # A catch all for tricky sizeof cases, including 'sizeof expression', + # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' + # requires skipping the next token because we split on ' ' and '*'. + if tok.startswith("sizeof"): + skip_next = True + continue + is_const = False + break + if not is_const: + error( + filename, + linenum, + "runtime/arrays", + 1, + "Do not use variable-length arrays. 
Use an appropriately named " + "('k' followed by CamelCase) compile-time constant for the size.", + ) + + # If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present, + # then it should be the last thing in the class declaration. + match = Match( + (r"\s*" r"(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))" r"\(.*\);$"), line + ) + if match and linenum + 1 < clean_lines.NumLines(): + next_line = clean_lines.elided[linenum + 1] + # We allow some, but not all, declarations of variables to be present + # in the statement that defines the class. The [\w\*,\s]* fragment of + # the regular expression below allows users to declare instances of + # the class or pointers to instances, but not less common types such + # as function pointers or arrays. It's a tradeoff between allowing + # reasonable code and avoiding trying to parse more C++ using regexps. + if not Search(r"^\s*}[\w\*,\s]*;", next_line): + error( + filename, + linenum, + "readability/constructors", + 3, + match.group(1) + " should be the last thing in the class", + ) + + # Check for use of unnamed namespaces in header files. Registration + # macros are typically OK, so we allow use of "namespace {" on lines + # that end with backslashes. + if file_extension == "h" and Search(r"\bnamespace\s*{", line) and line[-1] != "\\": + error( + filename, + linenum, + "build/namespaces", + 4, + "Do not use unnamed namespaces in header files. See " + "http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces" + " for more information.", + ) + + +def CheckGlobalStatic(filename, clean_lines, linenum, error): + """Check for unsafe global or static objects. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. 
+ """ + line = clean_lines.elided[linenum] + + # Match two lines at a time to support multiline declarations + if linenum + 1 < clean_lines.NumLines() and not Search(r"[;({]", line): + line += clean_lines.elided[linenum + 1].strip() + + # Check for people declaring static/global STL strings at the top level. + # This is dangerous because the C++ language does not guarantee that + # globals with constructors are initialized before the first access. + match = Match(r"((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)", line) + + # Remove false positives: + # - String pointers (as opposed to values). + # string *pointer + # const string *pointer + # string const *pointer + # string *const pointer # - # We implement a whitelist of safe macros instead of a blacklist of - # unsafe macros, even though the latter appears less frequently in - # google code and would have been easier to implement. This is because - # the downside for getting the whitelist wrong means some extra - # semicolons, while the downside for getting the blacklist wrong - # would result in compile errors. + # - Functions and template specializations. + # string Function(... + # string Class::Method(... # - # In addition to macros, we also don't want to warn on compound - # literals and lambdas. 
- closing_brace_pos = match.group(1).rfind(')') - opening_parenthesis = ReverseCloseExpression( - clean_lines, linenum, closing_brace_pos) - if opening_parenthesis[2] > -1: - line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] - macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) - func = Match(r'^(.*\])\s*$', line_prefix) - if ((macro and - macro.group(1) not in ( - 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', - 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', - 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or - (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or - Search(r'\s+=\s*$', line_prefix)): - match = None - if (match and - opening_parenthesis[1] > 1 and - Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): - # Multi-line lambda-expression - match = None - - else: - # Try matching cases 2-3. - match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) + # - Operators. These are matched separately because operator names + # cross non-word boundaries, and trying to match both operators + # and functions at the same time would decrease accuracy of + # matching identifiers. + # string Class::operator*() + if ( + match + and not Search(r"\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w", line) + and not Search(r"\boperator\W", line) + and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3)) + ): + error( + filename, + linenum, + "runtime/string", + 4, + "For a static/global string constant, use a C style string instead: " + '"%schar %s[]".' % (match.group(1), match.group(2)), + ) + + if Search(r"\b([A-Za-z0-9_]*_)\(\1\)", line): + error( + filename, + linenum, + "runtime/init", + 4, + "You seem to be initializing a member variable with itself.", + ) + + +def CheckPrintf(filename, clean_lines, linenum, error): + """Check for printf related issues. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. 
+ error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # When snprintf is used, the second argument shouldn't be a literal. + match = Search(r"snprintf\s*\(([^,]*),\s*([0-9]*)\s*,", line) + if match and match.group(2) != "0": + # If 2nd arg is zero, snprintf is used to calculate size. + error( + filename, + linenum, + "runtime/printf", + 3, + "If you can, use sizeof(%s) instead of %s as the 2nd arg " + "to snprintf." % (match.group(1), match.group(2)), + ) + + # Check if some verboten C functions are being used. + if Search(r"\bsprintf\s*\(", line): + error( + filename, + linenum, + "runtime/printf", + 5, + "Never use sprintf. Use snprintf instead.", + ) + match = Search(r"\b(strcpy|strcat)\s*\(", line) + if match: + error( + filename, + linenum, + "runtime/printf", + 4, + "Almost always, snprintf is better than %s" % match.group(1), + ) + + +def IsDerivedFunction(clean_lines, linenum): + """Check if current line contains an inherited function. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + Returns: + True if current line contains a function with "override" + virt-specifier. + """ + # Scan back a few lines for start of current function + for i in range(linenum, max(-1, linenum - 10), -1): + match = Match(r"^([^()]*\w+)\(", clean_lines.elided[i]) + if match: + # Look for "override" after the matching closing parenthesis + line, _, closing_paren = CloseExpression( + clean_lines, i, len(match.group(1)) + ) + return closing_paren >= 0 and Search(r"\boverride\b", line[closing_paren:]) + return False + + +def IsInitializerList(clean_lines, linenum): + """Check if current line is inside constructor initializer list. + + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + Returns: + True if current line appears to be inside constructor initializer + list, False otherwise. 
+ """ + for i in range(linenum, 1, -1): + line = clean_lines.elided[i] + if i == linenum: + remove_function_body = Match(r"^(.*)\{\s*$", line) + if remove_function_body: + line = remove_function_body.group(1) + + if Search(r"\s:\s*\w+[({]", line): + # A lone colon tend to indicate the start of a constructor + # initializer list. It could also be a ternary operator, which + # also tend to appear in constructor initializer lists as + # opposed to parameter lists. + return True + if Search(r"\}\s*,\s*$", line): + # A closing brace followed by a comma is probably the end of a + # brace-initialized member in constructor initializer list. + return True + if Search(r"[{};]\s*$", line): + # Found one of the following: + # - A closing brace or semicolon, probably the end of the previous + # function. + # - An opening brace, probably the start of current class or namespace. + # + # Current line is probably not inside an initializer list since + # we saw one of those things without seeing the starting colon. + return False + + # Got to the beginning of the file without seeing the start of + # constructor initializer list. + return False + + +def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error): + """Check for non-const references. + + Separate from CheckLanguage since it scans backwards from current + line, instead of scanning forward. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + # Do nothing if there is no '&' on current line. + line = clean_lines.elided[linenum] + if "&" not in line: + return + + # If a function is inherited, current function doesn't have much of + # a choice, so any non-const references should not be blamed on + # derived function. 
+ if IsDerivedFunction(clean_lines, linenum): + return + + # Long type names may be broken across multiple lines, usually in one + # of these forms: + # LongType + # ::LongTypeContinued &identifier + # LongType:: + # LongTypeContinued &identifier + # LongType< + # ...>::LongTypeContinued &identifier + # + # If we detected a type split across two lines, join the previous + # line to current line so that we can match const references + # accordingly. + # + # Note that this only scans back one line, since scanning back + # arbitrary number of lines would be expensive. If you have a type + # that spans more than 2 lines, please use a typedef. + if linenum > 1: + previous = None + if Match(r"\s*::(?:[\w<>]|::)+\s*&\s*\S", line): + # previous_line\n + ::current_line + previous = Search( + r"\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$", + clean_lines.elided[linenum - 1], + ) + elif Match(r"\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S", line): + # previous_line::\n + current_line + previous = Search( + r"\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$", + clean_lines.elided[linenum - 1], + ) + if previous: + line = previous.group(1) + line.lstrip() + else: + # Check for templated parameter that is split across multiple lines + endpos = line.rfind(">") + if endpos > -1: + (_, startline, startpos) = ReverseCloseExpression( + clean_lines, linenum, endpos + ) + if startpos > -1 and startline < linenum: + # Found the matching < on an earlier line, collect all + # pieces up to current line. + line = "" + for i in range(startline, linenum + 1): + line += clean_lines.elided[i].strip() + + # Check for non-const references in function parameters. A single '&' may + # found in the following places: + # inside expression: binary & for bitwise AND + # inside expression: unary & for taking the address of something + # inside declarators: reference parameter + # We will exclude the first two cases by checking that we are not inside a + # function body, including one that was just introduced by a trailing '{'. 
+ # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. + if nesting_state.previous_stack_top and not ( + isinstance(nesting_state.previous_stack_top, _ClassInfo) + or isinstance(nesting_state.previous_stack_top, _NamespaceInfo) + ): + # Not at toplevel, not within a class, and not within a namespace + return + + # Avoid initializer lists. We only need to scan back from the + # current line for something that starts with ':'. + # + # We don't need to check the current line, since the '&' would + # appear inside the second set of parentheses on the current line as + # opposed to the first set. + if linenum > 0: + for i in range(linenum - 1, max(0, linenum - 10), -1): + previous_line = clean_lines.elided[i] + if not Search(r"[),]\s*$", previous_line): + break + if Match(r"^\s*:\s+\S", previous_line): + return + + # Avoid preprocessors + if Search(r"\\\s*$", line): + return + + # Avoid constructor initializer lists + if IsInitializerList(clean_lines, linenum): + return + + # We allow non-const references in a few standard places, like functions + # called "swap()" or iostream operators like "<<" or ">>". Do not check + # those function parameters. + # + # We also accept & in static_assert, which looks like a function but + # it's actually a declaration expression. + whitelisted_functions = ( + r"(?:[sS]wap(?:<\w:+>)?|" + r"operator\s*[<>][<>]|" + r"static_assert|COMPILE_ASSERT" + r")\s*\(" + ) + if Search(whitelisted_functions, line): + return + elif not Search(r"\S+\([^)]*$", line): + # Don't see a whitelisted function on this line. Actually we + # didn't see any function name on this line, so this is likely a + # multi-line parameter list. Try a bit harder to catch this case. 
+ for i in range(2): + if linenum > i and Search( + whitelisted_functions, clean_lines.elided[linenum - i - 1] + ): + return + + decls = ReplaceAll(r"{[^}]*}", " ", line) # exclude function body + for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): + if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): + error( + filename, + linenum, + "runtime/references", + 2, + "Is this a non-const reference? " + "If so, make const or use a pointer: " + + ReplaceAll(" *<", "<", parameter), + ) + + +def CheckCasts(filename, clean_lines, linenum, error): + """Various cast related checks. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Check to see if they're using an conversion function cast. + # I just try to capture the most common basic types, though there are more. + # Parameterless conversion functions, such as bool(), are allowed as they are + # probably a member operator declaration or default constructor. + match = Search( + r"(\bnew\s+|\S<\s*(?:const\s+)?)?\b" + r"(int|float|double|bool|char|int32|uint32|int64|uint64)" + r"(\([^)].*)", + line, + ) + expecting_function = ExpectingFunctionArgs(clean_lines, linenum) + if match and not expecting_function: + matched_type = match.group(2) + + # matched_new_or_template is used to silence two false positives: + # - New operators + # - Template arguments with function types + # + # For template arguments, we match on types immediately following + # an opening bracket without any spaces. This is a fast way to + # silence the common case where the function type is the first + # template argument. False negative with less-than comparison is + # avoided because those operators are usually followed by a space. 
+ # + # function // bracket + no space = false positive + # value < double(42) // bracket + space = true positive + matched_new_or_template = match.group(1) + + # Avoid arrays by looking for brackets that come after the closing + # parenthesis. + if Match(r"\([^()]+\)\s*\[", match.group(3)): + return + + # Other things to ignore: + # - Function pointers + # - Casts to pointer types + # - Placement new + # - Alias declarations + matched_funcptr = match.group(3) + if ( + matched_new_or_template is None + and not ( + matched_funcptr + and ( + Match(r"\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(", matched_funcptr) + or matched_funcptr.startswith("(*)") + ) + ) + and not Match(r"\s*using\s+\S+\s*=\s*" + matched_type, line) + and not Search(r"new\(\S+\)\s*" + matched_type, line) + ): + error( + filename, + linenum, + "readability/casting", + 4, + "Using deprecated casting style. " + "Use static_cast<%s>(...) instead" % matched_type, + ) + + if not expecting_function: + CheckCStyleCast( + filename, + clean_lines, + linenum, + "static_cast", + r"\((int|float|double|bool|char|u?int(16|32|64))\)", + error, + ) + + # This doesn't catch all cases. Consider (const char * const)"hello". + # + # (char *) "foo" should always be a const_cast (reinterpret_cast won't + # compile). + if CheckCStyleCast( + filename, clean_lines, linenum, "const_cast", r'\((char\s?\*+\s?)\)\s*"', error + ): + pass + else: + # Check pointer casts for other than string constants + CheckCStyleCast( + filename, + clean_lines, + linenum, + "reinterpret_cast", + r"\((\w+\s?\*+\s?)\)", + error, + ) + + # In addition, we look for people taking the address of a cast. This + # is dangerous -- casts can assign to temporaries, so the pointer doesn't + # point where you think. + # + # Some non-identifier character is required before the '&' for the + # expression to be recognized as a cast. 
These are casts: + # expression = &static_cast(temporary()); + # function(&(int*)(temporary())); + # + # This is not a cast: + # reference_type&(int* function_param); + match = Search( + r"(?:[^\w]&\(([^)]+)\)[\w(])|" + r"(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)", + line, + ) + if match and match.group(1) != "*": + # Try a better error message when the & is bound to something + # dereferenced by the casted pointer, as opposed to the casted + # pointer itself. + parenthesis_error = False + match = Match(r"^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<", line) + if match: + _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) + if x1 >= 0 and clean_lines.elided[y1][x1] == "(": + _, y2, x2 = CloseExpression(clean_lines, y1, x1) + if x2 >= 0: + extended_line = clean_lines.elided[y2][x2:] + if y2 < clean_lines.NumLines() - 1: + extended_line += clean_lines.elided[y2 + 1] + if Match(r"\s*(?:->|\[)", extended_line): + parenthesis_error = True + + if parenthesis_error: + error( + filename, + linenum, + "readability/casting", + 4, + ( + "Are you taking an address of something dereferenced " + "from a cast? Wrapping the dereferenced expression in " + "parentheses will make the binding more obvious" + ), + ) + else: + error( + filename, + linenum, + "runtime/casting", + 4, + ( + "Are you taking an address of a cast? " + "This is dangerous: could be a temp var. " + "Take the address before doing the cast, rather than after" + ), + ) + + +def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): + """Checks for a C-style cast by looking for the pattern. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + cast_type: The string for the C++ cast to recommend. This is either + reinterpret_cast, static_cast, or const_cast, depending. + pattern: The regular expression used to find C-style casts. 
+ error: The function to call with any errors found. + + Returns: + True if an error was emitted. + False otherwise. + """ + line = clean_lines.elided[linenum] + match = Search(pattern, line) if not match: - # Try matching cases 4-6. These are always matched on separate lines. - # - # Note that we can't simply concatenate the previous line to the - # current line and do a single match, otherwise we may output - # duplicate warnings for the blank line case: - # if (cond) { - # // blank line - # } - prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] - if prevline and Search(r'[;{}]\s*$', prevline): - match = Match(r'^(\s*)\{', line) - - # Check matching closing brace - if match: - (endline, endlinenum, endpos) = CloseExpression( - clean_lines, linenum, len(match.group(1))) - if endpos > -1 and Match(r'^\s*;', endline[endpos:]): - # Current {} pair is eligible for semicolon check, and we have found - # the redundant semicolon, output warning here. - # - # Note: because we are scanning forward for opening braces, and - # outputting warnings for the matching closing brace, if there are - # nested blocks with trailing semicolons, we will get the error - # messages in reversed order. - error(filename, endlinenum, 'readability/braces', 4, - "You don't need a ; after a }") + return False + + # Exclude lines with keywords that tend to look like casts + context = line[0 : match.start(1) - 1] + if Match(r".*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$", context): + return False + + # Try expanding current context to see if we one level of + # parentheses inside a macro. 
+ if linenum > 0: + for i in range(linenum - 1, max(0, linenum - 5), -1): + context = clean_lines.elided[i] + context + if Match(r".*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$", context): + return False + + # operator++(int) and operator--(int) + if context.endswith(" operator++") or context.endswith(" operator--"): + return False + + # A single unnamed argument for a function tends to look like old + # style cast. If we see those, don't issue warnings for deprecated + # casts, instead issue warnings for unnamed arguments where + # appropriate. + # + # These are things that we want warnings for, since the style guide + # explicitly require all parameters to be named: + # Function(int); + # Function(int) { + # ConstMember(int) const; + # ConstMember(int) const { + # ExceptionMember(int) throw (...); + # ExceptionMember(int) throw (...) { + # PureVirtual(int) = 0; + # + # These are functions of some sort, where the compiler would be fine + # if they had named parameters, but people often omit those + # identifiers to reduce clutter: + # (FunctionPointer)(int); + # (FunctionPointer)(int) = value; + # Function((function_pointer_arg)(int)) + # Function((function_pointer_arg)(int), int param) + # ; + # <(FunctionPointerTemplateArgument)(int)>; + remainder = line[match.end(0) :] + if Match(r"^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])", remainder): + # Looks like an unnamed parameter. + + # Don't warn on any kind of template arguments. + if Match(r"^\s*>", remainder): + return False + + # Don't warn on assignments to function pointers, but keep warnings for + # unnamed parameters to pure virtual functions. Note that this pattern + # will also pass on assignments of "0" to function pointers, but the + # preferred values for those would be "nullptr" or "NULL". + matched_zero = Match(r"^\s=\s*(\S+)\s*;", remainder) + if matched_zero and matched_zero.group(1) != "0": + return False + + # Don't warn on function pointer declarations. 
For this we need + # to check what came before the "(type)" string. + if Match(r".*\)\s*$", line[0 : match.start(0)]): + return False + + # Don't warn if the parameter is named with block comments, e.g.: + # Function(int /*unused_param*/); + raw_line = clean_lines.raw_lines[linenum] + if "/*" in raw_line: + return False + + # Passed all filters, issue warning here. + error( + filename, + linenum, + "readability/function", + 3, + "All parameters should be named in a function", + ) + return True + + # At this point, all that should be left is actual casts. + error( + filename, + linenum, + "readability/casting", + 4, + "Using C-style cast. Use %s<%s>(...) instead" % (cast_type, match.group(1)), + ) + + return True -def CheckEmptyBlockBody(filename, clean_lines, linenum, error): - """Look for empty loop/conditional body with only a single semicolon. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - - # Search for loop keywords at the beginning of the line. Because only - # whitespaces are allowed before the keywords, this will also ignore most - # do-while-loops, since those lines should start with closing brace. - # - # We also check "if" blocks here, since an empty conditional block - # is likely an error. - line = clean_lines.elided[linenum] - matched = Match(r'\s*(for|while|if)\s*\(', line) - if matched: - # Find the end of the conditional expression - (end_line, end_linenum, end_pos) = CloseExpression( - clean_lines, linenum, line.find('(')) - - # Output warning if what follows the condition expression is a semicolon. - # No warning for all other cases, including whitespace or newline, since we - # have a separate check for semicolons preceded by whitespace. 
- if end_pos >= 0 and Match(r';', end_line[end_pos:]): - if matched.group(1) == 'if': - error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, - 'Empty conditional bodies should use {}') - else: - error(filename, end_linenum, 'whitespace/empty_loop_body', 5, - 'Empty loop bodies should use {} or continue') +def ExpectingFunctionArgs(clean_lines, linenum): + """Checks whether where function type arguments are expected. + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. -def FindCheckMacro(line): - """Find a replaceable CHECK-like macro. - - Args: - line: line to search on. - Returns: - (macro name, start position), or (None, -1) if no replaceable - macro is found. - """ - for macro in _CHECK_MACROS: - i = line.find(macro) - if i >= 0: - # Find opening parenthesis. Do a regular expression match here - # to make sure that we are matching the expected CHECK macro, as - # opposed to some other macro that happens to contain the CHECK - # substring. - matched = Match(r'^(.*\b' + macro + r'\s*)\(', line) - if not matched: - continue - return (macro, len(matched.group(1))) - return (None, -1) + Returns: + True if the line at 'linenum' is inside something that expects arguments + of function types. + """ + line = clean_lines.elided[linenum] + return Match(r"^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(", line) or ( + linenum >= 2 + and ( + Match( + r"^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$", + clean_lines.elided[linenum - 1], + ) + or Match( + r"^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$", + clean_lines.elided[linenum - 2], + ) + or Search(r"\bstd::m?function\s*\<\s*$", clean_lines.elided[linenum - 1]) + ) + ) -def CheckCheck(filename, clean_lines, linenum, error): - """Checks the use of CHECK and EXPECT macros. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. 
- error: The function to call with any errors found. - """ - - # Decide the set of replacement macros that should be suggested - lines = clean_lines.elided - (check_macro, start_pos) = FindCheckMacro(lines[linenum]) - if not check_macro: - return - - # Find end of the boolean expression by matching parentheses - (last_line, end_line, end_pos) = CloseExpression( - clean_lines, linenum, start_pos) - if end_pos < 0: - return - - # If the check macro is followed by something other than a - # semicolon, assume users will log their own custom error messages - # and don't suggest any replacements. - if not Match(r'\s*;', last_line[end_pos:]): - return - - if linenum == end_line: - expression = lines[linenum][start_pos + 1:end_pos - 1] - else: - expression = lines[linenum][start_pos + 1:] - for i in range(linenum + 1, end_line): - expression += lines[i] - expression += last_line[0:end_pos - 1] - - # Parse expression so that we can take parentheses into account. - # This avoids false positives for inputs like "CHECK((a < 4) == b)", - # which is not replaceable by CHECK_LE. - lhs = '' - rhs = '' - operator = None - while expression: - matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' - r'==|!=|>=|>|<=|<|\()(.*)$', expression) - if matched: - token = matched.group(1) - if token == '(': - # Parenthesized operand - expression = matched.group(2) - (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) - if end < 0: - return # Unmatched parenthesis - lhs += '(' + expression[0:end] - expression = expression[end:] - elif token in ('&&', '||'): - # Logical and/or operators. This means the expression - # contains more than one term, for example: - # CHECK(42 < a && a < b); - # - # These are not replaceable with CHECK_LE, so bail out early. 
- return - elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): - # Non-relational operator - lhs += token - expression = matched.group(2) - else: - # Relational operator - operator = token - rhs = matched.group(2) - break - else: - # Unparenthesized operand. Instead of appending to lhs one character - # at a time, we do another regular expression match to consume several - # characters at once if possible. Trivial benchmark shows that this - # is more efficient when the operands are longer than a single - # character, which is generally the case. - matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) - if not matched: - matched = Match(r'^(\s*\S)(.*)$', expression) - if not matched: - break - lhs += matched.group(1) - expression = matched.group(2) - - # Only apply checks if we got all parts of the boolean expression - if not (lhs and operator and rhs): - return - - # Check that rhs do not contain logical operators. We already know - # that lhs is fine since the loop above parses out && and ||. - if rhs.find('&&') > -1 or rhs.find('||') > -1: - return - - # At least one of the operands must be a constant literal. This is - # to avoid suggesting replacements for unprintable things like - # CHECK(variable != iterator) - # - # The following pattern matches decimal, hex integers, strings, and - # characters (in that order). - lhs = lhs.strip() - rhs = rhs.strip() - match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' - if Match(match_constant, lhs) or Match(match_constant, rhs): - # Note: since we know both lhs and rhs, we can provide a more - # descriptive error message like: - # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) - # Instead of: - # Consider using CHECK_EQ instead of CHECK(a == b) - # - # We are still keeping the less descriptive message because if lhs - # or rhs gets long, the error message might become unreadable. 
- error(filename, linenum, 'readability/check', 2, - 'Consider using %s instead of %s(a %s b)' % ( - _CHECK_REPLACEMENT[check_macro][operator], - check_macro, operator)) +_HEADERS_CONTAINING_TEMPLATES = ( + ("", ("deque",)), + ( + "", + ( + "unary_function", + "binary_function", + "plus", + "minus", + "multiplies", + "divides", + "modulus", + "negate", + "equal_to", + "not_equal_to", + "greater", + "less", + "greater_equal", + "less_equal", + "logical_and", + "logical_or", + "logical_not", + "unary_negate", + "not1", + "binary_negate", + "not2", + "bind1st", + "bind2nd", + "pointer_to_unary_function", + "pointer_to_binary_function", + "ptr_fun", + "mem_fun_t", + "mem_fun", + "mem_fun1_t", + "mem_fun1_ref_t", + "mem_fun_ref_t", + "const_mem_fun_t", + "const_mem_fun1_t", + "const_mem_fun_ref_t", + "const_mem_fun1_ref_t", + "mem_fun_ref", + ), + ), + ("", ("numeric_limits",)), + ("", ("list",)), + ( + "", + ( + "map", + "multimap", + ), + ), + ("", ("allocator",)), + ( + "", + ( + "queue", + "priority_queue", + ), + ), + ( + "", + ( + "set", + "multiset", + ), + ), + ("", ("stack",)), + ( + "", + ( + "char_traits", + "basic_string", + ), + ), + ("", ("pair",)), + ("", ("vector",)), + # gcc extensions. + # Note: std::hash is their hash, ::hash is our hash + ( + "", + ( + "hash_map", + "hash_multimap", + ), + ), + ( + "", + ( + "hash_set", + "hash_multiset", + ), + ), + ("", ("slist",)), +) + +_RE_PATTERN_STRING = re.compile(r"\bstring\b") +_re_pattern_algorithm_header = [] +for _template in ("copy", "max", "min", "min_element", "sort", "swap", "transform"): + # Match max(..., ...), max(..., ...), but not foo->max, foo.max or + # type::max(). + _re_pattern_algorithm_header.append( + ( + re.compile(r"[^>.]\b" + _template + r"(<.*?>)?\([^\)]"), + _template, + "", + ) + ) -def CheckAltTokens(filename, clean_lines, linenum, error): - """Check alternative keywords being used in boolean expressions. - - Args: - filename: The name of the current file. 
- clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Avoid preprocessor lines - if Match(r'^\s*#', line): - return - - # Last ditch effort to avoid multi-line comments. This will not help - # if the comment started before the current line or ended after the - # current line, but it catches most of the false positives. At least, - # it provides a way to workaround this warning for people who use - # multi-line comments in preprocessor macros. - # - # TODO(unknown): remove this once cpplint has better support for - # multi-line comments. - if line.find('/*') >= 0 or line.find('*/') >= 0: - return - - for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): - error(filename, linenum, 'readability/alt_tokens', 2, - 'Use operator %s instead of %s' % ( - _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) +_re_pattern_templates = [] +for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: + for _template in _templates: + _re_pattern_templates.append( + (re.compile(r"(\<|\b)" + _template + r"\s*\<"), _template + "<>", _header) + ) -def GetLineWidth(line): - """Determines the width of the line in column positions. - - Args: - line: A string, which may be a Unicode string. - - Returns: - The width of the line in column positions, accounting for Unicode - combining characters and wide characters. - """ - if isinstance(line, str): - width = 0 - for uc in unicodedata.normalize('NFC', line): - if unicodedata.east_asian_width(uc) in ('W', 'F'): - width += 2 - elif not unicodedata.combining(uc): - width += 1 - return width - else: - return len(line) - - -def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, - error): - """Checks rules from the 'C++ style rules' section of cppguide.html. - - Most of these rules are hard to test (naming, comment style), but we - do what we can. 
In particular we check for 2-space indents, line lengths, - tab usage, spaces inside code, etc. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - file_extension: The extension (without the dot) of the filename. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - - # Don't use "elided" lines here, otherwise we can't check commented lines. - # Don't want to use "raw" either, because we don't want to check inside C++11 - # raw strings, - raw_lines = clean_lines.lines_without_raw_strings - line = raw_lines[linenum] - - if line.find('\t') != -1: - error(filename, linenum, 'whitespace/tab', 1, - 'Tab found; better to use spaces') - - # One or three blank spaces at the beginning of the line is weird; it's - # hard to reconcile that with 2-space indents. - # NOTE: here are the conditions rob pike used for his tests. Mine aren't - # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces - # if(RLENGTH > 20) complain = 0; - # if(match($0, " +(error|private|public|protected):")) complain = 0; - # if(match(prev, "&& *$")) complain = 0; - # if(match(prev, "\\|\\| *$")) complain = 0; - # if(match(prev, "[\",=><] *$")) complain = 0; - # if(match($0, " <<")) complain = 0; - # if(match(prev, " +for \\(")) complain = 0; - # if(prevodd && match(prevprev, " +for \\(")) complain = 0; - scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' - classinfo = nesting_state.InnermostClass() - initial_spaces = 0 - cleansed_line = clean_lines.elided[linenum] - while initial_spaces < len(line) and line[initial_spaces] == ' ': - initial_spaces += 1 - if line and line[-1].isspace(): - error(filename, linenum, 'whitespace/end_of_line', 4, - 'Line ends in whitespace. 
Consider deleting these extra spaces.') - # There are certain situations we allow one space, notably for - # section labels, and also lines containing multi-line raw strings. - elif ((initial_spaces == 1 or initial_spaces == 3) and - not Match(scope_or_label_pattern, cleansed_line) and - not (clean_lines.raw_lines[linenum] != line and - Match(r'^\s*""', line))): - error(filename, linenum, 'whitespace/indent', 3, - 'Weird number of spaces at line-start. ' - 'Are you using a 2-space indent?') - - # Check if the line is a header guard. - is_header_guard = False - if file_extension == 'h': - cppvar = GetHeaderGuardCPPVariable(filename) - if (line.startswith('#ifndef %s' % cppvar) or - line.startswith('#define %s' % cppvar) or - line.startswith('#endif // %s' % cppvar)): - is_header_guard = True - # #include lines and header guards can be long, since there's no clean way to - # split them. - # - # URLs can be long too. It's possible to split these, but it makes them - # harder to cut&paste. - # - # The "$Id:...$" comment may also get very long without it being the - # developers fault. - if (not line.startswith('#include') and not is_header_guard and - not Match(r'^\s*//.*http(s?)://\S*$', line) and - not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): - line_width = GetLineWidth(line) - extended_length = int((_line_length * 1.25)) - if line_width > extended_length: - error(filename, linenum, 'whitespace/line_length', 4, - 'Lines should very rarely be longer than %i characters' % - extended_length) - elif line_width > _line_length: - error(filename, linenum, 'whitespace/line_length', 2, - 'Lines should be <= %i characters long' % _line_length) - - if (cleansed_line.count(';') > 1 and - # for loops are allowed two ;'s (and may run over two lines). 
- cleansed_line.find('for') == -1 and - (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or - GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and - # It's ok to have many commands in a switch case that fits in 1 line - not ((cleansed_line.find('case ') != -1 or - cleansed_line.find('default:') != -1) and - cleansed_line.find('break;') != -1)): - error(filename, linenum, 'whitespace/newline', 0, - 'More than one command on the same line') - - # Some more style checks - CheckBraces(filename, clean_lines, linenum, error) - CheckTrailingSemicolon(filename, clean_lines, linenum, error) - CheckEmptyBlockBody(filename, clean_lines, linenum, error) - CheckAccess(filename, clean_lines, linenum, nesting_state, error) - CheckSpacing(filename, clean_lines, linenum, nesting_state, error) - CheckOperatorSpacing(filename, clean_lines, linenum, error) - CheckParenthesisSpacing(filename, clean_lines, linenum, error) - CheckCommaSpacing(filename, clean_lines, linenum, error) - CheckBracesSpacing(filename, clean_lines, linenum, error) - CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) - CheckRValueReference(filename, clean_lines, linenum, nesting_state, error) - CheckCheck(filename, clean_lines, linenum, error) - CheckAltTokens(filename, clean_lines, linenum, error) - classinfo = nesting_state.InnermostClass() - if classinfo: - CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) +def FilesBelongToSameModule(filename_cc, filename_h): + """Check if these two filenames belong to the same module. + The concept of a 'module' here is a as follows: + foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the + same 'module' if they are in the same directory. + some/path/public/xyzzy and some/path/internal/xyzzy are also considered + to belong to the same module here. 
-_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') -# Matches the first component of a filename delimited by -s and _s. That is: -# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' -# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' -_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') + If the filename_cc contains a longer path than the filename_h, for example, + '/absolute/path/to/base/sysinfo.cc', and this file would include + 'base/sysinfo.h', this function also produces the prefix needed to open the + header. This is used by the caller of this function to more robustly open the + header file. We don't have access to the real include paths in this context, + so we need this guesswork here. + Known bugs: tools/base/bar.cc and base/bar.h belong to the same module + according to this implementation. Because of this, this function gives + some false positives. This should be sufficiently rare in practice. -def _DropCommonSuffixes(filename): - """Drops common suffixes like _test.cc or -inl.h from filename. - - For example: - >>> _DropCommonSuffixes('foo/foo-inl.h') - 'foo/foo' - >>> _DropCommonSuffixes('foo/bar/foo.cc') - 'foo/bar/foo' - >>> _DropCommonSuffixes('foo/foo_internal.h') - 'foo/foo' - >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') - 'foo/foo_unusualinternal' - - Args: - filename: The input filename. - - Returns: - The filename with the common suffix removed. 
- """ - for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', - 'inl.h', 'impl.h', 'internal.h'): - if (filename.endswith(suffix) and len(filename) > len(suffix) and - filename[-len(suffix) - 1] in ('-', '_')): - return filename[:-len(suffix) - 1] - return os.path.splitext(filename)[0] + Args: + filename_cc: is the path for the .cc file + filename_h: is the path for the header path + Returns: + Tuple with a bool and a string: + bool: True if filename_cc and filename_h belong to the same module. + string: the additional prefix needed to open the header file. + """ -def _IsTestFilename(filename): - """Determines if the given filename has a suffix that identifies it as a test. + if not filename_cc.endswith(".cc"): + return (False, "") + filename_cc = filename_cc[: -len(".cc")] + if filename_cc.endswith("_unittest"): + filename_cc = filename_cc[: -len("_unittest")] + elif filename_cc.endswith("_test"): + filename_cc = filename_cc[: -len("_test")] + filename_cc = filename_cc.replace("/public/", "/") + filename_cc = filename_cc.replace("/internal/", "/") + + if not filename_h.endswith(".h"): + return (False, "") + filename_h = filename_h[: -len(".h")] + if filename_h.endswith("-inl"): + filename_h = filename_h[: -len("-inl")] + filename_h = filename_h.replace("/public/", "/") + filename_h = filename_h.replace("/internal/", "/") + + files_belong_to_same_module = filename_cc.endswith(filename_h) + common_path = "" + if files_belong_to_same_module: + common_path = filename_cc[: -len(filename_h)] + return files_belong_to_same_module, common_path - Args: - filename: The input filename. - Returns: - True if 'filename' looks like a test, False otherwise. - """ - if (filename.endswith('_test.cc') or - filename.endswith('_unittest.cc') or - filename.endswith('_regtest.cc')): - return True - else: - return False +def UpdateIncludeState(filename, include_dict, io=codecs): + """Fill up the include_dict with new includes found from the file. 
+ Args: + filename: the name of the header to read. + include_dict: a dictionary in which the headers are inserted. + io: The io factory to use to read the file. Provided for testability. -def _ClassifyInclude(fileinfo, include, is_system): - """Figures out what kind of header 'include' is. - - Args: - fileinfo: The current file cpplint is running over. A FileInfo instance. - include: The path to a #included file. - is_system: True if the #include used <> rather than "". - - Returns: - One of the _XXX_HEADER constants. - - For example: - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) - _C_SYS_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) - _CPP_SYS_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) - _LIKELY_MY_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), - ... 'bar/foo_other_ext.h', False) - _POSSIBLE_MY_HEADER - >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) - _OTHER_HEADER - """ - # This is a list of all standard c++ header files, except - # those already checked for above. - is_cpp_h = include in _CPP_HEADERS - - if is_system: - if is_cpp_h: - return _CPP_SYS_HEADER - else: - return _C_SYS_HEADER - - # If the target file and the include we're checking share a - # basename when we drop common extensions, and the include - # lives in . , then it's likely to be owned by the target file. - target_dir, target_base = ( - os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) - include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) - if target_base == include_base and ( - include_dir == target_dir or - include_dir == os.path.normpath(target_dir + '/../public')): - return _LIKELY_MY_HEADER - - # If the target and include share some initial basename - # component, it's possible the target is implementing the - # include, so it's allowed to be first, but we'll never - # complain if it's not there. 
- target_first_component = _RE_FIRST_COMPONENT.match(target_base) - include_first_component = _RE_FIRST_COMPONENT.match(include_base) - if (target_first_component and include_first_component and - target_first_component.group(0) == - include_first_component.group(0)): - return _POSSIBLE_MY_HEADER - - return _OTHER_HEADER + Returns: + True if a header was successfully added. False otherwise. + """ + headerfile = None + try: + headerfile = io.open(filename, "r", "utf8", "replace") + except IOError: + return False + linenum = 0 + for line in headerfile: + linenum += 1 + clean_line = CleanseComments(line) + match = _RE_PATTERN_INCLUDE.search(clean_line) + if match: + include = match.group(2) + include_dict.setdefault(include, linenum) + return True +def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs): + """Reports for missing stl includes. -def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): - """Check rules that are applicable to #include lines. - - Strings on #include lines are NOT removed from elided line, to make - certain tasks easier. However, to prevent false positives, checks - applicable to #include lines in CheckLanguage must be put here. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - include_state: An _IncludeState instance in which the headers are inserted. - error: The function to call with any errors found. - """ - fileinfo = FileInfo(filename) - line = clean_lines.lines[linenum] - - # "include" should use the new style "foo/bar.h" instead of just "bar.h" - # Only do this check if the included header follows google naming - # conventions. If not, assume that it's a 3rd party API that - # requires special include conventions. - # - # We also make an exception for Lua headers, which follow google - # naming convention but not the include convention. 
- match = Match(r'#include\s*"([^/]+\.h)"', line) - if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): - error(filename, linenum, 'build/include', 4, - 'Include the directory when naming .h files') - - # we shouldn't include a file more than once. actually, there are a - # handful of instances where doing so is okay, but in general it's - # not. - match = _RE_PATTERN_INCLUDE.search(line) - if match: - include = match.group(2) - is_system = (match.group(1) == '<') - duplicate_line = include_state.FindHeader(include) - if duplicate_line >= 0: - error(filename, linenum, 'build/include', 4, - '"%s" already included at %s:%s' % - (include, filename, duplicate_line)) - elif not _THIRD_PARTY_HEADERS_PATTERN.match(include): - include_state.include_list[-1].append((include, linenum)) - - # We want to ensure that headers appear in the right order: - # 1) for foo.cc, foo.h (preferred location) - # 2) c system files - # 3) cpp system files - # 4) for foo.cc, foo.h (deprecated location) - # 5) other google headers - # - # We classify each include statement as one of those 5 types - # using a number of techniques. The include_state object keeps - # track of the highest type seen, and complains if we see a - # lower type after that. - error_message = include_state.CheckNextIncludeOrder( - _ClassifyInclude(fileinfo, include, is_system)) - if error_message: - error(filename, linenum, 'build/include_order', 4, - '%s. Should be: %s.h, c system, c++ system, other.' % - (error_message, fileinfo.BaseName())) - canonical_include = include_state.CanonicalizeAlphabeticalOrder(include) - if not include_state.IsInAlphabeticalOrder( - clean_lines, linenum, canonical_include): - error(filename, linenum, 'build/include_alpha', 4, - 'Include "%s" not in alphabetical order' % include) - include_state.SetLastHeader(canonical_include) - - # Look for any of the stream classes that are part of standard C++. 
- match = _RE_PATTERN_INCLUDE.match(line) - if match: - include = match.group(2) - if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include): - # Many unit tests use cout, so we exempt them. - if not _IsTestFilename(filename): - # Suggest a different header for ostream - if include == 'ostream': - error(filename, linenum, 'readability/streams', 3, - 'For logging, include "base/logging.h" instead of .') - else: - error(filename, linenum, 'readability/streams', 3, - 'Streams are highly discouraged.') + This function will output warnings to make sure you are including the headers + necessary for the stl containers and functions that you use. We only give one + reason to include a header. For example, if you use both equal_to<> and + less<> in a .h file, only one (the latter in the file) of these will be + reported as a reason to include the . + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + include_state: An _IncludeState instance. + error: The function to call with any errors found. + io: The IO factory to use to read the header file. Provided for unittest + injection. + """ + required = {} # A map of header name to linenumber and the template entity. + # Example of required: { '': (1219, 'less<>') } -def _GetTextInside(text, start_pattern): - r"""Retrieves all the text between matching open and close parentheses. - - Given a string of lines and a regular expression string, retrieve all the text - following the expression and between opening punctuation symbols like - (, [, or {, and the matching close-punctuation symbol. This properly nested - occurrences of the punctuations, so for the text like - printf(a(), b(c())); - a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. - start_pattern must match string having an open punctuation symbol at the end. - - Args: - text: The lines to extract text. Its comments and strings must be elided. 
- It can be single line and can span multiple lines. - start_pattern: The regexp string indicating where to start extracting - the text. - Returns: - The extracted text. - None if either the opening string or ending punctuation could not be found. - """ - # TODO(unknown): Audit cpplint.py to see what places could be profitably - # rewritten to use _GetTextInside (and use inferior regexp matching today). - - # Give opening punctuations to get the matching close-punctuations. - matching_punctuation = {'(': ')', '{': '}', '[': ']'} - closing_punctuation = set(matching_punctuation.values()) - - # Find the position to start extracting text. - match = re.search(start_pattern, text, re.M) - if not match: # start_pattern not found in text. - return None - start_position = match.end(0) - - assert start_position > 0, ( - 'start_pattern must ends with an opening punctuation.') - assert text[start_position - 1] in matching_punctuation, ( - 'start_pattern must ends with an opening punctuation.') - # Stack of closing punctuations we expect to have in text after position. - punctuation_stack = [matching_punctuation[text[start_position - 1]]] - position = start_position - while punctuation_stack and position < len(text): - if text[position] == punctuation_stack[-1]: - punctuation_stack.pop() - elif text[position] in closing_punctuation: - # A closing punctuation without matching opening punctuations. - return None - elif text[position] in matching_punctuation: - punctuation_stack.append(matching_punctuation[text[position]]) - position += 1 - if punctuation_stack: - # Opening punctuations left without matching close-punctuations. - return None - # punctuations match. - return text[start_position:position - 1] + for linenum in range(clean_lines.NumLines()): + line = clean_lines.elided[linenum] + if not line or line[0] == "#": + continue + # String is special -- it is a non-templatized type in STL. 
+ matched = _RE_PATTERN_STRING.search(line) + if matched: + # Don't warn about strings in non-STL namespaces: + # (We check only the first match per line; good enough.) + prefix = line[: matched.start()] + if prefix.endswith("std::") or not prefix.endswith("::"): + required[""] = (linenum, "string") + + for pattern, template, header in _re_pattern_algorithm_header: + if pattern.search(line): + required[header] = (linenum, template) + + # The following function is just a speed up, no semantics are changed. + if not "<" in line: # Reduces the cpu time usage by skipping lines. + continue -# Patterns for matching call-by-reference parameters. -# -# Supports nested templates up to 2 levels deep using this messy pattern: -# < (?: < (?: < [^<>]* -# > -# | [^<>] )* -# > -# | [^<>] )* -# > -_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]* -_RE_PATTERN_TYPE = ( - r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?' - r'(?:\w|' - r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|' - r'::)+') -# A call-by-reference parameter ends with '& identifier'. -_RE_PATTERN_REF_PARAM = re.compile( - r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' - r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') -# A call-by-const-reference parameter either ends with 'const& identifier' -# or looks like 'const type& identifier' when 'type' is atomic. -_RE_PATTERN_CONST_REF_PARAM = ( - r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + - r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') - - -def CheckLanguage(filename, clean_lines, linenum, file_extension, - include_state, nesting_state, error): - """Checks rules from the 'C++ language rules' section of cppguide.html. - - Some of these rules are hard to test (function overloading, using - uint32 inappropriately), but we do the best we can. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. 
- file_extension: The extension (without the dot) of the filename. - include_state: An _IncludeState instance in which the headers are inserted. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. - error: The function to call with any errors found. - """ - # If the line is empty or consists of entirely a comment, no need to - # check it. - line = clean_lines.elided[linenum] - if not line: - return - - match = _RE_PATTERN_INCLUDE.search(line) - if match: - CheckIncludeLine(filename, clean_lines, linenum, include_state, error) - return - - # Reset include state across preprocessor directives. This is meant - # to silence warnings for conditional includes. - match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) - if match: - include_state.ResetSection(match.group(1)) - - # Make Windows paths like Unix. - fullname = os.path.abspath(filename).replace('\\', '/') - - # Perform other checks now that we are sure that this is not an include line - CheckCasts(filename, clean_lines, linenum, error) - CheckGlobalStatic(filename, clean_lines, linenum, error) - CheckPrintf(filename, clean_lines, linenum, error) - - if file_extension == 'h': - # TODO(unknown): check that 1-arg constructors are explicit. - # How to tell it's a constructor? - # (handled in CheckForNonStandardConstructs for now) - # TODO(unknown): check that classes declare or disable copy/assign - # (level 1 error) - pass + for pattern, template, header in _re_pattern_templates: + if pattern.search(line): + required[header] = (linenum, template) - # Check if people are using the verboten C basic types. The only exception - # we regularly allow is "unsigned short port" for port. - if Search(r'\bshort port\b', line): - if not Search(r'\bunsigned short port\b', line): - error(filename, linenum, 'runtime/int', 4, - 'Use "unsigned short" for ports, not "short"') - else: - match = Search(r'\b(short|long(?! 
+double)|long long)\b', line) - if match: - error(filename, linenum, 'runtime/int', 4, - 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) - - # Check if some verboten operator overloading is going on - # TODO(unknown): catch out-of-line unary operator&: - # class X {}; - # int operator&(const X& x) { return 42; } // unary operator& - # The trick is it's hard to tell apart from binary operator&: - # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& - if Search(r'\boperator\s*&\s*\(\s*\)', line): - error(filename, linenum, 'runtime/operator', 4, - 'Unary operator& is dangerous. Do not use it.') - - # Check for suspicious usage of "if" like - # } if (a == b) { - if Search(r'\}\s*if\s*\(', line): - error(filename, linenum, 'readability/braces', 4, - 'Did you mean "else if"? If not, start a new line for "if".') - - # Check for potential format string bugs like printf(foo). - # We constrain the pattern not to pick things like DocidForPrintf(foo). - # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) - # TODO(unknown): Catch the following case. Need to change the calling - # convention of the whole function to process multiple line to handle it. - # printf( - # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); - printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') - if printf_args: - match = Match(r'([\w.\->()]+)$', printf_args) - if match and match.group(1) != '__VA_ARGS__': - function_name = re.search(r'\b((?:string)?printf)\s*\(', - line, re.I).group(1) - error(filename, linenum, 'runtime/printf', 4, - 'Potential format string bug. Do %s("%%s", %s) instead.' - % (function_name, match.group(1))) - - # Check for potential memset bugs like memset(buf, sizeof(buf), 0). 
- match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) - if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): - error(filename, linenum, 'runtime/memset', 4, - 'Did you mean "memset(%s, 0, %s)"?' - % (match.group(1), match.group(2))) - - if Search(r'\busing namespace\b', line): - error(filename, linenum, 'build/namespaces', 5, - 'Do not use namespace using-directives. ' - 'Use using-declarations instead.') - - # Detect variable-length arrays. - match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) - if (match and match.group(2) != 'return' and match.group(2) != 'delete' and - match.group(3).find(']') == -1): - # Split the size using space and arithmetic operators as delimiters. - # If any of the resulting tokens are not compile time constants then - # report the error. - tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) - is_const = True - skip_next = False - for tok in tokens: - if skip_next: - skip_next = False - continue - - if Search(r'sizeof\(.+\)', tok): continue - if Search(r'arraysize\(\w+\)', tok): continue - - tok = tok.lstrip('(') - tok = tok.rstrip(')') - if not tok: continue - if Match(r'\d+', tok): continue - if Match(r'0[xX][0-9a-fA-F]+', tok): continue - if Match(r'k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue - if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue - # A catch all for tricky sizeof cases, including 'sizeof expression', - # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' - # requires skipping the next token because we split on ' ' and '*'. - if tok.startswith('sizeof'): - skip_next = True - continue - is_const = False - break - if not is_const: - error(filename, linenum, 'runtime/arrays', 1, - 'Do not use variable-length arrays. 
Use an appropriately named ' - "('k' followed by CamelCase) compile-time constant for the size.") - - # If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present, - # then it should be the last thing in the class declaration. - match = Match( - (r'\s*' - r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' - r'\(.*\);$'), - line) - if match and linenum + 1 < clean_lines.NumLines(): - next_line = clean_lines.elided[linenum + 1] - # We allow some, but not all, declarations of variables to be present - # in the statement that defines the class. The [\w\*,\s]* fragment of - # the regular expression below allows users to declare instances of - # the class or pointers to instances, but not less common types such - # as function pointers or arrays. It's a tradeoff between allowing - # reasonable code and avoiding trying to parse more C++ using regexps. - if not Search(r'^\s*}[\w\*,\s]*;', next_line): - error(filename, linenum, 'readability/constructors', 3, - match.group(1) + ' should be the last thing in the class') - - # Check for use of unnamed namespaces in header files. Registration - # macros are typically OK, so we allow use of "namespace {" on lines - # that end with backslashes. - if (file_extension == 'h' - and Search(r'\bnamespace\s*{', line) - and line[-1] != '\\'): - error(filename, linenum, 'build/namespaces', 4, - 'Do not use unnamed namespaces in header files. See ' - 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' - ' for more information.') + # The policy is that if you #include something in foo.h you don't need to + # include it again in foo.cc. Here, we will look at possible includes. + # Let's flatten the include_state include_list and copy it into a dictionary. + include_dict = dict( + [item for sublist in include_state.include_list for item in sublist] + ) + # Did we find the header for this file (if any) and successfully load it? 
+ header_found = False + + # Use the absolute path so that matching works properly. + abs_filename = FileInfo(filename).FullName() + + # For Emacs's flymake. + # If cpplint is invoked from Emacs's flymake, a temporary file is generated + # by flymake and that file name might end with '_flymake.cc'. In that case, + # restore original file name here so that the corresponding header file can be + # found. + # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' + # instead of 'foo_flymake.h' + abs_filename = re.sub(r"_flymake\.cc$", ".cc", abs_filename) + + # include_dict is modified during iteration, so we iterate over a copy of + # the keys. + header_keys = list(include_dict.keys()) + for header in header_keys: + (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) + fullpath = common_path + header + if same_module and UpdateIncludeState(fullpath, include_dict, io): + header_found = True + + # If we can't find the header file for a .cc, assume it's because we don't + # know where to look. In that case we'll give up as we're not sure they + # didn't include it in the .h file. + # TODO(unknown): Do a better job of finding .h files so we are confident that + # not having the .h file means there isn't one. + if filename.endswith(".cc") and not header_found: + return -def CheckGlobalStatic(filename, clean_lines, linenum, error): - """Check for unsafe global or static objects. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Match two lines at a time to support multiline declarations - if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): - line += clean_lines.elided[linenum + 1].strip() - - # Check for people declaring static/global STL strings at the top level. 
- # This is dangerous because the C++ language does not guarantee that - # globals with constructors are initialized before the first access. - match = Match( - r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', - line) - - # Remove false positives: - # - String pointers (as opposed to values). - # string *pointer - # const string *pointer - # string const *pointer - # string *const pointer - # - # - Functions and template specializations. - # string Function(... - # string Class::Method(... - # - # - Operators. These are matched separately because operator names - # cross non-word boundaries, and trying to match both operators - # and functions at the same time would decrease accuracy of - # matching identifiers. - # string Class::operator*() - if (match and - not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and - not Search(r'\boperator\W', line) and - not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))): - error(filename, linenum, 'runtime/string', 4, - 'For a static/global string constant, use a C style string instead: ' - '"%schar %s[]".' % - (match.group(1), match.group(2))) - - if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): - error(filename, linenum, 'runtime/init', 4, - 'You seem to be initializing a member variable with itself.') + # All the lines have been processed, report the errors found. + for required_header_unstripped in required: + template = required[required_header_unstripped][1] + if required_header_unstripped.strip('<>"') not in include_dict: + error( + filename, + required[required_header_unstripped][0], + "build/include_what_you_use", + 4, + "Add #include " + required_header_unstripped + " for " + template, + ) -def CheckPrintf(filename, clean_lines, linenum, error): - """Check for printf related issues. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. 
- error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # When snprintf is used, the second argument shouldn't be a literal. - match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) - if match and match.group(2) != '0': - # If 2nd arg is zero, snprintf is used to calculate size. - error(filename, linenum, 'runtime/printf', 3, - 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' - 'to snprintf.' % (match.group(1), match.group(2))) - - # Check if some verboten C functions are being used. - if Search(r'\bsprintf\s*\(', line): - error(filename, linenum, 'runtime/printf', 5, - 'Never use sprintf. Use snprintf instead.') - match = Search(r'\b(strcpy|strcat)\s*\(', line) - if match: - error(filename, linenum, 'runtime/printf', 4, - 'Almost always, snprintf is better than %s' % match.group(1)) +_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r"\bmake_pair\s*<") -def IsDerivedFunction(clean_lines, linenum): - """Check if current line contains an inherited function. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if current line contains a function with "override" - virt-specifier. - """ - # Scan back a few lines for start of current function - for i in range(linenum, max(-1, linenum - 10), -1): - match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) - if match: - # Look for "override" after the matching closing parenthesis - line, _, closing_paren = CloseExpression( - clean_lines, i, len(match.group(1))) - return (closing_paren >= 0 and - Search(r'\boverride\b', line[closing_paren:])) - return False +def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): + """Check that make_pair's template arguments are deduced. + G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are + specified explicitly, and such use isn't intended in any case. 
-def IsInitializerList(clean_lines, linenum): - """Check if current line is inside constructor initializer list. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - Returns: - True if current line appears to be inside constructor initializer - list, False otherwise. - """ - for i in range(linenum, 1, -1): - line = clean_lines.elided[i] - if i == linenum: - remove_function_body = Match(r'^(.*)\{\s*$', line) - if remove_function_body: - line = remove_function_body.group(1) - - if Search(r'\s:\s*\w+[({]', line): - # A lone colon tend to indicate the start of a constructor - # initializer list. It could also be a ternary operator, which - # also tend to appear in constructor initializer lists as - # opposed to parameter lists. - return True - if Search(r'\}\s*,\s*$', line): - # A closing brace followed by a comma is probably the end of a - # brace-initialized member in constructor initializer list. - return True - if Search(r'[{};]\s*$', line): - # Found one of the following: - # - A closing brace or semicolon, probably the end of the previous - # function. - # - An opening brace, probably the start of current class or namespace. - # - # Current line is probably not inside an initializer list since - # we saw one of those things without seeing the starting colon. - return False - - # Got to the beginning of the file without seeing the start of - # constructor initializer list. - return False - - -def CheckForNonConstReference(filename, clean_lines, linenum, - nesting_state, error): - """Check for non-const references. - - Separate from CheckLanguage since it scans backwards from current - line, instead of scanning forward. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. 
- error: The function to call with any errors found. - """ - # Do nothing if there is no '&' on current line. - line = clean_lines.elided[linenum] - if '&' not in line: - return - - # If a function is inherited, current function doesn't have much of - # a choice, so any non-const references should not be blamed on - # derived function. - if IsDerivedFunction(clean_lines, linenum): - return - - # Long type names may be broken across multiple lines, usually in one - # of these forms: - # LongType - # ::LongTypeContinued &identifier - # LongType:: - # LongTypeContinued &identifier - # LongType< - # ...>::LongTypeContinued &identifier - # - # If we detected a type split across two lines, join the previous - # line to current line so that we can match const references - # accordingly. - # - # Note that this only scans back one line, since scanning back - # arbitrary number of lines would be expensive. If you have a type - # that spans more than 2 lines, please use a typedef. - if linenum > 1: - previous = None - if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): - # previous_line\n + ::current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', - clean_lines.elided[linenum - 1]) - elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): - # previous_line::\n + current_line - previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', - clean_lines.elided[linenum - 1]) - if previous: - line = previous.group(1) + line.lstrip() - else: - # Check for templated parameter that is split across multiple lines - endpos = line.rfind('>') - if endpos > -1: - (_, startline, startpos) = ReverseCloseExpression( - clean_lines, linenum, endpos) - if startpos > -1 and startline < linenum: - # Found the matching < on an earlier line, collect all - # pieces up to current line. - line = '' - for i in range(startline, linenum + 1): - line += clean_lines.elided[i].strip() - - # Check for non-const references in function parameters. 
A single '&' may - # found in the following places: - # inside expression: binary & for bitwise AND - # inside expression: unary & for taking the address of something - # inside declarators: reference parameter - # We will exclude the first two cases by checking that we are not inside a - # function body, including one that was just introduced by a trailing '{'. - # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. - if (nesting_state.previous_stack_top and - not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or - isinstance(nesting_state.previous_stack_top, _NamespaceInfo))): - # Not at toplevel, not within a class, and not within a namespace - return - - # Avoid initializer lists. We only need to scan back from the - # current line for something that starts with ':'. - # - # We don't need to check the current line, since the '&' would - # appear inside the second set of parentheses on the current line as - # opposed to the first set. - if linenum > 0: - for i in range(linenum - 1, max(0, linenum - 10), -1): - previous_line = clean_lines.elided[i] - if not Search(r'[),]\s*$', previous_line): - break - if Match(r'^\s*:\s+\S', previous_line): - return + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. 
+ """ + line = clean_lines.elided[linenum] + match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) + if match: + error( + filename, + linenum, + "build/explicit_make_pair", + 4, # 4 = high confidence + "For C++11-compatibility, omit template arguments from make_pair" + " OR use pair directly OR if appropriate, construct a pair directly", + ) - # Avoid preprocessors - if Search(r'\\\s*$', line): - return - - # Avoid constructor initializer lists - if IsInitializerList(clean_lines, linenum): - return - - # We allow non-const references in a few standard places, like functions - # called "swap()" or iostream operators like "<<" or ">>". Do not check - # those function parameters. - # - # We also accept & in static_assert, which looks like a function but - # it's actually a declaration expression. - whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' - r'operator\s*[<>][<>]|' - r'static_assert|COMPILE_ASSERT' - r')\s*\(') - if Search(whitelisted_functions, line): - return - elif not Search(r'\S+\([^)]*$', line): - # Don't see a whitelisted function on this line. Actually we - # didn't see any function name on this line, so this is likely a - # multi-line parameter list. Try a bit harder to catch this case. - for i in range(2): - if (linenum > i and - Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): - return - decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body - for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): - if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): - error(filename, linenum, 'runtime/references', 2, - 'Is this a non-const reference? ' - 'If so, make const or use a pointer: ' + - ReplaceAll(' *<', '<', parameter)) +def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error): + """Check that default lambda captures are not used. + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. 
+ error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] -def CheckCasts(filename, clean_lines, linenum, error): - """Various cast related checks. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Check to see if they're using an conversion function cast. - # I just try to capture the most common basic types, though there are more. - # Parameterless conversion functions, such as bool(), are allowed as they are - # probably a member operator declaration or default constructor. - match = Search( - r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b' - r'(int|float|double|bool|char|int32|uint32|int64|uint64)' - r'(\([^)].*)', line) - expecting_function = ExpectingFunctionArgs(clean_lines, linenum) - if match and not expecting_function: - matched_type = match.group(2) - - # matched_new_or_template is used to silence two false positives: - # - New operators - # - Template arguments with function types - # - # For template arguments, we match on types immediately following - # an opening bracket without any spaces. This is a fast way to - # silence the common case where the function type is the first - # template argument. False negative with less-than comparison is - # avoided because those operators are usually followed by a space. - # - # function // bracket + no space = false positive - # value < double(42) // bracket + space = true positive - matched_new_or_template = match.group(1) - - # Avoid arrays by looking for brackets that come after the closing - # parenthesis. 
- if Match(r'\([^()]+\)\s*\[', match.group(3)): - return - - # Other things to ignore: - # - Function pointers - # - Casts to pointer types - # - Placement new - # - Alias declarations - matched_funcptr = match.group(3) - if (matched_new_or_template is None and - not (matched_funcptr and - (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', - matched_funcptr) or - matched_funcptr.startswith('(*)'))) and - not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and - not Search(r'new\(\S+\)\s*' + matched_type, line)): - error(filename, linenum, 'readability/casting', 4, - 'Using deprecated casting style. ' - 'Use static_cast<%s>(...) instead' % - matched_type) - - if not expecting_function: - CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', - r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) - - # This doesn't catch all cases. Consider (const char * const)"hello". - # - # (char *) "foo" should always be a const_cast (reinterpret_cast won't - # compile). - if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', - r'\((char\s?\*+\s?)\)\s*"', error): - pass - else: - # Check pointer casts for other than string constants - CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', - r'\((\w+\s?\*+\s?)\)', error) - - # In addition, we look for people taking the address of a cast. This - # is dangerous -- casts can assign to temporaries, so the pointer doesn't - # point where you think. - # - # Some non-identifier character is required before the '&' for the - # expression to be recognized as a cast. 
These are casts: - # expression = &static_cast(temporary()); - # function(&(int*)(temporary())); - # - # This is not a cast: - # reference_type&(int* function_param); - match = Search( - r'(?:[^\w]&\(([^)]+)\)[\w(])|' - r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) - if match and match.group(1) != '*': - # Try a better error message when the & is bound to something - # dereferenced by the casted pointer, as opposed to the casted - # pointer itself. - parenthesis_error = False - match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) + # A lambda introducer specifies a default capture if it starts with "[=" + # or if it starts with "[&" _not_ followed by an identifier. + match = Match(r"^(.*)\[\s*(?:=|&[^\w])", line) if match: - _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) - if x1 >= 0 and clean_lines.elided[y1][x1] == '(': - _, y2, x2 = CloseExpression(clean_lines, y1, x1) - if x2 >= 0: - extended_line = clean_lines.elided[y2][x2:] - if y2 < clean_lines.NumLines() - 1: - extended_line += clean_lines.elided[y2 + 1] - if Match(r'\s*(?:->|\[)', extended_line): - parenthesis_error = True - - if parenthesis_error: - error(filename, linenum, 'readability/casting', 4, - ('Are you taking an address of something dereferenced ' - 'from a cast? Wrapping the dereferenced expression in ' - 'parentheses will make the binding more obvious')) - else: - error(filename, linenum, 'runtime/casting', 4, - ('Are you taking an address of a cast? ' - 'This is dangerous: could be a temp var. ' - 'Take the address before doing the cast, rather than after')) + # Found a potential error, check what comes after the lambda-introducer. + # If it's not open parenthesis (for lambda-declarator) or open brace + # (for compound-statement), it's not a lambda. 
+ line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1))) + if pos >= 0 and Match(r"^\s*[{(]", line[pos:]): + error( + filename, + linenum, + "build/c++11", + 4, # 4 = high confidence + "Default lambda captures are an unapproved C++ feature.", + ) -def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): - """Checks for a C-style cast by looking for the pattern. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - cast_type: The string for the C++ cast to recommend. This is either - reinterpret_cast, static_cast, or const_cast, depending. - pattern: The regular expression used to find C-style casts. - error: The function to call with any errors found. - - Returns: - True if an error was emitted. - False otherwise. - """ - line = clean_lines.elided[linenum] - match = Search(pattern, line) - if not match: - return False - - # Exclude lines with keywords that tend to look like casts - context = line[0:match.start(1) - 1] - if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): - return False - - # Try expanding current context to see if we one level of - # parentheses inside a macro. - if linenum > 0: - for i in range(linenum - 1, max(0, linenum - 5), -1): - context = clean_lines.elided[i] + context - if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): - return False +def CheckRedundantVirtual(filename, clean_lines, linenum, error): + """Check if line contains a redundant "virtual" function-specifier. - # operator++(int) and operator--(int) - if context.endswith(' operator++') or context.endswith(' operator--'): - return False + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + # Look for "virtual" on current line. 
+ line = clean_lines.elided[linenum] + virtual = Match(r"^(.*\bvirtual\b)", line) + if not virtual: + return - # A single unnamed argument for a function tends to look like old - # style cast. If we see those, don't issue warnings for deprecated - # casts, instead issue warnings for unnamed arguments where - # appropriate. - # - # These are things that we want warnings for, since the style guide - # explicitly require all parameters to be named: - # Function(int); - # Function(int) { - # ConstMember(int) const; - # ConstMember(int) const { - # ExceptionMember(int) throw (...); - # ExceptionMember(int) throw (...) { - # PureVirtual(int) = 0; - # - # These are functions of some sort, where the compiler would be fine - # if they had named parameters, but people often omit those - # identifiers to reduce clutter: - # (FunctionPointer)(int); - # (FunctionPointer)(int) = value; - # Function((function_pointer_arg)(int)) - # Function((function_pointer_arg)(int), int param) - # ; - # <(FunctionPointerTemplateArgument)(int)>; - remainder = line[match.end(0):] - if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])', - remainder): - # Looks like an unnamed parameter. - - # Don't warn on any kind of template arguments. - if Match(r'^\s*>', remainder): - return False - - # Don't warn on assignments to function pointers, but keep warnings for - # unnamed parameters to pure virtual functions. Note that this pattern - # will also pass on assignments of "0" to function pointers, but the - # preferred values for those would be "nullptr" or "NULL". - matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) - if matched_zero and matched_zero.group(1) != '0': - return False - - # Don't warn on function pointer declarations. For this we need - # to check what came before the "(type)" string. 
- if Match(r'.*\)\s*$', line[0:match.start(0)]): - return False - - # Don't warn if the parameter is named with block comments, e.g.: - # Function(int /*unused_param*/); - raw_line = clean_lines.raw_lines[linenum] - if '/*' in raw_line: - return False - - # Passed all filters, issue warning here. - error(filename, linenum, 'readability/function', 3, - 'All parameters should be named in a function') - return True + # Look for the next opening parenthesis. This is the start of the + # parameter list (possibly on the next line shortly after virtual). + # TODO(unknown): doesn't work if there are virtual functions with + # decltype() or other things that use parentheses, but csearch suggests + # that this is rare. + end_col = -1 + end_line = -1 + start_col = len(virtual.group(1)) + for start_line in range(linenum, min(linenum + 3, clean_lines.NumLines())): + line = clean_lines.elided[start_line][start_col:] + parameter_list = Match(r"^([^(]*)\(", line) + if parameter_list: + # Match parentheses to find the end of the parameter list + (_, end_line, end_col) = CloseExpression( + clean_lines, start_line, start_col + len(parameter_list.group(1)) + ) + break + start_col = 0 + + if end_col < 0: + return # Couldn't find end of parameter list, give up + + # Look for "override" or "final" after the parameter list + # (possibly on the next few lines). + for i in range(end_line, min(end_line + 3, clean_lines.NumLines())): + line = clean_lines.elided[i][end_col:] + match = Search(r"\b(override|final)\b", line) + if match: + error( + filename, + linenum, + "readability/inheritance", + 4, + ( + '"virtual" is redundant since function is ' + 'already declared as "%s"' % match.group(1) + ), + ) + + # Set end_col to check whole lines after we are done with the + # first line. + end_col = 0 + if Search(r"[^\w]\s*$", line): + break - # At this point, all that should be left is actual casts. - error(filename, linenum, 'readability/casting', 4, - 'Using C-style cast. Use %s<%s>(...) 
instead' % - (cast_type, match.group(1))) - return True +def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): + """Check if line contains a redundant "override" or "final" virt-specifier. + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + # Check that at most one of "override" or "final" is present, not both + line = clean_lines.elided[linenum] + if Search(r"\boverride\b", line) and Search(r"\bfinal\b", line): + error( + filename, + linenum, + "readability/inheritance", + 4, + ( + '"override" is redundant since function is ' + 'already declared as "final"' + ), + ) -def ExpectingFunctionArgs(clean_lines, linenum): - """Checks whether where function type arguments are expected. - - Args: - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - - Returns: - True if the line at 'linenum' is inside something that expects arguments - of function types. - """ - line = clean_lines.elided[linenum] - return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or - (linenum >= 2 and - (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', - clean_lines.elided[linenum - 1]) or - Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', - clean_lines.elided[linenum - 2]) or - Search(r'\bstd::m?function\s*\<\s*$', - clean_lines.elided[linenum - 1])))) +# Returns true if we are at a new block, and it is directly +# inside of a namespace. +def IsBlockInNameSpace(nesting_state, is_forward_declaration): + """Checks that the new block is directly in a namespace. 
-_HEADERS_CONTAINING_TEMPLATES = ( - ('', ('deque',)), - ('', ('unary_function', 'binary_function', - 'plus', 'minus', 'multiplies', 'divides', 'modulus', - 'negate', - 'equal_to', 'not_equal_to', 'greater', 'less', - 'greater_equal', 'less_equal', - 'logical_and', 'logical_or', 'logical_not', - 'unary_negate', 'not1', 'binary_negate', 'not2', - 'bind1st', 'bind2nd', - 'pointer_to_unary_function', - 'pointer_to_binary_function', - 'ptr_fun', - 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', - 'mem_fun_ref_t', - 'const_mem_fun_t', 'const_mem_fun1_t', - 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', - 'mem_fun_ref', - )), - ('', ('numeric_limits',)), - ('', ('list',)), - ('', ('map', 'multimap',)), - ('', ('allocator',)), - ('', ('queue', 'priority_queue',)), - ('', ('set', 'multiset',)), - ('', ('stack',)), - ('', ('char_traits', 'basic_string',)), - ('', ('pair',)), - ('', ('vector',)), + Args: + nesting_state: The _NestingState object that contains info about our state. + is_forward_declaration: If the class is a forward declared class. + Returns: + Whether or not the new block is directly in a namespace. + """ + if is_forward_declaration: + if len(nesting_state.stack) >= 1 and ( + isinstance(nesting_state.stack[-1], _NamespaceInfo) + ): + return True + else: + return False - # gcc extensions. - # Note: std::hash is their hash, ::hash is our hash - ('', ('hash_map', 'hash_multimap',)), - ('', ('hash_set', 'hash_multiset',)), - ('', ('slist',)), + return ( + len(nesting_state.stack) > 1 + and nesting_state.stack[-1].check_namespace_indentation + and isinstance(nesting_state.stack[-2], _NamespaceInfo) ) -_RE_PATTERN_STRING = re.compile(r'\bstring\b') -_re_pattern_algorithm_header = [] -for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap', - 'transform'): - # Match max(..., ...), max(..., ...), but not foo->max, foo.max or - # type::max(). 
- _re_pattern_algorithm_header.append( - (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'), - _template, - '')) +def ShouldCheckNamespaceIndentation( + nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum +): + """This method determines if we should apply our namespace indentation check. -_re_pattern_templates = [] -for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: - for _template in _templates: - _re_pattern_templates.append( - (re.compile(r'(\<|\b)' + _template + r'\s*\<'), - _template + '<>', - _header)) + Args: + nesting_state: The current nesting state. + is_namespace_indent_item: If we just put a new class on the stack, True. + If the top of the stack is not a class, or we did not recently + add the class, False. + raw_lines_no_comments: The lines without the comments. + linenum: The current line number we are processing. + Returns: + True if we should apply our namespace indentation check. Currently, it + only works for classes and namespaces inside of a namespace. + """ -def FilesBelongToSameModule(filename_cc, filename_h): - """Check if these two filenames belong to the same module. - - The concept of a 'module' here is a as follows: - foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the - same 'module' if they are in the same directory. - some/path/public/xyzzy and some/path/internal/xyzzy are also considered - to belong to the same module here. - - If the filename_cc contains a longer path than the filename_h, for example, - '/absolute/path/to/base/sysinfo.cc', and this file would include - 'base/sysinfo.h', this function also produces the prefix needed to open the - header. This is used by the caller of this function to more robustly open the - header file. We don't have access to the real include paths in this context, - so we need this guesswork here. - - Known bugs: tools/base/bar.cc and base/bar.h belong to the same module - according to this implementation. 
Because of this, this function gives - some false positives. This should be sufficiently rare in practice. - - Args: - filename_cc: is the path for the .cc file - filename_h: is the path for the header path - - Returns: - Tuple with a bool and a string: - bool: True if filename_cc and filename_h belong to the same module. - string: the additional prefix needed to open the header file. - """ - - if not filename_cc.endswith('.cc'): - return (False, '') - filename_cc = filename_cc[:-len('.cc')] - if filename_cc.endswith('_unittest'): - filename_cc = filename_cc[:-len('_unittest')] - elif filename_cc.endswith('_test'): - filename_cc = filename_cc[:-len('_test')] - filename_cc = filename_cc.replace('/public/', '/') - filename_cc = filename_cc.replace('/internal/', '/') - - if not filename_h.endswith('.h'): - return (False, '') - filename_h = filename_h[:-len('.h')] - if filename_h.endswith('-inl'): - filename_h = filename_h[:-len('-inl')] - filename_h = filename_h.replace('/public/', '/') - filename_h = filename_h.replace('/internal/', '/') - - files_belong_to_same_module = filename_cc.endswith(filename_h) - common_path = '' - if files_belong_to_same_module: - common_path = filename_cc[:-len(filename_h)] - return files_belong_to_same_module, common_path + is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, linenum) + if not (is_namespace_indent_item or is_forward_declaration): + return False -def UpdateIncludeState(filename, include_dict, io=codecs): - """Fill up the include_dict with new includes found from the file. - - Args: - filename: the name of the header to read. - include_dict: a dictionary in which the headers are inserted. - io: The io factory to use to read the file. Provided for testability. - - Returns: - True if a header was successfully added. False otherwise. 
- """ - headerfile = None - try: - headerfile = io.open(filename, 'r', 'utf8', 'replace') - except IOError: - return False - linenum = 0 - for line in headerfile: - linenum += 1 - clean_line = CleanseComments(line) - match = _RE_PATTERN_INCLUDE.search(clean_line) - if match: - include = match.group(2) - include_dict.setdefault(include, linenum) - return True - - -def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, - io=codecs): - """Reports for missing stl includes. - - This function will output warnings to make sure you are including the headers - necessary for the stl containers and functions that you use. We only give one - reason to include a header. For example, if you use both equal_to<> and - less<> in a .h file, only one (the latter in the file) of these will be - reported as a reason to include the . - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - include_state: An _IncludeState instance. - error: The function to call with any errors found. - io: The IO factory to use to read the header file. Provided for unittest - injection. - """ - required = {} # A map of header name to linenumber and the template entity. - # Example of required: { '': (1219, 'less<>') } - - for linenum in range(clean_lines.NumLines()): - line = clean_lines.elided[linenum] - if not line or line[0] == '#': - continue + # If we are in a macro, we do not want to check the namespace indentation. + if IsMacroDefinition(raw_lines_no_comments, linenum): + return False - # String is special -- it is a non-templatized type in STL. - matched = _RE_PATTERN_STRING.search(line) - if matched: - # Don't warn about strings in non-STL namespaces: - # (We check only the first match per line; good enough.) 
- prefix = line[:matched.start()] - if prefix.endswith('std::') or not prefix.endswith('::'): - required[''] = (linenum, 'string') - - for pattern, template, header in _re_pattern_algorithm_header: - if pattern.search(line): - required[header] = (linenum, template) - - # The following function is just a speed up, no semantics are changed. - if not '<' in line: # Reduces the cpu time usage by skipping lines. - continue - - for pattern, template, header in _re_pattern_templates: - if pattern.search(line): - required[header] = (linenum, template) - - # The policy is that if you #include something in foo.h you don't need to - # include it again in foo.cc. Here, we will look at possible includes. - # Let's flatten the include_state include_list and copy it into a dictionary. - include_dict = dict([item for sublist in include_state.include_list - for item in sublist]) - - # Did we find the header for this file (if any) and successfully load it? - header_found = False - - # Use the absolute path so that matching works properly. - abs_filename = FileInfo(filename).FullName() - - # For Emacs's flymake. - # If cpplint is invoked from Emacs's flymake, a temporary file is generated - # by flymake and that file name might end with '_flymake.cc'. In that case, - # restore original file name here so that the corresponding header file can be - # found. - # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' - # instead of 'foo_flymake.h' - abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) - - # include_dict is modified during iteration, so we iterate over a copy of - # the keys. - header_keys = list(include_dict.keys()) - for header in header_keys: - (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) - fullpath = common_path + header - if same_module and UpdateIncludeState(fullpath, include_dict, io): - header_found = True - - # If we can't find the header file for a .cc, assume it's because we don't - # know where to look. 
In that case we'll give up as we're not sure they - # didn't include it in the .h file. - # TODO(unknown): Do a better job of finding .h files so we are confident that - # not having the .h file means there isn't one. - if filename.endswith('.cc') and not header_found: - return - - # All the lines have been processed, report the errors found. - for required_header_unstripped in required: - template = required[required_header_unstripped][1] - if required_header_unstripped.strip('<>"') not in include_dict: - error(filename, required[required_header_unstripped][0], - 'build/include_what_you_use', 4, - 'Add #include ' + required_header_unstripped + ' for ' + template) - - -_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') + return IsBlockInNameSpace(nesting_state, is_forward_declaration) -def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): - """Check that make_pair's template arguments are deduced. - - G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are - specified explicitly, and such use isn't intended in any case. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) - if match: - error(filename, linenum, 'build/explicit_make_pair', - 4, # 4 = high confidence - 'For C++11-compatibility, omit template arguments from make_pair' - ' OR use pair directly OR if appropriate, construct a pair directly') +# Call this method if the line is directly inside of a namespace. +# If the line above is blank (excluding comments) or the start of +# an inner namespace, it cannot be indented. 
+def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, error): + line = raw_lines_no_comments[linenum] + if Match(r"^\s+", line): + error( + filename, + linenum, + "runtime/indentation_namespace", + 4, + "Do not indent within a namespace", + ) + + +def ProcessLine( + filename, + file_extension, + clean_lines, + line, + include_state, + function_state, + nesting_state, + error, + extra_check_functions=[], +): + """Processes a single line in the file. + Args: + filename: Filename of the file that is being processed. + file_extension: The extension (dot not included) of the file. + clean_lines: An array of strings, each representing a line of the file, + with comments stripped. + line: Number of line being processed. + include_state: An _IncludeState instance in which the headers are inserted. + function_state: A _FunctionState instance which counts function lines, etc. + nesting_state: A NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: A callable to which errors are reported, which takes 4 arguments: + filename, line number, error level, and message + extra_check_functions: An array of additional check functions that will be + run on each source line. 
Each function takes 4 + arguments: filename, clean_lines, line, error + """ + raw_lines = clean_lines.raw_lines + ParseNolintSuppressions(filename, raw_lines[line], line, error) + nesting_state.Update(filename, clean_lines, line, error) + CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error) + if nesting_state.InAsmBlock(): + return + CheckForFunctionLengths(filename, clean_lines, line, function_state, error) + CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) + CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) + CheckLanguage( + filename, clean_lines, line, file_extension, include_state, nesting_state, error + ) + CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) + CheckVlogArguments(filename, clean_lines, line, error) + CheckPosixThreading(filename, clean_lines, line, error) + CheckInvalidIncrement(filename, clean_lines, line, error) + CheckMakePairUsesDeduction(filename, clean_lines, line, error) + CheckDefaultLambdaCaptures(filename, clean_lines, line, error) + CheckRedundantVirtual(filename, clean_lines, line, error) + CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) + for check_fn in extra_check_functions: + check_fn(filename, clean_lines, line, error) -def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error): - """Check that default lambda captures are not used. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # A lambda introducer specifies a default capture if it starts with "[=" - # or if it starts with "[&" _not_ followed by an identifier. - match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line) - if match: - # Found a potential error, check what comes after the lambda-introducer. 
- # If it's not open parenthesis (for lambda-declarator) or open brace - # (for compound-statement), it's not a lambda. - line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1))) - if pos >= 0 and Match(r'^\s*[{(]', line[pos:]): - error(filename, linenum, 'build/c++11', - 4, # 4 = high confidence - 'Default lambda captures are an unapproved C++ feature.') +def FlagCxx11Features(filename, clean_lines, linenum, error): + """Flag those c++11 features that we only allow in certain places. -def CheckRedundantVirtual(filename, clean_lines, linenum, error): - """Check if line contains a redundant "virtual" function-specifier. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - # Look for "virtual" on current line. - line = clean_lines.elided[linenum] - virtual = Match(r'^(.*\bvirtual\b)', line) - if not virtual: return - - # Look for the next opening parenthesis. This is the start of the - # parameter list (possibly on the next line shortly after virtual). - # TODO(unknown): doesn't work if there are virtual functions with - # decltype() or other things that use parentheses, but csearch suggests - # that this is rare. - end_col = -1 - end_line = -1 - start_col = len(virtual.group(1)) - for start_line in range(linenum, min(linenum + 3, clean_lines.NumLines())): - line = clean_lines.elided[start_line][start_col:] - parameter_list = Match(r'^([^(]*)\(', line) - if parameter_list: - # Match parentheses to find the end of the parameter list - (_, end_line, end_col) = CloseExpression( - clean_lines, start_line, start_col + len(parameter_list.group(1))) - break - start_col = 0 - - if end_col < 0: - return # Couldn't find end of parameter list, give up - - # Look for "override" or "final" after the parameter list - # (possibly on the next few lines). 
- for i in range(end_line, min(end_line + 3, clean_lines.NumLines())): - line = clean_lines.elided[i][end_col:] - match = Search(r'\b(override|final)\b', line) - if match: - error(filename, linenum, 'readability/inheritance', 4, - ('"virtual" is redundant since function is ' - 'already declared as "%s"' % match.group(1))) + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] - # Set end_col to check whole lines after we are done with the - # first line. - end_col = 0 - if Search(r'[^\w]\s*$', line): - break + # Flag unapproved C++11 headers. + include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) + if include and include.group(1) in ( + "cfenv", + "fenv.h", + "condition_variable", + "future", + "ratio", + "regex", + "system_error", + ): + error( + filename, + linenum, + "build/c++11", + 5, + ("<%s> is an unapproved C++11 header.") % include.group(1), + ) + + # The only place where we need to worry about C++11 keywords and library + # features in preprocessor directives is in macro definitions. + if Match(r"\s*#", line) and not Match(r"\s*#\s*define\b", line): + return + # These are classes and free functions. The classes are always + # mentioned as std::*, but we only catch the free functions if + # they're not found by ADL. They're alphabetical by header. + for top_name in ( + # type_traits + "alignment_of", + "aligned_union", + # utility + "forward", + ): + if Search(r"\bstd::%s\b" % top_name, line): + error( + filename, + linenum, + "build/c++11", + 5, + ( + "std::%s is an unapproved C++11 class or function. Send c-style " + "an example of where it would make your code more readable, and " + "they may let you use it." 
+ ) + % top_name, + ) + + +def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=[]): + """Performs lint checks and reports any errors to the given error function. -def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): - """Check if line contains a redundant "override" or "final" virt-specifier. + Args: + filename: Filename of the file that is being processed. + file_extension: The extension (dot not included) of the file. + lines: An array of strings, each representing a line of the file, with the + last element being empty if the file is terminated with a newline. + error: A callable to which errors are reported, which takes 4 arguments: + filename, line number, error level, and message + extra_check_functions: An array of additional check functions that will be + run on each source line. Each function takes 4 + arguments: filename, clean_lines, line, error + """ + lines = ( + ["// marker so line numbers and indices both start at 1"] + + lines + + ["// marker so line numbers end in a known way"] + ) - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - # Check that at most one of "override" or "final" is present, not both - line = clean_lines.elided[linenum] - if Search(r'\boverride\b', line) and Search(r'\bfinal\b', line): - error(filename, linenum, 'readability/inheritance', 4, - ('"override" is redundant since function is ' - 'already declared as "final"')) + include_state = _IncludeState() + function_state = _FunctionState() + nesting_state = NestingState() + ResetNolintSuppressions() + CheckForCopyright(filename, lines, error) + if file_extension == "h": + CheckForHeaderGuard(filename, lines, error) -# Returns true if we are at a new block, and it is directly -# inside of a namespace. 
-def IsBlockInNameSpace(nesting_state, is_forward_declaration): - """Checks that the new block is directly in a namespace. - - Args: - nesting_state: The _NestingState object that contains info about our state. - is_forward_declaration: If the class is a forward declared class. - Returns: - Whether or not the new block is directly in a namespace. - """ - if is_forward_declaration: - if len(nesting_state.stack) >= 1 and ( - isinstance(nesting_state.stack[-1], _NamespaceInfo)): - return True - else: - return False + RemoveMultiLineComments(filename, lines, error) + clean_lines = CleansedLines(lines) + for line in range(clean_lines.NumLines()): + ProcessLine( + filename, + file_extension, + clean_lines, + line, + include_state, + function_state, + nesting_state, + error, + extra_check_functions, + ) + FlagCxx11Features(filename, clean_lines, line, error) + nesting_state.CheckCompletedBlocks(filename, error) - return (len(nesting_state.stack) > 1 and - nesting_state.stack[-1].check_namespace_indentation and - isinstance(nesting_state.stack[-2], _NamespaceInfo)) + CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) + # We check here rather than inside ProcessLine so that we see raw + # lines rather than "cleaned" lines. + CheckForBadCharacters(filename, lines, error) -def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, - raw_lines_no_comments, linenum): - """This method determines if we should apply our namespace indentation check. + CheckForNewlineAtEOF(filename, lines, error) - Args: - nesting_state: The current nesting state. - is_namespace_indent_item: If we just put a new class on the stack, True. - If the top of the stack is not a class, or we did not recently - add the class, False. - raw_lines_no_comments: The lines without the comments. - linenum: The current line number we are processing. - Returns: - True if we should apply our namespace indentation check. 
Currently, it - only works for classes and namespaces inside of a namespace. - """ +def ProcessConfigOverrides(filename): + """Loads the configuration files and processes the config overrides. - is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, - linenum) + Args: + filename: The name of the file being processed by the linter. - if not (is_namespace_indent_item or is_forward_declaration): - return False + Returns: + False if the current |filename| should not be processed further. + """ - # If we are in a macro, we do not want to check the namespace indentation. - if IsMacroDefinition(raw_lines_no_comments, linenum): - return False + abs_filename = os.path.abspath(filename) + cfg_filters = [] + keep_looking = True + while keep_looking: + abs_path, base_name = os.path.split(abs_filename) + if not base_name: + break # Reached the root directory. + + cfg_file = os.path.join(abs_path, "CPPLINT.cfg") + abs_filename = abs_path + if not os.path.isfile(cfg_file): + continue - return IsBlockInNameSpace(nesting_state, is_forward_declaration) + try: + with open(cfg_file) as file_handle: + for line in file_handle: + line, _, _ = line.partition("#") # Remove comments. + if not line.strip(): + continue + + name, _, val = line.partition("=") + name = name.strip() + val = val.strip() + if name == "set noparent": + keep_looking = False + elif name == "filter": + cfg_filters.append(val) + elif name == "exclude_files": + # When matching exclude_files pattern, use the base_name of + # the current file name or the directory name we are processing. + # For example, if we are checking for lint errors in /foo/bar/baz.cc + # and we found the .cfg file at /foo/CPPLINT.cfg, then the config + # file's "exclude_files" filter is meant to be checked against "bar" + # and not "baz" nor "bar/baz.cc". + if base_name: + pattern = re.compile(val) + if pattern.match(base_name): + sys.stderr.write( + 'Ignoring "%s": file excluded by "%s". 
' + 'File path component "%s" matches ' + 'pattern "%s"\n' + % (filename, cfg_file, base_name, val) + ) + return False + elif name == "linelength": + global _line_length + try: + _line_length = int(val) + except ValueError: + sys.stderr.write("Line length must be numeric.") + else: + sys.stderr.write( + "Invalid configuration option (%s) in file %s\n" + % (name, cfg_file) + ) + + except IOError: + sys.stderr.write( + "Skipping config file '%s': Can't open for reading\n" % cfg_file + ) + keep_looking = False + # Apply all the accumulated filters in reverse order (top-level directory + # config options having the least priority). + for filter in reversed(cfg_filters): + _AddFilters(filter) -# Call this method if the line is directly inside of a namespace. -# If the line above is blank (excluding comments) or the start of -# an inner namespace, it cannot be indented. -def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, - error): - line = raw_lines_no_comments[linenum] - if Match(r'^\s+', line): - error(filename, linenum, 'runtime/indentation_namespace', 4, - 'Do not indent within a namespace') - - -def ProcessLine(filename, file_extension, clean_lines, line, - include_state, function_state, nesting_state, error, - extra_check_functions=[]): - """Processes a single line in the file. - - Args: - filename: Filename of the file that is being processed. - file_extension: The extension (dot not included) of the file. - clean_lines: An array of strings, each representing a line of the file, - with comments stripped. - line: Number of line being processed. - include_state: An _IncludeState instance in which the headers are inserted. - function_state: A _FunctionState instance which counts function lines, etc. - nesting_state: A NestingState instance which maintains information about - the current stack of nested blocks being parsed. 
- error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - extra_check_functions: An array of additional check functions that will be - run on each source line. Each function takes 4 - arguments: filename, clean_lines, line, error - """ - raw_lines = clean_lines.raw_lines - ParseNolintSuppressions(filename, raw_lines[line], line, error) - nesting_state.Update(filename, clean_lines, line, error) - CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, - error) - if nesting_state.InAsmBlock(): return - CheckForFunctionLengths(filename, clean_lines, line, function_state, error) - CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) - CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) - CheckLanguage(filename, clean_lines, line, file_extension, include_state, - nesting_state, error) - CheckForNonStandardConstructs(filename, clean_lines, line, - nesting_state, error) - CheckVlogArguments(filename, clean_lines, line, error) - CheckPosixThreading(filename, clean_lines, line, error) - CheckInvalidIncrement(filename, clean_lines, line, error) - CheckMakePairUsesDeduction(filename, clean_lines, line, error) - CheckDefaultLambdaCaptures(filename, clean_lines, line, error) - CheckRedundantVirtual(filename, clean_lines, line, error) - CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) - for check_fn in extra_check_functions: - check_fn(filename, clean_lines, line, error) + return True -def FlagCxx11Features(filename, clean_lines, linenum, error): - """Flag those c++11 features that we only allow in certain places. - - Args: - filename: The name of the current file. - clean_lines: A CleansedLines instance containing the file. - linenum: The number of the line to check. - error: The function to call with any errors found. - """ - line = clean_lines.elided[linenum] - - # Flag unapproved C++11 headers. 
- include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) - if include and include.group(1) in ('cfenv', - 'fenv.h', - 'condition_variable', - 'future', - 'ratio', - 'regex', - 'system_error', - ): - error(filename, linenum, 'build/c++11', 5, - ('<%s> is an unapproved C++11 header.') % include.group(1)) - - # The only place where we need to worry about C++11 keywords and library - # features in preprocessor directives is in macro definitions. - if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return - - # These are classes and free functions. The classes are always - # mentioned as std::*, but we only catch the free functions if - # they're not found by ADL. They're alphabetical by header. - for top_name in ( - # type_traits - 'alignment_of', - 'aligned_union', - - # utility - 'forward', - ): - if Search(r'\bstd::%s\b' % top_name, line): - error(filename, linenum, 'build/c++11', 5, - ('std::%s is an unapproved C++11 class or function. Send c-style ' - 'an example of where it would make your code more readable, and ' - 'they may let you use it.') % top_name) - - -def ProcessFileData(filename, file_extension, lines, error, - extra_check_functions=[]): - """Performs lint checks and reports any errors to the given error function. - - Args: - filename: Filename of the file that is being processed. - file_extension: The extension (dot not included) of the file. - lines: An array of strings, each representing a line of the file, with the - last element being empty if the file is terminated with a newline. - error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message - extra_check_functions: An array of additional check functions that will be - run on each source line. 
Each function takes 4 - arguments: filename, clean_lines, line, error - """ - lines = (['// marker so line numbers and indices both start at 1'] + lines + - ['// marker so line numbers end in a known way']) - - include_state = _IncludeState() - function_state = _FunctionState() - nesting_state = NestingState() - - ResetNolintSuppressions() - - CheckForCopyright(filename, lines, error) - - if file_extension == 'h': - CheckForHeaderGuard(filename, lines, error) - - RemoveMultiLineComments(filename, lines, error) - clean_lines = CleansedLines(lines) - for line in range(clean_lines.NumLines()): - ProcessLine(filename, file_extension, clean_lines, line, - include_state, function_state, nesting_state, error, - extra_check_functions) - FlagCxx11Features(filename, clean_lines, line, error) - nesting_state.CheckCompletedBlocks(filename, error) - - CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) - - # We check here rather than inside ProcessLine so that we see raw - # lines rather than "cleaned" lines. - CheckForBadCharacters(filename, lines, error) - - CheckForNewlineAtEOF(filename, lines, error) -def ProcessConfigOverrides(filename): - """ Loads the configuration files and processes the config overrides. +def ProcessFile(filename, vlevel, extra_check_functions=[]): + """Does google-lint on a single file. + + Args: + filename: The name of the file to parse. - Args: - filename: The name of the file being processed by the linter. + vlevel: The level of errors to report. Every error of confidence + >= verbose_level will be reported. 0 is a good default. - Returns: - False if the current |filename| should not be processed further. - """ + extra_check_functions: An array of additional check functions that will be + run on each source line. 
Each function takes 4 + arguments: filename, clean_lines, line, error + """ - abs_filename = os.path.abspath(filename) - cfg_filters = [] - keep_looking = True - while keep_looking: - abs_path, base_name = os.path.split(abs_filename) - if not base_name: - break # Reached the root directory. + _SetVerboseLevel(vlevel) + _BackupFilters() - cfg_file = os.path.join(abs_path, "CPPLINT.cfg") - abs_filename = abs_path - if not os.path.isfile(cfg_file): - continue + if not ProcessConfigOverrides(filename): + _RestoreFilters() + return + lf_lines = [] + crlf_lines = [] try: - with open(cfg_file) as file_handle: - for line in file_handle: - line, _, _ = line.partition('#') # Remove comments. - if not line.strip(): - continue + # Support the UNIX convention of using "-" for stdin. Note that + # we are not opening the file with universal newline support + # (which codecs doesn't support anyway), so the resulting lines do + # contain trailing '\r' characters if we are reading a file that + # has CRLF endings. + # If after the split a trailing '\r' is present, it is removed + # below. + if filename == "-": + lines = ( + codecs.StreamReaderWriter( + sys.stdin, + codecs.getreader("utf8"), + codecs.getwriter("utf8"), + "replace", + ) + .read() + .split("\n") + ) + else: + lines = codecs.open(filename, "r", "utf8", "replace").read().split("\n") - name, _, val = line.partition('=') - name = name.strip() - val = val.strip() - if name == 'set noparent': - keep_looking = False - elif name == 'filter': - cfg_filters.append(val) - elif name == 'exclude_files': - # When matching exclude_files pattern, use the base_name of - # the current file name or the directory name we are processing. - # For example, if we are checking for lint errors in /foo/bar/baz.cc - # and we found the .cfg file at /foo/CPPLINT.cfg, then the config - # file's "exclude_files" filter is meant to be checked against "bar" - # and not "baz" nor "bar/baz.cc". 
- if base_name: - pattern = re.compile(val) - if pattern.match(base_name): - sys.stderr.write('Ignoring "%s": file excluded by "%s". ' - 'File path component "%s" matches ' - 'pattern "%s"\n' % - (filename, cfg_file, base_name, val)) - return False - elif name == 'linelength': - global _line_length - try: - _line_length = int(val) - except ValueError: - sys.stderr.write('Line length must be numeric.') - else: - sys.stderr.write( - 'Invalid configuration option (%s) in file %s\n' % - (name, cfg_file)) + # Remove trailing '\r'. + # The -1 accounts for the extra trailing blank line we get from split() + for linenum in range(len(lines) - 1): + if lines[linenum].endswith("\r"): + lines[linenum] = lines[linenum].rstrip("\r") + crlf_lines.append(linenum + 1) + else: + lf_lines.append(linenum + 1) except IOError: - sys.stderr.write( - "Skipping config file '%s': Can't open for reading\n" % cfg_file) - keep_looking = False - - # Apply all the accumulated filters in reverse order (top-level directory - # config options having the least priority). - for filter in reversed(cfg_filters): - _AddFilters(filter) + sys.stderr.write("Skipping input '%s': Can't open for reading\n" % filename) + _RestoreFilters() + return - return True + # Note, if no dot is found, this will give the entire filename as the ext. + file_extension = filename[filename.rfind(".") + 1 :] + # When reading from stdin, the extension is unknown, so no cpplint tests + # should rely on the extension. + if filename != "-" and file_extension not in _valid_extensions: + sys.stderr.write( + "Ignoring %s; not a valid file name " + "(%s)\n" % (filename, ", ".join(_valid_extensions)) + ) + else: + ProcessFileData(filename, file_extension, lines, Error, extra_check_functions) -def ProcessFile(filename, vlevel, extra_check_functions=[]): - """Does google-lint on a single file. + # If end-of-line sequences are a mix of LF and CR-LF, issue + # warnings on the lines with CR. 
+ # + # Don't issue any warnings if all lines are uniformly LF or CR-LF, + # since critique can handle these just fine, and the style guide + # doesn't dictate a particular end of line sequence. + # + # We can't depend on os.linesep to determine what the desired + # end-of-line sequence should be, since that will return the + # server-side end-of-line sequence. + if lf_lines and crlf_lines: + # Warn on every line with CR. An alternative approach might be to + # check whether the file is mostly CRLF or just LF, and warn on the + # minority, we bias toward LF here since most tools prefer LF. + for linenum in crlf_lines: + Error( + filename, + linenum, + "whitespace/newline", + 1, + "Unexpected \\r (^M) found; better to use only \\n", + ) + + # sys.stderr.write('Done processing %s\n' % filename) + _RestoreFilters() - Args: - filename: The name of the file to parse. - vlevel: The level of errors to report. Every error of confidence - >= verbose_level will be reported. 0 is a good default. +def PrintUsage(message): + """Prints a brief usage string and exits, optionally with an error message. - extra_check_functions: An array of additional check functions that will be - run on each source line. Each function takes 4 - arguments: filename, clean_lines, line, error - """ + Args: + message: The optional error message. + """ + sys.stderr.write(_USAGE) + if message: + sys.exit("\nFATAL ERROR: " + message) + else: + sys.exit(1) - _SetVerboseLevel(vlevel) - _BackupFilters() - if not ProcessConfigOverrides(filename): - _RestoreFilters() - return - - lf_lines = [] - crlf_lines = [] - try: - # Support the UNIX convention of using "-" for stdin. Note that - # we are not opening the file with universal newline support - # (which codecs doesn't support anyway), so the resulting lines do - # contain trailing '\r' characters if we are reading a file that - # has CRLF endings. - # If after the split a trailing '\r' is present, it is removed - # below. 
- if filename == '-': - lines = codecs.StreamReaderWriter(sys.stdin, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace').read().split('\n') - else: - lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') - - # Remove trailing '\r'. - # The -1 accounts for the extra trailing blank line we get from split() - for linenum in range(len(lines) - 1): - if lines[linenum].endswith('\r'): - lines[linenum] = lines[linenum].rstrip('\r') - crlf_lines.append(linenum + 1) - else: - lf_lines.append(linenum + 1) - - except IOError: - sys.stderr.write( - "Skipping input '%s': Can't open for reading\n" % filename) - _RestoreFilters() - return - - # Note, if no dot is found, this will give the entire filename as the ext. - file_extension = filename[filename.rfind('.') + 1:] - - # When reading from stdin, the extension is unknown, so no cpplint tests - # should rely on the extension. - if filename != '-' and file_extension not in _valid_extensions: - sys.stderr.write('Ignoring %s; not a valid file name ' - '(%s)\n' % (filename, ', '.join(_valid_extensions))) - else: - ProcessFileData(filename, file_extension, lines, Error, - extra_check_functions) - - # If end-of-line sequences are a mix of LF and CR-LF, issue - # warnings on the lines with CR. - # - # Don't issue any warnings if all lines are uniformly LF or CR-LF, - # since critique can handle these just fine, and the style guide - # doesn't dictate a particular end of line sequence. - # - # We can't depend on os.linesep to determine what the desired - # end-of-line sequence should be, since that will return the - # server-side end-of-line sequence. - if lf_lines and crlf_lines: - # Warn on every line with CR. An alternative approach might be to - # check whether the file is mostly CRLF or just LF, and warn on the - # minority, we bias toward LF here since most tools prefer LF. 
- for linenum in crlf_lines: - Error(filename, linenum, 'whitespace/newline', 1, - 'Unexpected \\r (^M) found; better to use only \\n') +def PrintCategories(): + """Prints a list of all the error-categories used by error messages. - #sys.stderr.write('Done processing %s\n' % filename) - _RestoreFilters() + These are the categories used to filter messages via --filter. + """ + sys.stderr.write("".join(" %s\n" % cat for cat in _ERROR_CATEGORIES)) + sys.exit(0) -def PrintUsage(message): - """Prints a brief usage string and exits, optionally with an error message. +def ParseArguments(args): + """Parses the command line arguments. - Args: - message: The optional error message. - """ - sys.stderr.write(_USAGE) - if message: - sys.exit('\nFATAL ERROR: ' + message) - else: - sys.exit(1) + This may set the output format and verbosity level as side-effects. + Args: + args: The command line arguments: -def PrintCategories(): - """Prints a list of all the error-categories used by error messages. + Returns: + The list of filenames to lint. + """ + try: + (opts, filenames) = getopt.getopt( + args, + "", + [ + "help", + "output=", + "verbose=", + "counting=", + "filter=", + "root=", + "linelength=", + "extensions=", + ], + ) + except getopt.GetoptError: + PrintUsage("Invalid arguments.") + + verbosity = _VerboseLevel() + output_format = _OutputFormat() + filters = "" + counting_style = "" + + for (opt, val) in opts: + if opt == "--help": + PrintUsage(None) + elif opt == "--output": + if val not in ("emacs", "vs7", "eclipse"): + PrintUsage( + "The only allowed output formats are emacs, vs7 and eclipse." 
+ ) + output_format = val + elif opt == "--verbose": + verbosity = int(val) + elif opt == "--filter": + filters = val + if not filters: + PrintCategories() + elif opt == "--counting": + if val not in ("total", "toplevel", "detailed"): + PrintUsage("Valid counting options are total, toplevel, and detailed") + counting_style = val + elif opt == "--root": + global _root + _root = val + elif opt == "--linelength": + global _line_length + try: + _line_length = int(val) + except ValueError: + PrintUsage("Line length must be digits.") + elif opt == "--extensions": + global _valid_extensions + try: + _valid_extensions = set(val.split(",")) + except ValueError: + PrintUsage("Extensions must be comma seperated list.") - These are the categories used to filter messages via --filter. - """ - sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) - sys.exit(0) + if not filenames: + PrintUsage("No files were specified.") + _SetOutputFormat(output_format) + _SetVerboseLevel(verbosity) + _SetFilters(filters) + _SetCountingStyle(counting_style) -def ParseArguments(args): - """Parses the command line arguments. - - This may set the output format and verbosity level as side-effects. - - Args: - args: The command line arguments: - - Returns: - The list of filenames to lint. 
- """ - try: - (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', - 'counting=', - 'filter=', - 'root=', - 'linelength=', - 'extensions=']) - except getopt.GetoptError: - PrintUsage('Invalid arguments.') - - verbosity = _VerboseLevel() - output_format = _OutputFormat() - filters = '' - counting_style = '' - - for (opt, val) in opts: - if opt == '--help': - PrintUsage(None) - elif opt == '--output': - if val not in ('emacs', 'vs7', 'eclipse'): - PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') - output_format = val - elif opt == '--verbose': - verbosity = int(val) - elif opt == '--filter': - filters = val - if not filters: - PrintCategories() - elif opt == '--counting': - if val not in ('total', 'toplevel', 'detailed'): - PrintUsage('Valid counting options are total, toplevel, and detailed') - counting_style = val - elif opt == '--root': - global _root - _root = val - elif opt == '--linelength': - global _line_length - try: - _line_length = int(val) - except ValueError: - PrintUsage('Line length must be digits.') - elif opt == '--extensions': - global _valid_extensions - try: - _valid_extensions = set(val.split(',')) - except ValueError: - PrintUsage('Extensions must be comma seperated list.') - - if not filenames: - PrintUsage('No files were specified.') - - _SetOutputFormat(output_format) - _SetVerboseLevel(verbosity) - _SetFilters(filters) - _SetCountingStyle(counting_style) - - return filenames + return filenames def main(): - filenames = ParseArguments(sys.argv[1:]) + filenames = ParseArguments(sys.argv[1:]) - # Change stderr to write with replacement characters so we don't die - # if we try to print something containing non-ASCII characters. - sys.stderr = codecs.StreamReaderWriter(sys.stderr, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace') + # Change stderr to write with replacement characters so we don't die + # if we try to print something containing non-ASCII characters. 
+ sys.stderr = codecs.StreamReaderWriter( + sys.stderr, codecs.getreader("utf8"), codecs.getwriter("utf8"), "replace" + ) - _cpplint_state.ResetErrorCounts() - for filename in filenames: - ProcessFile(filename, _cpplint_state.verbose_level) - _cpplint_state.PrintErrorCounts() + _cpplint_state.ResetErrorCounts() + for filename in filenames: + ProcessFile(filename, _cpplint_state.verbose_level) + _cpplint_state.PrintErrorCounts() - sys.exit(_cpplint_state.error_count > 0) + sys.exit(_cpplint_state.error_count > 0) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/scripts/git/cpplint_repo.py b/scripts/git/cpplint_repo.py index e96f160b7d..7ab54f59c0 100755 --- a/scripts/git/cpplint_repo.py +++ b/scripts/git/cpplint_repo.py @@ -1,29 +1,37 @@ #!/usr/bin/env python3 -import os, importlib, fnmatch +import fnmatch +import importlib +import os num_errors = 0 + def get_cpplint_path(): return os.path.dirname(os.path.realpath(__file__)) + "/cpplint.py" + def get_repo_path(): return os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + "/../..") + def run_cpplint(filename, cpplint_path): - cpplint = importlib.machinery.SourceFileLoader('cpplint', cpplint_path).load_module() + cpplint = importlib.machinery.SourceFileLoader( + "cpplint", cpplint_path + ).load_module() cpplint._cpplint_state.ResetErrorCounts() cpplint.print_stdout = False cpplint._line_length = 120 cpplint.output = [] try: index = filename.split("/").index("include") - cpplint._root = "/".join(filename.split("/")[:index+1]) + cpplint._root = "/".join(filename.split("/")[: index + 1]) except ValueError: pass cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level) return cpplint.output + def print_objection(): print(" ____ __ _ __ _ __") print(" / __ \/ /_ (_)__ _____/ /_(_)___ ____ / /") @@ -32,6 +40,7 @@ def print_objection(): print("\____/_.___/_/ /\___/\___/\__/_/\____/_/ /_(_) ") print(" /___/ ") + def main(): num_errors = 0 @@ -40,27 +49,56 @@ def 
main(): # Lets look for source files and headers in our repo for root, dirnames, filenames in os.walk(get_repo_path()): - if "Software" in root or "external" in root or "gnc/matlab" in root or \ - "submodules" in root or "debian" in root or "build" in root: + if ( + "Software" in root + or "external" in root + or "gnc/matlab" in root + or "submodules" in root + or "debian" in root + or "build" in root + ): continue for filename in filenames: if "agast_score" in filename or "brisk" in filename: continue - if not filename.endswith((".cpp",".cc",".h",".hpp",".cc.in",".c.in",".h.in", - ".hpp.in",".cxx",".hxx")): + if not filename.endswith( + ( + ".cpp", + ".cc", + ".h", + ".hpp", + ".cc.in", + ".c.in", + ".h.in", + ".hpp.in", + ".cxx", + ".hxx", + ) + ): continue - output = run_cpplint((root + "/" + filename).replace(get_repo_path() + "/", ''), cpplint_path) + output = run_cpplint( + (root + "/" + filename).replace(get_repo_path() + "/", ""), cpplint_path + ) # Print an objection at first sight of errors if num_errors == 0 and len(output) > 0: print_objection() - + num_errors += len(output) for error in output: - print("%s:%s: %s" % ((root + "/" + filename).replace(get_repo_path() + "/", ''), str(error[0]), error[1])) + print( + ( + "%s:%s: %s" + % ( + (root + "/" + filename).replace(get_repo_path() + "/", ""), + str(error[0]), + error[1], + ) + ) + ) - print("="*50) + print(("=" * 50)) if num_errors > 0: - print(" You have %d lint errors" % num_errors) + print((" You have %d lint errors" % num_errors)) elif num_errors == 0: print(" Code adheres to style guide lines") @@ -68,4 +106,4 @@ def main(): if __name__ == "__main__": - main() + main() diff --git a/scripts/git/pre-commit b/scripts/git/pre-commit index f20691a56c..a6fc491446 100755 --- a/scripts/git/pre-commit +++ b/scripts/git/pre-commit @@ -1,114 +1,24 @@ -#!/usr/bin/env python3 - -import datetime, os, importlib, subprocess -from operator import itemgetter -from itertools import groupby - -def 
get_cpplint_path(): - pwd = os.path.realpath(os.path.curdir) - root = -1 - if 'FreeFlyerMLP' in pwd: - root = pwd.split("/").index("FreeFlyerMLP") - return "/".join(pwd.split("/")[:root+1] + ["scripts","git","cpplint.py"]) - else: - return get_repo_path().decode() + "/scripts/git/cpplint.py" - return "" - -def get_repo_path(): - pwd = os.path.realpath(os.path.curdir) - root = -1 - if 'FreeFlyerMLP' in pwd: - root = pwd.split("/").index("FreeFlyerMLP") - return "/".join(pwd.split("/")[:root+1]) - else: - try: - return subprocess.Popen(["git", "rev-parse", "--show-toplevel"], - stdout=subprocess.PIPE).communicate()[0].rstrip() - except: - print("Can't find repo path") - exit(1) - return "" - -def run_cpplint(filename, cpplint_path): - cpplint = importlib.machinery.SourceFileLoader('cpplint', cpplint_path).load_module() - cpplint._cpplint_state.ResetErrorCounts() - cpplint.print_stdout = False - cpplint._line_length = 120 - cpplint.output = [] - try: - index = filename.split("/").index("include") - cpplint._root = "/".join(filename.split("/")[:index+1]) - except ValueError: - pass - cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level) - return cpplint.output - -def print_objection(): - print(" ____ __ _ __ _ __") - print(" / __ \/ /_ (_)__ _____/ /_(_)___ ____ / /") - print(" / / / / __ \ / / _ \/ ___/ __/ / __ \/ __ \/ / ") - print("/ /_/ / /_/ / / / __/ /__/ /_/ / /_/ / / / /_/ ") - print("\____/_.___/_/ /\___/\___/\__/_/\____/_/ /_(_) ") - print(" /___/ ") - -def main(): - num_errors = 0 - - cpplint_path = get_cpplint_path() - repo_path = get_repo_path() - - os.chdir(repo_path) - cmd = ["git", "diff", "--cached", "--name-only"] - p = subprocess.Popen(cmd, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - output, err = p.communicate() - if p.returncode: - print("Failed to run git ls-files") - exit(1) - cpp_files_to_lint = [] - for filename in output.split(): - if 
str(filename).endswith((".cpp",".cc",".h",".hpp",".cc.in",".c",".c.in",".h.in", - ".hpp.in",".cxx",".hxx")): - if not "external/" in filename and not "Software/" in filename and \ - not "gnc/matlab/" in filename and not "agast_score" in filename and \ - not "brisk" in filename and not "debian" in filename and \ - not "build" in filename: - cpp_files_to_lint.append(filename) - - for filename in cpp_files_to_lint: - output = run_cpplint(filename, cpplint_path) - - # Print an objection at first sight of errors - if num_errors == 0 and len(output) > 0: - print_objection() - - num_errors += len(output) - if len(output) > 0: - lines = [] - for error in output: - print("%s:%s: %s" % (filename, str(error[0]), error[1])) - lines.append(error[0]) - # Run the clang-format if there are errors in the identified ranges - if len(output) > 0: - command = "clang-format-8 -style=file -i " + filename - ranges = [] - for k, g in groupby(enumerate(lines), lambda i_x:i_x[0]-i_x[1]): - group = list(map(itemgetter(1), g)) - ranges.append((group[0], group[-1])) - for clang_range in ranges: - command = command + " --lines=" + str(clang_range[0]) + ":" + str(clang_range[1]) - # print command - retcode = subprocess.Popen(command, shell=True) - - print("="*50) - if num_errors > 0: - print(" You have %d lint errors, commit failed" % num_errors) - print(" The errors were automatically piped through clang-format-8") - elif num_errors == 0: - print(" Code adheres to style guide lines, committing ...") - - exit(num_errors) - - -if __name__ == "__main__": - main() +#!/bin/sh +# +# Copyright (c) 2017, United States Government, as represented by the +# Administrator of the National Aeronautics and Space Administration. +# +# All rights reserved. +# +# The Astrobee platform is licensed under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the +# License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +set -e + +rootdir=`git rev-parse --show-toplevel` + +${rootdir}/scripts/git/pre-commit.linter_cpp +${rootdir}/scripts/git/pre-commit.linter_python diff --git a/scripts/git/pre-commit.linter_cpp b/scripts/git/pre-commit.linter_cpp new file mode 100755 index 0000000000..836ddbb117 --- /dev/null +++ b/scripts/git/pre-commit.linter_cpp @@ -0,0 +1,156 @@ +#!/usr/bin/env python3 + +import datetime +import importlib.machinery +import os +import subprocess +from itertools import groupby +from operator import itemgetter + + +def get_cpplint_path(): + pwd = os.path.realpath(os.path.curdir) + root = -1 + if "FreeFlyerMLP" in pwd: + root = pwd.split("/").index("FreeFlyerMLP") + return "/".join(pwd.split("/")[: root + 1] + ["scripts", "git", "cpplint.py"]) + else: + return get_repo_path().decode() + "/scripts/git/cpplint.py" + return "" + + +def get_repo_path(): + pwd = os.path.realpath(os.path.curdir) + root = -1 + if "FreeFlyerMLP" in pwd: + root = pwd.split("/").index("FreeFlyerMLP") + return "/".join(pwd.split("/")[: root + 1]) + else: + try: + return ( + subprocess.Popen( + ["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE + ) + .communicate()[0] + .rstrip() + ) + except: + print("Can't find repo path") + exit(1) + return "" + + +def run_cpplint(filename, cpplint_path): + cpplint = importlib.machinery.SourceFileLoader( + "cpplint", cpplint_path + ).load_module() + cpplint._cpplint_state.ResetErrorCounts() + cpplint.print_stdout = False + cpplint._line_length = 120 + cpplint.output = [] + try: + index = filename.split("/").index("include") + 
cpplint._root = "/".join(filename.split("/")[: index + 1]) + except ValueError: + pass + cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level) + return cpplint.output + + +def print_objection(): + print(" ____ __ _ __ _ __") + print(" / __ \/ /_ (_)__ _____/ /_(_)___ ____ / /") + print(" / / / / __ \ / / _ \/ ___/ __/ / __ \/ __ \/ / ") + print("/ /_/ / /_/ / / / __/ /__/ /_/ / /_/ / / / /_/ ") + print("\____/_.___/_/ /\___/\___/\__/_/\____/_/ /_(_) ") + print(" /___/ ") + + +def main(): + num_errors = 0 + + cpplint_path = get_cpplint_path() + repo_path = get_repo_path() + + os.chdir(repo_path) + cmd = ["git", "diff", "--cached", "--name-only"] + p = subprocess.Popen( + cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE + ) + output, err = p.communicate() + if p.returncode: + print("Failed to run git ls-files") + exit(1) + cpp_files_to_lint = [] + for filename in output.split(): + # Needed for python3 + filename = filename.decode('ascii') + if str(filename).endswith( + ( + ".cpp", + ".cc", + ".h", + ".hpp", + ".cc.in", + ".c", + ".c.in", + ".h.in", + ".hpp.in", + ".cxx", + ".hxx", + ) + ): + if ( + not "external/" in filename + and not "Software/" in filename + and not "gnc/matlab/" in filename + and not "agast_score" in filename + and not "brisk" in filename + and not "debian" in filename + and not "build" in filename + ): + cpp_files_to_lint.append(filename) + + for filename in cpp_files_to_lint: + output = run_cpplint(filename, cpplint_path) + + # Print an objection at first sight of errors + if num_errors == 0 and len(output) > 0: + print_objection() + + num_errors += len(output) + if len(output) > 0: + lines = [] + for error in output: + print("%s:%s: %s" % (filename, str(error[0]), error[1])) + lines.append(error[0]) + # Run the clang-format if there are errors in the identified ranges + if len(output) > 0: + command = "clang-format-8 -style=file -i " + filename + ranges = [] + for k, g in groupby(enumerate(lines), lambda 
i_x: i_x[0] - i_x[1]): + group = list(map(itemgetter(1), g)) + ranges.append((group[0], group[-1])) + for clang_range in ranges: + command = ( + command + + " --lines=" + + str(clang_range[0]) + + ":" + + str(clang_range[1]) + ) + # print command + retcode = subprocess.Popen(command, shell=True) + + print("=" * 50) + if num_errors > 0: + print(" You have %d lint errors, commit failed" % num_errors) + print(" The errors were automatically piped through clang-format-8") + elif num_errors == 0: + print(" Code adheres to style guide lines, committing ...") + + exit(num_errors) + + +if __name__ == "__main__": + main() diff --git a/scripts/git/pre-commit.linter_python b/scripts/git/pre-commit.linter_python new file mode 100755 index 0000000000..80ea0a186b --- /dev/null +++ b/scripts/git/pre-commit.linter_python @@ -0,0 +1,69 @@ +#!/bin/sh +# +# Copyright (c) 2017, United States Government, as represented by the +# Administrator of the National Aeronautics and Space Administration. +# +# All rights reserved. +# +# The Astrobee platform is licensed under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the +# License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +files=$(git diff --cached --name-only | grep '\.py$') + +# If files is empty exit success +if [ -z "${files}" ]; then + echo "==================================================" + echo " No Python files changed, no checks needed." + exit 0 +fi + +failed_lint=false + +if ! 
command -v black > /dev/null 2>&1
+then
+    echo "Black was not found, to install check https://github.com/psf/black"
+    echo "Unfortunately black requires Python 3.6.2+ to run"
+    exit 0
+fi
+
+if ! command -v isort > /dev/null 2>&1
+then
+    echo "isort was not found, to install check https://github.com/PyCQA/isort"
+    exit 0
+fi
+
+echo "=================================================="
+echo " Analysing with black"
+# This checks the files but they will not be committed
+if `black . --include ${files} --check --quiet`; then
+    echo "Linter black checks passed"
+else
+    echo "Errors detected, fixing ..."
+    black . --include ${files}
+    failed_lint=true
+fi
+
+echo "=================================================="
+echo " Analysing with isort"
+if $(isort ${files} --extend-skip cmake --profile black --diff --check-only --quiet >/dev/null); then
+    echo "Linter isort checks passed"
+else
+    echo "Errors detected, fixing ..."
+    isort ${files} --extend-skip cmake --profile black >/dev/null
+    failed_lint=true
+fi
+
+# Cancel commit if linter failed
+# The user should manually add the corrected files
+if ${failed_lint}; then
+    exit 1
+fi
diff --git a/scripts/setup/debians/build_install_debians.sh b/scripts/setup/debians/build_install_debians.sh
index 16ea11ec85..6f166261c6 100755
--- a/scripts/setup/debians/build_install_debians.sh
+++ b/scripts/setup/debians/build_install_debians.sh
@@ -19,6 +19,7 @@
 #
 # Install the dependencies needed for the debians. Build and install flight
 # software debians.
+set -e DEBIAN_LOC=$(dirname "$(readlink -f "$0")") diff --git a/shared/ff_util/include/ff_util/ff_names.h b/shared/ff_util/include/ff_util/ff_names.h index 58dc340a8e..151bb72a60 100644 --- a/shared/ff_util/include/ff_util/ff_names.h +++ b/shared/ff_util/include/ff_util/ff_names.h @@ -188,7 +188,7 @@ #define TOPIC_MANAGEMENT_SYS_MONITOR_CONFIG "mgt/sys_monitor/config" #define TOPIC_MANAGEMENT_SYS_MONITOR_STATE "mgt/sys_monitor/state" #define TOPIC_MANAGEMENT_SYS_MONITOR_HEARTBEAT "mgt/sys_monitor/heartbeat" -#define TOPIC_MANAGEMENT_SYS_MONITOR_TIME_DIFF "mgt/sys_monitor/time_diff" +#define TOPIC_MANAGEMENT_SYS_MONITOR_TIME_SYNC "mgt/sys_monitor/time_sync" #define TOPIC_MANAGEMENT_DATA_BAGGER_STATE "mgt/data_bagger/state" #define TOPIC_MANAGEMENT_DATA_BAGGER_TOPICS "mgt/data_bagger/topics" #define TOPIC_MANAGEMENT_CAMERA_STATE "mgt/camera_state" @@ -232,10 +232,14 @@ #define SERVICE_MOBILITY_SET_INERTIA "mob/set_inertia" #define SERVICE_MOBILITY_SET_ZONES "mob/set_zones" #define SERVICE_MOBILITY_GET_ZONES "mob/get_zones" +#define SERVICE_MOBILITY_GET_ZONES_MAP "mob/get_zones_map" #define SERVICE_MOBILITY_SET_STATE "mob/set_state" -#define SERVICE_MOBILITY_UPDATE_MAP_RESOLUTION "mob/mapper/update_resolution" -#define SERVICE_MOBILITY_UPDATE_MEMORY_TIME "mob/mapper/update_memory_time" -#define SERVICE_MOBILITY_UPDATE_INFLATION "mob/mapper/update_inflation_radius" +#define SERVICE_MOBILITY_SET_MAP_RESOLUTION "mob/mapper/set_map_resolution" +#define SERVICE_MOBILITY_SET_MEMORY_TIME "mob/mapper/set_memory_time" +#define SERVICE_MOBILITY_SET_COLLISION_DISTANCE "mob/mapper/set_collision_distance" +#define SERVICE_MOBILITY_GET_MAP_RESOLUTION "mob/mapper/get_map_resolution" +#define SERVICE_MOBILITY_GET_MEMORY_TIME "mob/mapper/get_memory_time" +#define SERVICE_MOBILITY_GET_MAP_INFLATION "mob/mapper/get_map_inflation" #define SERVICE_MOBILITY_RESET_MAP "mob/mapper/reset_map" #define SERVICE_MOBILITY_GET_FREE_MAP "mob/mapper/get_free_map" #define 
SERVICE_MOBILITY_GET_OBSTACLE_MAP "mob/mapper/get_obstacle_map" @@ -425,6 +429,7 @@ #define SERVICE_HARDWARE_EPS_GET_BOARD_INFO "hw/eps/get_board_info" #define SERVICE_HARDWARE_EPS_CLEAR_TERMINATE "hw/eps/clear_terminate" +#define SERVICE_HARDWARE_PERCHING_ARM_ENABLE "hw/arm/enable_arm" #define SERVICE_HARDWARE_PERCHING_ARM_DIST_VEL "hw/arm/set_dist_vel" #define SERVICE_HARDWARE_PERCHING_ARM_PROX_VEL "hw/arm/set_prox_vel" #define SERVICE_HARDWARE_PERCHING_ARM_PROX_SERVO "hw/arm/enable_proximal_servo" diff --git a/shared/ff_util/src/ff_flight/ff_flight.cc b/shared/ff_util/src/ff_flight/ff_flight.cc index cfa9c54f1c..975f4c0cc8 100644 --- a/shared/ff_util/src/ff_flight/ff_flight.cc +++ b/shared/ff_util/src/ff_flight/ff_flight.cc @@ -176,6 +176,7 @@ namespace ff_util { || !mode.GetReal("hard_limit_vel", &info.hard_limit_vel) || !mode.GetReal("hard_limit_alpha", &info.hard_limit_alpha) || !mode.GetReal("hard_limit_accel", &info.hard_limit_accel) + || !mode.GetReal("tolerance_pos_endpoint", &info.tolerance_pos_endpoint) || !mode.GetReal("tolerance_pos", &info.tolerance_pos) || !mode.GetReal("tolerance_att", &info.tolerance_att) || !mode.GetReal("tolerance_omega", &info.tolerance_omega) diff --git a/simulation/scripts/spawn_model b/simulation/scripts/spawn_model index 2edf70724e..359ea73147 100755 --- a/simulation/scripts/spawn_model +++ b/simulation/scripts/spawn_model @@ -18,18 +18,21 @@ # Author: John Hsu, Dave Coleman # -import rospy, sys, os, time +import os +import re import string +import sys +import time import warnings -import re - -from gazebo_ros import gazebo_interface +import rospy +import tf.transformations as tft from gazebo_msgs.msg import * from gazebo_msgs.srv import * -from std_srvs.srv import Empty +from gazebo_ros import gazebo_interface from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Wrench -import tf.transformations as tft +from std_srvs.srv import Empty + def excepthook(*args): print('caught', STDERR) diff --git 
a/simulation/src/gazebo_model_plugin_eps/gazebo_model_plugin_eps.cc b/simulation/src/gazebo_model_plugin_eps/gazebo_model_plugin_eps.cc index 61bba2983b..24870bc6fc 100644 --- a/simulation/src/gazebo_model_plugin_eps/gazebo_model_plugin_eps.cc +++ b/simulation/src/gazebo_model_plugin_eps/gazebo_model_plugin_eps.cc @@ -90,8 +90,8 @@ class GazeboModelPluginEps : public FreeFlyerModelPlugin { GazeboModelPluginEps() : FreeFlyerModelPlugin("eps_driver", "", true), fsm_(UNKNOWN, std::bind(&GazeboModelPluginEps::StateCallback, this, std::placeholders::_1, std::placeholders::_2)), - rate_(10.0), distance_(0.05), delay_(5.0), lock_(false), - battery_capacity_(3.4), battery_charge_(3.0), + rate_(10.0), distance_near_(0.05), distance_far_(0.05), delay_(5.0), + lock_(false), battery_capacity_(3.4), battery_charge_(3.0), battery_discharge_rate_(0.005) { // In an unknown state, if we are sensed to be near or far from a berth // then update to either a docked or undocked state. @@ -116,7 +116,6 @@ class GazeboModelPluginEps : public FreeFlyerModelPlugin { // Setup a timer to expire after a givent delay timer_delay_.stop(); timer_delay_.start(); - ros::Duration(1.0).sleep(); // Create a virtual link Lock(true); // We are now in the process of docking @@ -196,8 +195,10 @@ class GazeboModelPluginEps : public FreeFlyerModelPlugin { // Get parameters if (sdf->HasElement("rate")) rate_ = sdf->Get("rate"); - if (sdf->HasElement("distance")) - distance_ = sdf->Get("distance"); + if (sdf->HasElement("distance_near")) + distance_near_ = sdf->Get("distance_near"); + if (sdf->HasElement("distance_far")) + distance_far_ = sdf->Get("distance_far"); if (sdf->HasElement("delay")) delay_ = sdf->Get("delay"); // Setup telemetry publishers @@ -445,24 +446,32 @@ class GazeboModelPluginEps : public FreeFlyerModelPlugin { // There are smarter ways to do this sort of search (kNN) but this we are // only expecting fewer than 6 berths, it seems like needless optimization. 
bool near = false; + bool far = true; for (nearest_ = berths_.begin(); nearest_ != berths_.end(); nearest_++) { #if GAZEBO_MAJOR_VERSION > 7 - if (GetModel()->WorldPose().Pos().Distance( - nearest_->second.Pos()) > distance_) continue; + double distance = GetModel()->WorldPose().Pos().Distance( + nearest_->second.Pos()); #else - if (GetModel()->GetWorldPose().Ign().Pos().Distance( - nearest_->second.Pos()) > distance_) continue; + double distance = GetModel()->GetWorldPose().Ign().Pos().Distance( + nearest_->second.Pos()); #endif - // Now, send an event to the FSM to signal that we are close! - fsm_.Update(SENSE_NEAR); + // There should always only be one dock that we are close to - near = true; - break; + if (distance < distance_near_) { + near = true; + break; + } + if (distance < distance_far_) + far = false; } - + // Send an NEAR event if we are close to a berth + if (near) { + fsm_.Update(SENSE_NEAR); // Send an FAR event if we aren't close to any berth - if (!near) + } else if (far) { fsm_.Update(SENSE_FAR); + } + // Whatever the result, publish the dock state to be used by other entites // in the system, and in particular the dock procedure. 
ff_hw_msgs::EpsDockStateStamped msg; @@ -668,7 +677,7 @@ class GazeboModelPluginEps : public FreeFlyerModelPlugin { private: ff_util::FSM fsm_; - double rate_, distance_, delay_; + double rate_, distance_near_, distance_far_ , delay_; bool lock_; double battery_capacity_, battery_charge_, battery_discharge_rate_; event::ConnectionPtr update_; diff --git a/tools/ekf_bag/scripts/bag_and_parameter_sweep.py b/tools/ekf_bag/scripts/bag_and_parameter_sweep.py index 444b0915cf..d06b7f8ee5 100755 --- a/tools/ekf_bag/scripts/bag_and_parameter_sweep.py +++ b/tools/ekf_bag/scripts/bag_and_parameter_sweep.py @@ -23,46 +23,69 @@ import parameter_sweep import utilities -def bag_and_parameter_sweep(bag_files, output_dir, map_file, image_topic, gnc_config): - for bag_file in bag_files: - # Save parameter sweep output in different directory for each bagfile, name directory using bagfile - bag_name_prefix = os.path.splitext(os.path.basename(bag_file))[0] - bag_output_dir = os.path.join(output_dir, bag_name_prefix) - parameter_sweep.make_values_and_parameter_sweep(bag_output_dir, bag_file, map_file, image_topic, gnc_config) +def bag_and_parameter_sweep(bag_files, output_dir, map_file, image_topic, gnc_config): + for bag_file in bag_files: + # Save parameter sweep output in different directory for each bagfile, name directory using bagfile + bag_name_prefix = os.path.splitext(os.path.basename(bag_file))[0] + bag_output_dir = os.path.join(output_dir, bag_name_prefix) + parameter_sweep.make_values_and_parameter_sweep( + bag_output_dir, bag_file, map_file, image_topic, gnc_config + ) -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('map_file', help='Full path to map file.') - parser.add_argument('gnc_config', help='Full path to gnc config file.') - parser.add_argument('-d', '--bag_directory', default=None, help='Full path to directory containing bag files.') - parser.add_argument( - '-o', - '--output_directory', - default=None, - help= - 'Full path 
to output directory where files will be saved. If not specified, timestamped directory will be created in current path.' - ) - parser.add_argument('-r', '--robot_name', metavar='ROBOT', help='Specify the robot to use (just name, not path).') - parser.add_argument('-i', '--image_topic', dest='image_topic', default=None, help='Use specified image topic.') - parser.add_argument('--save_stats', action='store_true', help='Save stats to csv file.') - parser.add_argument('--make_plots', type=bool, default=True, help='Make pdf of plots of results.') +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("map_file", help="Full path to map file.") + parser.add_argument("gnc_config", help="Full path to gnc config file.") + parser.add_argument( + "-d", + "--bag_directory", + default=None, + help="Full path to directory containing bag files.", + ) + parser.add_argument( + "-o", + "--output_directory", + default=None, + help="Full path to output directory where files will be saved. If not specified, timestamped directory will be created in current path.", + ) - args, args_unkonown = parser.parse_known_args() + parser.add_argument( + "-r", + "--robot_name", + metavar="ROBOT", + help="Specify the robot to use (just name, not path).", + ) + parser.add_argument( + "-i", + "--image_topic", + dest="image_topic", + default=None, + help="Use specified image topic.", + ) + parser.add_argument( + "--save_stats", action="store_true", help="Save stats to csv file." + ) + parser.add_argument( + "--make_plots", type=bool, default=True, help="Make pdf of plots of results." 
+ ) - bag_directory = args.bag_directory - if bag_directory == None: - bag_directory = os.getcwd() + args, args_unkonown = parser.parse_known_args() - if not os.path.exists(bag_directory): - print("Bag directory {} does not exist.".format(bag_directory)) - exit() - bag_files = utilities.get_files(bag_directory, '*.bag') - print("Found {} bag files in {}.".format(len(bag_files), bag_directory)) + bag_directory = args.bag_directory + if bag_directory == None: + bag_directory = os.getcwd() - output_dir = utilities.create_directory(args.output_directory) - print('Output directory for results is {}'.format(output_dir)) + if not os.path.exists(bag_directory): + print(("Bag directory {} does not exist.".format(bag_directory))) + exit() + bag_files = utilities.get_files(bag_directory, "*.bag") + print(("Found {} bag files in {}.".format(len(bag_files), bag_directory))) - bag_and_parameter_sweep(bag_files, output_dir, args.map_file, args.image_topic, args.gnc_config) + output_dir = utilities.create_directory(args.output_directory) + print(("Output directory for results is {}".format(output_dir))) + bag_and_parameter_sweep( + bag_files, output_dir, args.map_file, args.image_topic, args.gnc_config + ) diff --git a/tools/ekf_bag/scripts/bag_sweep.py b/tools/ekf_bag/scripts/bag_sweep.py index 77b3066f44..a4b1728dde 100755 --- a/tools/ekf_bag/scripts/bag_sweep.py +++ b/tools/ekf_bag/scripts/bag_sweep.py @@ -21,9 +21,9 @@ import itertools import multiprocessing import os -import pandas as pd import ekf_graph +import pandas as pd import utilities @@ -33,88 +33,118 @@ # library call @utilities.full_traceback def test_on_bag(bag_file, output_dir, args): - print("Running EKF test on {}".format(bag_file)) - name_prefix = os.path.splitext(os.path.basename(bag_file))[0] - ekf_output_file = os.path.join(output_dir, name_prefix + '_ekf.txt') - pdf_output_file = os.path.join(output_dir, name_prefix + '_plots.pdf') - results_csv_output_file = os.path.join(output_dir, name_prefix + 
'_results.csv') - options = ekf_graph.RunEKFOptions(bag_file, args.map_file, ekf_output_file, pdf_output_file, results_csv_output_file) - options.set_bag_sweep_params(args) - options.features_in_bag = True - ekf_graph.run_ekf_and_save_stats(options) + print(("Running EKF test on {}".format(bag_file))) + name_prefix = os.path.splitext(os.path.basename(bag_file))[0] + ekf_output_file = os.path.join(output_dir, name_prefix + "_ekf.txt") + pdf_output_file = os.path.join(output_dir, name_prefix + "_plots.pdf") + results_csv_output_file = os.path.join(output_dir, name_prefix + "_results.csv") + options = ekf_graph.RunEKFOptions( + bag_file, + args.map_file, + ekf_output_file, + pdf_output_file, + results_csv_output_file, + ) + options.set_bag_sweep_params(args) + options.features_in_bag = True + ekf_graph.run_ekf_and_save_stats(options) # Helper that unpacks arguments and calls original function # Aides running jobs in parallel as pool only supports # passing a single argument to workers def test_on_bag_helper(zipped_vals): - return test_on_bag(*zipped_vals) + return test_on_bag(*zipped_vals) -#TODO: replace args with individual params +# TODO: replace args with individual params def bag_sweep(bag_files, output_dir, args): - num_processes = 6 - pool = multiprocessing.Pool(num_processes) - # izip arguments so we can pass as one argument to pool worker - pool.map(test_on_bag_helper, itertools.izip(bag_files, itertools.repeat(output_dir), itertools.repeat(args))) + num_processes = 6 + pool = multiprocessing.Pool(num_processes) + # izip arguments so we can pass as one argument to pool worker + pool.map( + test_on_bag_helper, + list(zip(bag_files, itertools.repeat(output_dir), itertools.repeat(args))), + ) def combined_results(csv_files): - dataframes = [pd.read_csv(file) for file in csv_files] - if not dataframes: - print('Failed to create dataframes') - exit() - names = dataframes[0].columns - combined_dataframes = pd.DataFrame(None, None, names) - for dataframe in 
dataframes: - trimmed_dataframe = pd.DataFrame(dataframe.values[0:1], columns=names) - combined_dataframes = combined_dataframes.append(trimmed_dataframe, ignore_index=True) - return combined_dataframes + dataframes = [pd.read_csv(file) for file in csv_files] + if not dataframes: + print("Failed to create dataframes") + exit() + names = dataframes[0].columns + combined_dataframes = pd.DataFrame(None, None, names) + for dataframe in dataframes: + trimmed_dataframe = pd.DataFrame(dataframe.values[0:1], columns=names) + combined_dataframes = combined_dataframes.append( + trimmed_dataframe, ignore_index=True + ) + return combined_dataframes def combine_results_in_csv_file(bag_files, output_dir): - # Don't save this as *stats.csv otherwise it will be including when averaging bag results in average_results.py - combined_results_csv_file = os.path.join(output_dir, 'bag_sweep_stats_combined.csv') - output_csv_files = [] - for bag_file in bag_files: - bag_name = os.path.splitext(os.path.basename(bag_file))[0] - output_csv_files.append(os.path.join(output_dir, bag_name + '_results.csv')) - combined_dataframe = combined_results(output_csv_files) - combined_dataframe.to_csv(combined_results_csv_file, index=False) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('map_file', help='Full path to map file.') - parser.add_argument('gnc_config', help='Full path to gnc config file.') - parser.add_argument('-d', '--bag_directory', default=None, help='Full path to directory containing bag files.') - parser.add_argument( - '-o', - '--output_directory', - default=None, - help= - 'Full path to output directory where files will be saved. If not specified, timestamped directory will be created in current path.' 
- ) - - parser.add_argument('-r', '--robot_name', metavar='ROBOT', help='Specify the robot to use (just name, not path).') - parser.add_argument('-i', '--image_topic', dest='image_topic', default=None, help='Use specified image topic.') - parser.add_argument('--save_stats', action='store_true', help='Save stats to csv file.') - parser.add_argument('--make_plots', type=bool, default=True, help='Make pdf of plots of results.') - - args, args_unkonown = parser.parse_known_args() - - bag_directory = args.bag_directory - if bag_directory == None: - bag_directory = os.getcwd() - - if not os.path.exists(bag_directory): - print("Bag directory {} does not exist.".format(bag_directory)) - exit() - bag_files = utilities.get_files(bag_directory, '*.bag') - print("Found {} bag files in {}.".format(len(bag_files), bag_directory)) - - output_dir = utilities.create_directory(args.output_directory) - print('Output directory for results is {}'.format(output_dir)) - - bag_sweep(bag_files, output_dir, args) - combine_results_in_csv_file(bag_files, output_dir) + # Don't save this as *stats.csv otherwise it will be including when averaging bag results in average_results.py + combined_results_csv_file = os.path.join(output_dir, "bag_sweep_stats_combined.csv") + output_csv_files = [] + for bag_file in bag_files: + bag_name = os.path.splitext(os.path.basename(bag_file))[0] + output_csv_files.append(os.path.join(output_dir, bag_name + "_results.csv")) + combined_dataframe = combined_results(output_csv_files) + combined_dataframe.to_csv(combined_results_csv_file, index=False) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("map_file", help="Full path to map file.") + parser.add_argument("gnc_config", help="Full path to gnc config file.") + parser.add_argument( + "-d", + "--bag_directory", + default=None, + help="Full path to directory containing bag files.", + ) + parser.add_argument( + "-o", + "--output_directory", + default=None, + help="Full path 
to output directory where files will be saved. If not specified, timestamped directory will be created in current path.", + ) + + parser.add_argument( + "-r", + "--robot_name", + metavar="ROBOT", + help="Specify the robot to use (just name, not path).", + ) + parser.add_argument( + "-i", + "--image_topic", + dest="image_topic", + default=None, + help="Use specified image topic.", + ) + parser.add_argument( + "--save_stats", action="store_true", help="Save stats to csv file." + ) + parser.add_argument( + "--make_plots", type=bool, default=True, help="Make pdf of plots of results." + ) + + args, args_unkonown = parser.parse_known_args() + + bag_directory = args.bag_directory + if bag_directory == None: + bag_directory = os.getcwd() + + if not os.path.exists(bag_directory): + print(("Bag directory {} does not exist.".format(bag_directory))) + exit() + bag_files = utilities.get_files(bag_directory, "*.bag") + print(("Found {} bag files in {}.".format(len(bag_files), bag_directory))) + + output_dir = utilities.create_directory(args.output_directory) + print(("Output directory for results is {}".format(output_dir))) + + bag_sweep(bag_files, output_dir, args) + combine_results_in_csv_file(bag_files, output_dir) diff --git a/tools/ekf_bag/scripts/config_creator.py b/tools/ekf_bag/scripts/config_creator.py index dc4ec9f994..546402ddbe 100644 --- a/tools/ekf_bag/scripts/config_creator.py +++ b/tools/ekf_bag/scripts/config_creator.py @@ -19,32 +19,32 @@ def check_and_fill_line(value_map, config_file_line): - line_strings = config_file_line.split() - # Overwrite val if config variable is in value map - if len(line_strings) > 0 and line_strings[0] in value_map: - return line_strings[0] + ' = ' + str(value_map[line_strings[0]]) + '\n' - return config_file_line + line_strings = config_file_line.split() + # Overwrite val if config variable is in value map + if len(line_strings) > 0 and line_strings[0] in value_map: + return line_strings[0] + " = " + str(value_map[line_strings[0]]) 
+ "\n" + return config_file_line def fill_in_values(original_gnc_config, value_map, new_gnc_config): - original_config_file = open(original_gnc_config, 'r') - new_config_file = open(new_gnc_config, 'w') - for config_file_line in original_config_file: - new_config_file.write(check_and_fill_line(value_map, config_file_line)) + original_config_file = open(original_gnc_config, "r") + new_config_file = open(new_gnc_config, "w") + for config_file_line in original_config_file: + new_config_file.write(check_and_fill_line(value_map, config_file_line)) def make_value_map(values, value_names): - value_map = {} - if len(values) != len(value_names): - print("values and value_names not same length!") - exit() + value_map = {} + if len(values) != len(value_names): + print("values and value_names not same length!") + exit() - for index, value_name in enumerate(value_names): - value_map[value_name] = values[index] + for index, value_name in enumerate(value_names): + value_map[value_name] = values[index] - return value_map + return value_map def make_config(values, value_names, original_gnc_config, new_gnc_config): - value_map = make_value_map(values, value_names) - fill_in_values(original_gnc_config, value_map, new_gnc_config) + value_map = make_value_map(values, value_names) + fill_in_values(original_gnc_config, value_map, new_gnc_config) diff --git a/tools/ekf_bag/scripts/create_averaged_plots.py b/tools/ekf_bag/scripts/create_averaged_plots.py index f8634a7d24..444e640122 100755 --- a/tools/ekf_bag/scripts/create_averaged_plots.py +++ b/tools/ekf_bag/scripts/create_averaged_plots.py @@ -22,37 +22,45 @@ import plot_creator import utilities + def get_subdirectory_files(file_string): - subdirectory_csv_files = [] - _, subdirectories, _ = os.walk(os.getcwd()).next() - for subdirectory in subdirectories: - subdirectory_path = os.path.join(os.getcwd(), subdirectory) - for subdirectory_csv_file in utilities.get_files(subdirectory_path, file_string): - 
subdirectory_csv_files.append(subdirectory_csv_file) - return subdirectory_csv_files + subdirectory_csv_files = [] + _, subdirectories, _ = next(os.walk(os.getcwd())) + for subdirectory in subdirectories: + subdirectory_path = os.path.join(os.getcwd(), subdirectory) + for subdirectory_csv_file in utilities.get_files( + subdirectory_path, file_string + ): + subdirectory_csv_files.append(subdirectory_csv_file) + return subdirectory_csv_files + def remove_repeat_job_id_rows(dataframe): - return dataframe.drop_duplicates('job_id') + return dataframe.drop_duplicates("job_id") + def average_same_jobs(dataframe): - return dataframe.groupby(['job_id']).mean() + return dataframe.groupby(["job_id"]).mean() + # Averages results from all *results.csv files in subdirectories in a directory. Only looks for files at a depth of one (one subdirectory, doesn't search recursively) -if __name__ == '__main__': - dataframes = [] - - results_csv_files = get_subdirectory_files('*results.csv') - results_dataframe = plot_creator.load_dataframe(results_csv_files) - averaged_results_dataframe = average_same_jobs(results_dataframe) - averaged_results_dataframe.reset_index(inplace=True) - dataframes.append(averaged_results_dataframe) - - values_csv_files = get_subdirectory_files('*values.csv') - values_dataframe = plot_creator.load_dataframe(values_csv_files) - values_dataframe.columns.name = 'values' - values_dataframe = remove_repeat_job_id_rows(values_dataframe) - dataframes.append(values_dataframe) - a = values_dataframe['job_id'] - - pdf_filename = 'averaged_result_plots.pdf' - plot_creator.create_pdf(dataframes, pdf_filename, True, os.path.basename(os.getcwd())) +if __name__ == "__main__": + dataframes = [] + + results_csv_files = get_subdirectory_files("*results.csv") + results_dataframe = plot_creator.load_dataframe(results_csv_files) + averaged_results_dataframe = average_same_jobs(results_dataframe) + averaged_results_dataframe.reset_index(inplace=True) + 
dataframes.append(averaged_results_dataframe) + + values_csv_files = get_subdirectory_files("*values.csv") + values_dataframe = plot_creator.load_dataframe(values_csv_files) + values_dataframe.columns.name = "values" + values_dataframe = remove_repeat_job_id_rows(values_dataframe) + dataframes.append(values_dataframe) + a = values_dataframe["job_id"] + + pdf_filename = "averaged_result_plots.pdf" + plot_creator.create_pdf( + dataframes, pdf_filename, True, os.path.basename(os.getcwd()) + ) diff --git a/tools/ekf_bag/scripts/create_plots.py b/tools/ekf_bag/scripts/create_plots.py index f33349a0c6..87fa20b837 100755 --- a/tools/ekf_bag/scripts/create_plots.py +++ b/tools/ekf_bag/scripts/create_plots.py @@ -23,37 +23,51 @@ import plot_creator import utilities -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--directory', default=None, help='Directory where results csv files are stored') - parser.add_argument('--write_values', '-v', action='store_true', help='Write parameter sweep values to a plot in pdf') - parser.add_argument('--write_values_table', '-t', action='store_true', help='Write parameter sweep values to a table in pdf') - - args = parser.parse_args() - if args.write_values_table and not args.write_values: - print("If write_values_table enabled, write_values must be as well.") - exit() - - directory = args.directory - if directory == None: - directory = os.getcwd() - - dataframes = [] - - results_filestring = '*results.csv' - results_files = utilities.get_files(directory, results_filestring) - if len(results_files) == 0: - print("No results csv files found in directory " + directory) - exit() - - dataframes.append(plot_creator.load_dataframe(results_files)) - - if args.write_values: - values_filestring = '*values.csv' - values_files = utilities.get_files(directory, values_filestring) - values_dataframe = plot_creator.load_dataframe(values_files) - values_dataframe.columns.name = 'values' - 
dataframes.append(values_dataframe) - - pdf_filename = 'result_plots.pdf' - plot_creator.create_pdf(dataframes, pdf_filename, args.write_values_table, os.path.basename(directory)) +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--directory", default=None, help="Directory where results csv files are stored" + ) + parser.add_argument( + "--write_values", + "-v", + action="store_true", + help="Write parameter sweep values to a plot in pdf", + ) + parser.add_argument( + "--write_values_table", + "-t", + action="store_true", + help="Write parameter sweep values to a table in pdf", + ) + + args = parser.parse_args() + if args.write_values_table and not args.write_values: + print("If write_values_table enabled, write_values must be as well.") + exit() + + directory = args.directory + if directory == None: + directory = os.getcwd() + + dataframes = [] + + results_filestring = "*results.csv" + results_files = utilities.get_files(directory, results_filestring) + if len(results_files) == 0: + print(("No results csv files found in directory " + directory)) + exit() + + dataframes.append(plot_creator.load_dataframe(results_files)) + + if args.write_values: + values_filestring = "*values.csv" + values_files = utilities.get_files(directory, values_filestring) + values_dataframe = plot_creator.load_dataframe(values_files) + values_dataframe.columns.name = "values" + dataframes.append(values_dataframe) + + pdf_filename = "result_plots.pdf" + plot_creator.create_pdf( + dataframes, pdf_filename, args.write_values_table, os.path.basename(directory) + ) diff --git a/tools/ekf_bag/scripts/ekf_diff b/tools/ekf_bag/scripts/ekf_diff index 16e1efd6cd..e9e2ab89f0 100755 --- a/tools/ekf_bag/scripts/ekf_diff +++ b/tools/ekf_bag/scripts/ekf_diff @@ -17,17 +17,20 @@ # License for the specific language governing permissions and limitations # under the License. 
-from scipy.spatial.transform import Slerp -from scipy.spatial.transform import Rotation as R import matplotlib +from scipy.spatial.transform import Rotation as R +from scipy.spatial.transform import Slerp + matplotlib.use('pdf') -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages import argparse +import copy +import math import os import os.path -import math -import copy + +import matplotlib.pyplot as plt +from matplotlib.backends.backend_pdf import PdfPages + def quat_to_euler(q): q2q2 = q[1] * q[1] diff --git a/tools/ekf_bag/scripts/ekf_graph.py b/tools/ekf_bag/scripts/ekf_graph.py index 1f8312362d..612e33e55e 100755 --- a/tools/ekf_bag/scripts/ekf_graph.py +++ b/tools/ekf_bag/scripts/ekf_graph.py @@ -17,767 +17,1186 @@ # License for the specific language governing permissions and limitations # under the License. -import environment - import csv import math import os import sys import time +import environment import matplotlib -matplotlib.use('pdf') -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages +matplotlib.use("pdf") +import matplotlib.pyplot as plt import numpy as np -from numpy.linalg import norm import scipy.spatial.transform +from matplotlib.backends.backend_pdf import PdfPages +from numpy.linalg import norm -def run_ekf(astrobee_map, - astrobee_bag, - output_file, - ekf_in_bag=False, - features_in_bag=False, - image_topic=None, - gnc_config='gnc.config'): - cmd = 'bag_to_csv' if ekf_in_bag else 'bag_to_csv --run_ekf %s' % ('' if features_in_bag else '--gen_features') - - robot_bias = os.path.dirname(os.path.abspath(astrobee_bag)) + '/imu_bias.config' - cmd = 'rosrun ekf_bag %s %s %s %s %s' % (cmd, astrobee_map, astrobee_bag, output_file, gnc_config) - print('Not running EKF.' if ekf_in_bag else 'Running EKF.') - if not ekf_in_bag: - print('Using features from bag.' 
if features_in_bag else 'Generating features from image.') - if os.path.isfile(robot_bias): - cmd += ' ' + robot_bias - if image_topic is not None: - cmd += ' --image_topic ' + image_topic - os.system(cmd) +def run_ekf( + astrobee_map, + astrobee_bag, + output_file, + ekf_in_bag=False, + features_in_bag=False, + image_topic=None, + gnc_config="gnc.config", +): + cmd = ( + "bag_to_csv" + if ekf_in_bag + else "bag_to_csv --run_ekf %s" % ("" if features_in_bag else "--gen_features") + ) + + robot_bias = os.path.dirname(os.path.abspath(astrobee_bag)) + "/imu_bias.config" + cmd = "rosrun ekf_bag %s %s %s %s %s" % ( + cmd, + astrobee_map, + astrobee_bag, + output_file, + gnc_config, + ) + print(("Not running EKF." if ekf_in_bag else "Running EKF.")) + if not ekf_in_bag: + print( + ( + "Using features from bag." + if features_in_bag + else "Generating features from image." + ) + ) + if os.path.isfile(robot_bias): + cmd += " " + robot_bias + if image_topic is not None: + cmd += " --image_topic " + image_topic + os.system(cmd) def quat_to_euler(q): - q2q2 = q[1] * q[1] - x = math.atan2(2 * (q[0] * q[3] + q[1] * q[2]), 1 - 2 * (q[0] * q[0] + q2q2)) - arg = max(-1.0, min(1.0, 2 * (q[1] * q[3] - q[0] * q[2]))) - y = math.asin(arg) - z = math.atan2(2 * (q[0] * q[1] + q[2] * q[3]), 1 - 2 * (q2q2 + q[2] * q[2])) - return (x, y, z) + q2q2 = q[1] * q[1] + x = math.atan2(2 * (q[0] * q[3] + q[1] * q[2]), 1 - 2 * (q[0] * q[0] + q2q2)) + arg = max(-1.0, min(1.0, 2 * (q[1] * q[3] - q[0] * q[2]))) + y = math.asin(arg) + z = math.atan2(2 * (q[0] * q[1] + q[2] * q[3]), 1 - 2 * (q2q2 + q[2] * q[2])) + return (x, y, z) # RMSE of matrix interpreted as n (dim(rows) vectors of dim(col) def rmse_matrix(error_matrix): - return np.sqrt(np.mean(np.sum(np.square(error_matrix), axis=1))) + return np.sqrt(np.mean(np.sum(np.square(error_matrix), axis=1))) # Removes the ith entry of a_xs, a_ys, and a_zs if a_times[i] is # not in b_times[i]. 
Assumes timestamp vectors are sorted def prune_missing_timestamps(a_xs, a_ys, a_zs, a_times, b_times): - a_index = 0 - missing_indices = [] - pruned_a_matrix = np.empty(shape=(len(b_times), 3)) - for b_index, b_time in enumerate(b_times): - while not np.isclose(a_times[a_index], b_time, rtol=0) and a_times[a_index] < b_time and a_index < len(a_times): - a_index += 1 - if not np.isclose(a_times[a_index], b_time, rtol=0, atol=0.02): - print('Failed to find a time close to b time.') - missing_indices.append(b_index) - pruned_a_matrix[b_index] = np.array([a_xs[a_index], a_ys[a_index], a_zs[a_index]]) - return pruned_a_matrix, missing_indices + a_index = 0 + missing_indices = [] + pruned_a_matrix = np.empty(shape=(len(b_times), 3)) + for b_index, b_time in enumerate(b_times): + while ( + not np.isclose(a_times[a_index], b_time, rtol=0) + and a_times[a_index] < b_time + and a_index < len(a_times) + ): + a_index += 1 + if not np.isclose(a_times[a_index], b_time, rtol=0, atol=0.02): + print("Failed to find a time close to b time.") + missing_indices.append(b_index) + pruned_a_matrix[b_index] = np.array( + [a_xs[a_index], a_ys[a_index], a_zs[a_index]] + ) + return pruned_a_matrix, missing_indices + def orientation_rmse(a_xs, a_ys, a_zs, b_xs, b_ys, b_zs, a_times, b_times): - a_index = 0 - mean_squared_orientation_error = 0 - for b_index, b_time in enumerate(b_times): - while not np.isclose(a_times[a_index], b_time, rtol=0) and a_times[a_index] < b_time and a_index < len(a_times): - a_index += 1 - if not np.isclose(a_times[a_index], b_time, rtol=0, atol=0.02): - print('Failed to find a time close to b time.') - continue - a_rot = scipy.spatial.transform.Rotation.from_euler('ZYX', [a_xs[a_index], a_ys[a_index], a_zs[a_index]], degrees=False) - b_rot = scipy.spatial.transform.Rotation.from_euler('ZYX', [b_xs[b_index], b_ys[b_index], b_zs[b_index]], degrees=False) - rot_diff = a_rot.inv()*b_rot - rot_squared_error = np.inner(rot_diff.as_rotvec(), rot_diff.as_rotvec()) - 
mean_squared_orientation_error += (rot_squared_error - mean_squared_orientation_error) / (b_index + 1) - return math.sqrt(mean_squared_orientation_error) + a_index = 0 + mean_squared_orientation_error = 0 + for b_index, b_time in enumerate(b_times): + while ( + not np.isclose(a_times[a_index], b_time, rtol=0) + and a_times[a_index] < b_time + and a_index < len(a_times) + ): + a_index += 1 + if not np.isclose(a_times[a_index], b_time, rtol=0, atol=0.02): + print("Failed to find a time close to b time.") + continue + a_rot = scipy.spatial.transform.Rotation.from_euler( + "ZYX", [a_xs[a_index], a_ys[a_index], a_zs[a_index]], degrees=False + ) + b_rot = scipy.spatial.transform.Rotation.from_euler( + "ZYX", [b_xs[b_index], b_ys[b_index], b_zs[b_index]], degrees=False + ) + rot_diff = a_rot.inv() * b_rot + rot_squared_error = np.inner(rot_diff.as_rotvec(), rot_diff.as_rotvec()) + mean_squared_orientation_error += ( + rot_squared_error - mean_squared_orientation_error + ) / (b_index + 1) + return math.sqrt(mean_squared_orientation_error) # RMSE between two sequences of timestamped positions. Prunes timestamped positions in sequence a # not present in sequence b. def rmse_timestamped_sequences(a_xs, a_ys, a_zs, a_times, b_xs, b_ys, b_zs, b_times): - a_positions, missing_indices = prune_missing_timestamps(a_xs, a_ys, a_zs, a_times, b_times) - b_positions = np.column_stack((b_xs, b_ys, b_zs)) - for i in missing_indices: - a_positions = np.delete(a_positions, i, 0) - b_positions = np.delete(b_positions, i, 0) - return rmse_matrix(a_positions - b_positions) + a_positions, missing_indices = prune_missing_timestamps( + a_xs, a_ys, a_zs, a_times, b_times + ) + b_positions = np.column_stack((b_xs, b_ys, b_zs)) + for i in missing_indices: + a_positions = np.delete(a_positions, i, 0) + b_positions = np.delete(b_positions, i, 0) + return rmse_matrix(a_positions - b_positions) -#TODO: This does some sort of l2 on stddevs, is this what we want? 
+# TODO: This does some sort of l2 on stddevs, is this what we want? def covariance_map(ekf, cov_1_index, cov_2_index, cov_3_index): - return map(lambda (x, y, z): math.sqrt(x + y + z), zip(ekf[cov_1_index], ekf[cov_2_index], ekf[cov_3_index])) + return [ + math.sqrt(x_y_z[0] + x_y_z[1] + x_y_z[2]) + for x_y_z in zip(ekf[cov_1_index], ekf[cov_2_index], ekf[cov_3_index]) + ] + def integrated_velocities(start_x, vs, ts): - delta_times = [j - i for i, j in zip(ts[:-1], ts[1:])] - # Make sure times are same length as velocities, ignore last velocity - delta_times.append(0) - x_increments = [velocity * delta_t for velocity, delta_t in zip(vs, delta_times)] - cumulative_x_increments = np.cumsum(x_increments) - xs = [start_x + cumulative_x_increment for cumulative_x_increment in cumulative_x_increments] - return xs + delta_times = [j - i for i, j in zip(ts[:-1], ts[1:])] + # Make sure times are same length as velocities, ignore last velocity + delta_times.append(0) + x_increments = [velocity * delta_t for velocity, delta_t in zip(vs, delta_times)] + cumulative_x_increments = np.cumsum(x_increments) + xs = [ + start_x + cumulative_x_increment + for cumulative_x_increment in cumulative_x_increments + ] + return xs class EkfLog(object): - - def __init__(self, filename, start_time=float('-inf'), end_time=float('inf')): - self.ekf = {'t': [], 'x': [], 'y': [], 'z': [], 'angle1': [], 'angle2': [], 'angle3': [], \ - 'vx': [], 'vy': [], 'vz': [], 'ox': [], 'oy': [], 'oz': [], 'ax': [], 'ay': [], 'az': [], \ - 'abx': [], 'aby': [], 'abz': [], 'gbx': [], 'gby': [], 'gbz': [], 'c': [], 's': [], \ - 'ml_count': [], 'of_count': [], 'mahal' : []} - for i in range(1, 16): - self.ekf['cov_' + str(i)] = [] - - self.mahal = {'times': [], 'boxes': []} - self.CAMERA_RES = (1280, 960) - self.HEATMAP_SIZE = (self.CAMERA_RES[0] / 32, self.CAMERA_RES[1] / 32) - self.vl_heatmap = np.zeros((self.HEATMAP_SIZE[0] + 1, self.HEATMAP_SIZE[1] + 1)) - self.of_heatmap = np.zeros((self.HEATMAP_SIZE[0] + 1, 
self.HEATMAP_SIZE[1] + 1)) - - self.vl = {'t': [], 'count': [], 'x': [], 'y': [], 'z': [], 'angle1': [], 'angle2': [], 'angle3': []} - self.of = {'t': [], 'count': [], 'oldest': [], 'youngest': [], 'median': []} - self.gt = {'t': [], 'x': [], 'y': [], 'z': [], 'angle1': [], 'angle2': [], 'angle3': []} - - f = open(filename, 'r') - for l in f: - p = l.split(' ') - if l.startswith('EKF '): - t = float(p[1]) - if t < start_time or t > end_time: - continue - self.ekf['t'].append(float(p[1])) - self.ekf['x'].append(float(p[2])) - self.ekf['y'].append(float(p[3])) - self.ekf['z'].append(float(p[4])) - self.ekf['angle1'].append(float(p[5]) * 180 / math.pi) - self.ekf['angle2'].append(float(p[6]) * 180 / math.pi) - self.ekf['angle3'].append(float(p[7]) * 180 / math.pi) - self.ekf['vx'].append(float(p[8])) - self.ekf['vy'].append(float(p[9])) - self.ekf['vz'].append(float(p[10])) - self.ekf['ox'].append(float(p[11]) * 180 / math.pi) - self.ekf['oy'].append(float(p[12]) * 180 / math.pi) - self.ekf['oz'].append(float(p[13]) * 180 / math.pi) - self.ekf['ax'].append(float(p[14])) - self.ekf['ay'].append(float(p[15])) - self.ekf['az'].append(float(p[16])) - self.ekf['abx'].append(float(p[17])) - self.ekf['aby'].append(float(p[18])) - self.ekf['abz'].append(float(p[19])) - self.ekf['gbx'].append(float(p[20]) * 180 / math.pi) - self.ekf['gby'].append(float(p[21]) * 180 / math.pi) - self.ekf['gbz'].append(float(p[22]) * 180 / math.pi) - self.ekf['c'].append(int(p[23])) - self.ekf['s'].append(int(p[24])) - self.ekf['ml_count'].append(int(p[25])) - if self.ekf['ml_count'][-1] == 0: - self.ekf['ml_count'][-1] = float('nan') - self.ekf['of_count'].append(int(p[26])) - if self.ekf['of_count'][-1] == 0: - self.ekf['of_count'][-1] = float('nan') + def __init__(self, filename, start_time=float("-inf"), end_time=float("inf")): + self.ekf = { + "t": [], + "x": [], + "y": [], + "z": [], + "angle1": [], + "angle2": [], + "angle3": [], + "vx": [], + "vy": [], + "vz": [], + "ox": [], + "oy": [], 
+ "oz": [], + "ax": [], + "ay": [], + "az": [], + "abx": [], + "aby": [], + "abz": [], + "gbx": [], + "gby": [], + "gbz": [], + "c": [], + "s": [], + "ml_count": [], + "of_count": [], + "mahal": [], + } for i in range(1, 16): - self.ekf['cov_' + str(i)].append(math.sqrt(abs(float(p[26 + i])))) - # convert quaternion to euler angles - # TODO: shouldn't be using this for covariances - q = (0.5 * self.ekf['cov_1'][-1], 0.5 * self.ekf['cov_2'][-1], 0.5 * self.ekf['cov_3'][-1], 1.0) - euler = quat_to_euler(q) - self.ekf['cov_1'][-1] = euler[0] - self.ekf['cov_2'][-1] = euler[1] - self.ekf['cov_3'][-1] = euler[2] - m = [] - MAHAL_MAX = 30.0 - for i in range(50): - t = float(p[42 + i]) - if not math.isnan(t) and t > 0.0: - m.append(min(t, MAHAL_MAX)) - if len(m) > 0: - self.mahal['times'].append(float(p[1])) - self.mahal['boxes'].append(m) - self.ekf['mahal'].append(m) - elif l.startswith('OF ') and (len(p) - 3) / 4 >= 1: - t = float(p[1]) - if t > 0.0: - self.of['t'].append(t) - self.of['count'].append(int(p[2])) - times = [] - for i in range((len(p) - 3) / 4): - of_id = float(p[3 + 4 * i + 0]) - origt = float(p[3 + 4 * i + 1]) - u = float(p[3 + 4 * i + 2]) - v = float(p[3 + 4 * i + 3]) - times.append(self.of['t'][-1] - origt) - self.of_heatmap[int(round(u / self.CAMERA_RES[0] * self.HEATMAP_SIZE[0])) + self.HEATMAP_SIZE[0] / 2, - int(round(v / self.CAMERA_RES[1] * self.HEATMAP_SIZE[1])) + self.HEATMAP_SIZE[1] / 2] += 1 - self.of['oldest'].append(np.max(times)) - self.of['youngest'].append(np.min(times)) - self.of['median'].append(np.median(times)) - elif l.startswith('VL ') and (len(p) - 9) / 5 >= 1: - if float(p[1]) > -1.0: - self.vl['t'].append(float(p[1])) - self.vl['count'].append(int(p[2])) - self.vl['x'].append(float(p[3])) - self.vl['y'].append(float(p[4])) - self.vl['z'].append(float(p[5])) - self.vl['angle1'].append(float(p[6]) * 180 / math.pi) - self.vl['angle2'].append(float(p[7]) * 180 / math.pi) - self.vl['angle3'].append(float(p[8]) * 180 / math.pi) - for 
i in range((len(p) - 9) / 5): - u = float(p[9 + 5 * i + 0]) - v = float(p[9 + 5 * i + 1]) - x = float(p[9 + 5 * i + 2]) - y = float(p[9 + 5 * i + 3]) - z = float(p[9 + 5 * i + 4]) - self.vl_heatmap[int(round(u / self.CAMERA_RES[0] * self.HEATMAP_SIZE[0])) + self.HEATMAP_SIZE[0] / 2, - int(round(v / self.CAMERA_RES[1] * self.HEATMAP_SIZE[1])) + self.HEATMAP_SIZE[1] / 2] += 1 - elif l.startswith('GT '): - self.gt['t'].append(float(p[1])) - self.gt['x'].append(float(p[2])) - self.gt['y'].append(float(p[3])) - self.gt['z'].append(float(p[4])) - self.gt['angle1'].append(float(p[5]) * 180 / math.pi) - self.gt['angle2'].append(float(p[6]) * 180 / math.pi) - self.gt['angle3'].append(float(p[7]) * 180 / math.pi) - for key in self.ekf.keys(): - self.ekf[key] = np.array(self.ekf[key]) - #self.vl_heatmap = self.vl_heatmap / np.amax(self.vl_heatmap) - #self.of_heatmap = self.of_heatmap / np.amax(self.of_heatmap) - - def correct_ground_truth(self): - # find the best time offset for ground truth - best_error = float('inf') - best_offset = 0 - for i in range(125): - offset = (i - 62.5) / 62.5 - (pos_err, angle_err) = self.evaluate_error(offset) - err = 100 * pos_err + angle_err # 1 cm = 1 degree error - if err < best_error: - best_error = err - best_offset = offset - - # now actually do the shift - for i in range(len(self.gt['t'])): - self.gt['t'][i] += best_offset - return best_offset - - def evaluate_error(self, time_offset=0.0): - pos_err = 0.0 - angle_err = 0.0 - err_count = 0 - ekf_i = 0 - for gt_i in range(len(self.gt['t'])): - t = self.gt['t'][gt_i] + time_offset - while ekf_i < len(self.ekf['t']) - 1 and self.ekf['t'][ekf_i] < t: - ekf_i += 1 - if ekf_i >= len(self.ekf['t']) - 1: - break - if ekf_i == 0: - continue - # now gt_i falls between ekf_i - 1 and ekf_i, we will interpolate for position - u = (t - self.ekf['t'][ekf_i - 1]) / (self.ekf['t'][ekf_i] - self.ekf['t'][ekf_i - 1]) - x1 = np.array([self.ekf['x'][ekf_i - 1], self.ekf['y'][ekf_i - 1], self.ekf['z'][ekf_i - 
1]]) - x2 = np.array([self.ekf['x'][ekf_i], self.ekf['y'][ekf_i], self.ekf['z'][ekf_i]]) - x = x1 + u * (x2 - x1) - # but just use the older angle, not worth trouble to interpolate in python - a = np.array([self.ekf['angle1'][ekf_i - 1], self.ekf['angle2'][ekf_i - 1], self.ekf['angle3'][ekf_i - 1]]) - - err_count += 1 - pos_err += float(norm(np.array([self.gt['x'][gt_i], self.gt['y'][gt_i], self.gt['z'][gt_i]]) - x))**2 - # again, not best metric, but good enough for this - angle_err += float( - norm(np.array([self.gt['angle1'][gt_i], self.gt['angle2'][gt_i], self.gt['angle3'][gt_i]]) - a))**2 - if err_count == 0: - return (0.0, 0.0) - return (math.sqrt(pos_err / err_count), math.sqrt(angle_err / err_count)) - - def evaluate_features(self): - total_time = self.ekf['t'][len(self.ekf['t']) - 1] - self.ekf['t'][0] - total_count = sum(self.vl['count']) - max_gap = 0 - for i in range(1, len(self.vl['t'])): - max_gap = max(max_gap, self.vl['t'][i] - self.vl['t'][i - 1]) - return (total_count / total_time, max_gap) - - - def rmse_using_single_image_sparse_mapping(self): - rmse_pos = rmse_timestamped_sequences(self.ekf['x'], self.ekf['y'], self.ekf['z'], self.ekf['t'], self.vl['x'], - self.vl['y'], self.vl['z'], self.vl['t']) - int_x = integrated_velocities(self.ekf['x'][0], self.ekf['vx'], self.ekf['t']) - int_y = integrated_velocities(self.ekf['y'][0], self.ekf['vy'], self.ekf['t']) - int_z = integrated_velocities(self.ekf['z'][0], self.ekf['vz'], self.ekf['t']) - rmse_int_v = rmse_timestamped_sequences(int_x, int_y, int_z, self.ekf['t'], self.vl['x'], self.vl['y'], - self.vl['z'], self.vl['t']) - rmse_angle = orientation_rmse(self.ekf['angle1'], self.ekf['angle2'], self.ekf['angle3'], - self.vl['angle1'], self.vl['angle2'], self.vl['angle3'], self.ekf['t'], self.vl['t']) - return rmse_pos, rmse_angle, rmse_int_v - - - def get_stats(self, job_id): - stats = [] - stat_names = [] - - # job id - stats.append(job_id) - stat_names.append('job_id') - - # OF stats - 
of_count_no_nans = [0 if math.isnan(val) else val for val in self.ekf['of_count']] - avg_num_of_features = np.mean(of_count_no_nans) - stats.append(avg_num_of_features) - stat_names.append('avg_num_of_features') - - num_of_features_added = np.sum(of_count_no_nans) - num_of_features_observed = np.sum(self.of['count']) - ratio_of_features_added = num_of_features_added / num_of_features_observed if num_of_features_observed > 0 else 0 - stats.append(ratio_of_features_added) - stat_names.append('ratio_of_features_added') - - # ML stats - ml_count_no_nans = [0 if math.isnan(val) else val for val in self.ekf['ml_count']] - avg_num_ml_features = np.mean(ml_count_no_nans) - stats.append(avg_num_ml_features) - stat_names.append('avg_num_ml_features') - - num_ml_features_added = np.sum(ml_count_no_nans) - num_ml_features_observed = np.sum(self.vl['count']) - ratio_ml_features_added = num_ml_features_added / num_ml_features_observed if num_ml_features_observed > 0 else 0 - stats.append(ratio_ml_features_added) - stat_names.append('ratio_ml_features_added') - - # Mahal - avg_mahal_distance = np.mean(np.concatenate(self.mahal['boxes'])) if len(self.mahal['boxes']) > 0 else 0 - - stats.append(avg_mahal_distance) - stat_names.append('avg_mahal_distance') - - # Covariances - position_covariances = covariance_map(self.ekf, 'cov_13', 'cov_14', 'cov_15') - avg_position_covariance = np.mean(position_covariances) - stats.append(avg_position_covariance) - stat_names.append('avg_position_covariance') - - orientation_covariances = covariance_map(self.ekf, 'cov_1', 'cov_2', 'cov_3') - avg_orientation_covariance = np.mean(orientation_covariances) - stats.append(avg_orientation_covariance) - stat_names.append('avg_orientation_covariance') - - #Compare with groundtruth (if available) - # rmse_pos_vs_groundtruth, rmse_angle_vs_groundtruth = self.evaluate_error() - # stats.append(rmse_pos_vs_groundtruth) - # stat_names.append('rmse_pos_vs_groundtruth') - # stats.append(rmse_angle_vs_groundtruth) 
- # stat_names.append('rmse_orientation_vs_groundtruth') - - # Compare with poses from single-image sparse mapping registations - rmse_pos_vs_single_image_sparse_mapping, rmse_angle_vs_single_image_sparse_mapping, rmse_int = self.rmse_using_single_image_sparse_mapping( - ) - stats.append(rmse_int) - stat_names.append('integrated_rmse') - stats.append(rmse_pos_vs_single_image_sparse_mapping) - stat_names.append('rmse_pos_vs_single_image_sparse_mapping') - stats.append(rmse_angle_vs_single_image_sparse_mapping) - stat_names.append('orientation_rmse') - return stats, stat_names - - def write_results_to_csv(self, job_id, results_csv_output_file, bagfile): - with open(results_csv_output_file, 'w') as results_file: - writer = csv.writer(results_file) - stats, stat_names = self.get_stats(job_id) - bag_name = os.path.splitext(os.path.basename(bagfile))[0] - # TODO(rsoussan): better way to do this - stats.append(stats[-2]) - stat_names.append('rmse') - stats.append(bag_name) - stat_names.append('Bag') - writer.writerow(stat_names) - writer.writerow(stats) - - def plot(self, filename, output_text): - colors = ['r', 'b', 'g'] - ekf = self.ekf - gt = self.gt - vl = self.vl - of = self.of - - with PdfPages(filename) as pdf: - # positions - plt.figure() - plt.plot(ekf['t'], ekf['x'], colors[0], linewidth=0.5, label='EKF Pos. (X)') - plt.plot(ekf['t'], ekf['y'], colors[1], linewidth=0.5, label='EKF Pos. (Y)') - plt.plot(ekf['t'], ekf['z'], colors[2], linewidth=0.5, label='EKF Pos. 
(Z)') - plt.fill_between(ekf['t'], - ekf['x'] - ekf['cov_13'], - ekf['x'] + ekf['cov_13'], - facecolor=colors[0], - alpha=0.5) - plt.fill_between(ekf['t'], - ekf['y'] - ekf['cov_14'], - ekf['y'] + ekf['cov_14'], - facecolor=colors[1], - alpha=0.5) - plt.fill_between(ekf['t'], - ekf['z'] - ekf['cov_15'], - ekf['z'] + ekf['cov_15'], - facecolor=colors[2], - alpha=0.5) - plt.autoscale(False) - plt.plot(gt['t'], gt['x'], color=colors[0], linewidth=0.5, dashes=(1, 1), label='Ground Truth (X)') - plt.plot(gt['t'], gt['y'], color=colors[1], linewidth=0.5, dashes=(1, 1)) - plt.plot(gt['t'], gt['z'], color=colors[2], linewidth=0.5, dashes=(1, 1)) - plt.plot(vl['t'], - vl['x'], - color=colors[0], - linestyle='None', - marker='o', - markersize=2, - label='Observation (X)') - plt.plot(vl['t'], vl['y'], color=colors[1], linestyle='None', marker='o', markersize=2) - plt.plot(vl['t'], vl['z'], color=colors[2], linestyle='None', marker='o', markersize=2) - plt.xlabel('Time (s)') - plt.ylabel('Position (m)') - plt.title('Position') - plt.legend(prop={'size': 6}) - pdf.savefig() - - # angles - plt.figure() - plt.plot(ekf['t'], ekf['angle1'], colors[0], linewidth=0.5, label='EKF') - plt.plot(ekf['t'], ekf['angle2'], colors[1], linewidth=0.5) - plt.plot(ekf['t'], ekf['angle3'], colors[2], linewidth=0.5) - plt.fill_between(ekf['t'], - ekf['angle1'] - ekf['cov_1'], - ekf['angle1'] + ekf['cov_1'], - facecolor=colors[0], - alpha=0.5) - plt.fill_between(ekf['t'], - ekf['angle2'] - ekf['cov_2'], - ekf['angle2'] + ekf['cov_2'], - facecolor=colors[1], - alpha=0.5) - plt.fill_between(ekf['t'], - ekf['angle3'] - ekf['cov_3'], - ekf['angle3'] + ekf['cov_3'], - facecolor=colors[2], - alpha=0.5) - plt.autoscale(False) - plt.plot(gt['t'], gt['angle1'], color=colors[0], linewidth=0.25, dashes=(1, 1), label='Grount Truth') - plt.plot(gt['t'], gt['angle2'], color=colors[1], linewidth=0.25, dashes=(1, 1)) - plt.plot(gt['t'], gt['angle3'], color=colors[2], linewidth=0.25, dashes=(1, 1)) - 
plt.plot(vl['t'], vl['angle1'], color=colors[0], linestyle='None', marker='o', markersize=2, label='Observation') - plt.plot(vl['t'], vl['angle2'], color=colors[1], linestyle='None', marker='o', markersize=2) - plt.plot(vl['t'], vl['angle3'], color=colors[2], linestyle='None', marker='o', markersize=2) - plt.xlabel('Time (s)') - plt.ylabel('Angle ($^\circ$)') - plt.title('Orientation') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - - # positions - plt.figure() - max_y = 100 - - int_x = integrated_velocities(ekf['x'][0], ekf['vx'], ekf['t']) - int_y = integrated_velocities(ekf['y'][0], ekf['vy'], ekf['t']) - int_z = integrated_velocities(ekf['z'][0], ekf['vz'], ekf['t']) - - plt.plot(ekf['t'], int_x, colors[0], linewidth=0.5, label='EKF Pos. (X)') - plt.plot(ekf['t'], int_y, colors[1], linewidth=0.5, label='EKF Pos. (Y)') - plt.plot(ekf['t'], int_z, colors[2], linewidth=0.5, label='EKF Pos. (Z)') - plt.plot(vl['t'], - vl['x'], - color=colors[0], - linestyle='None', - marker='o', - markersize=2, - label='Observation (X)') - plt.plot(vl['t'], vl['y'], color=colors[1], linestyle='None', marker='o', markersize=2) - plt.plot(vl['t'], vl['z'], color=colors[2], linestyle='None', marker='o', markersize=2) - plt.xlabel('Time (s)') - plt.ylabel('Integrated Velocity (m)') - plt.title('Integrated Velocity') - plt.legend(prop={'size': 6}) - pdf.savefig() - - - - # feature counts - plt.figure() - plt.plot(ekf['t'], - ekf['ml_count'], - linestyle='None', - marker='x', - markersize=6, - color='#FF0000', - label='Integrated VL Features') - plt.plot(ekf['t'], ekf['of_count'], marker='|', markersize=2, color='#0000FF', label='Integrated OF Features') - plt.plot(vl['t'], - vl['count'], - linestyle='None', - marker='.', - markersize=2, - color='#B300FF', - label='Observed VL Features (at Reg. Time)') - plt.plot(of['t'], - of['count'], - linestyle='None', - marker=',', - markersize=2, - color='#00FFb3', - label='Observed OF Features (at Reg. 
Time)') - plt.xlabel('Time (s)') - plt.ylabel('Number of Features') - plt.title('EKF Features') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # mahalnobis distance - plt.figure() - if len(self.mahal['boxes']) > 0: - boxes = plt.boxplot(self.mahal['boxes'], - positions=self.mahal['times'], - widths=0.2, - manage_xticks=False, - patch_artist=True) - plt.setp(boxes['whiskers'], color='Black', linewidth=0.25) - plt.setp(boxes['caps'], color='Black', linewidth=0.25) - plt.setp(boxes['medians'], color='Black', linewidth=0.25) - plt.setp(boxes['fliers'], color='r', marker='x', markersize=1) - plt.setp(boxes['boxes'], color='Black', facecolor='SkyBlue', linewidth=0.25) - plt.title('VL Features Mahalnobis Distances') - plt.xlabel('Time (s)') - plt.ylabel('Mahalnobis Distance') - pdf.savefig() - plt.close() - - # linear velocity and acceleration - plt.figure() - plt.plot(ekf['t'], ekf['vx'], color=colors[0], linewidth=0.5, label='Velocity') - plt.plot(ekf['t'], ekf['vy'], color=colors[1], linewidth=0.5) - plt.plot(ekf['t'], ekf['vz'], color=colors[2], linewidth=0.5) - plt.fill_between(ekf['t'], ekf['vx'] - ekf['cov_7'], ekf['vx'] + ekf['cov_7'], facecolor=colors[0], alpha=0.5) - plt.fill_between(ekf['t'], ekf['vy'] - ekf['cov_8'], ekf['vy'] + ekf['cov_8'], facecolor=colors[1], alpha=0.5) - plt.fill_between(ekf['t'], ekf['vz'] - ekf['cov_9'], ekf['vz'] + ekf['cov_9'], facecolor=colors[2], alpha=0.5) - plt.title('Velocity') - plt.xlabel('Time (s)') - plt.ylabel('Velocity (m/s)') - plt.ylim(-0.5, 0.5) - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - plt.figure() - plt.plot(ekf['t'], ekf['ax'], color=colors[0], dashes=(1, 1), linewidth=0.5, label='Acceleration') - plt.plot(ekf['t'], ekf['ay'], color=colors[1], dashes=(1, 1), linewidth=0.5) - plt.plot(ekf['t'], ekf['az'], color=colors[2], dashes=(1, 1), linewidth=0.5) - plt.title('Acceleration') - plt.xlabel('Time (s)') - plt.ylabel('Acceleration (m/s$^2$)') - plt.legend(prop={'size': 6}) - 
pdf.savefig() - plt.close() - - # angle and angular velocity - plt.figure() - ax = plt.gca() - ax.plot(ekf['t'], ekf['angle1'], colors[0], linewidth=0.5, label='Angle') - ax.plot(ekf['t'], ekf['angle2'], colors[1], linewidth=0.5) - ax.plot(ekf['t'], ekf['angle3'], colors[2], linewidth=0.5) - ax2 = ax.twinx() - ax2.plot(ekf['t'], ekf['ox'], color=colors[0], linewidth=0.5, dashes=(1, 1), label='Angular Velocity') - ax2.plot(ekf['t'], ekf['oy'], color=colors[1], linewidth=0.5, dashes=(1, 1)) - ax2.plot(ekf['t'], ekf['oz'], color=colors[2], linewidth=0.5, dashes=(1, 1)) - ax.set_title('Angular Velocity') - ax.set_xlabel('Time (s)') - ax.set_ylabel('Angle ($^\circ$)') - ax2.set_ylabel('Angular Velocity ($^\circ$/s)') - lines, labels = ax.get_legend_handles_labels() - lines2, labels2 = ax2.get_legend_handles_labels() - ax.legend(lines + lines2, labels + labels2, prop={'size': 6}) - pdf.savefig() - plt.close() - - # bias - plt.figure() - ax = plt.gca() - ax.plot(ekf['t'], ekf['abx'], colors[0], linewidth=0.5, label='Accelerometer Bias') - ax.plot(ekf['t'], ekf['aby'], colors[1], linewidth=0.5) - ax.plot(ekf['t'], ekf['abz'], colors[2], linewidth=0.5) - ax.fill_between(ekf['t'], ekf['abx'] - ekf['cov_10'], ekf['abx'] + ekf['cov_10'], facecolor=colors[0], alpha=0.5) - ax.fill_between(ekf['t'], ekf['aby'] - ekf['cov_11'], ekf['aby'] + ekf['cov_11'], facecolor=colors[1], alpha=0.5) - ax.fill_between(ekf['t'], ekf['abz'] - ekf['cov_12'], ekf['abz'] + ekf['cov_12'], facecolor=colors[2], alpha=0.5) - ax2 = ax.twinx() - ax2.plot(ekf['t'], ekf['gbx'], color=colors[0], linewidth=0.5, dashes=(1, 1), label='Gyrometer Bias') - ax2.plot(ekf['t'], ekf['gby'], color=colors[1], linewidth=0.5, dashes=(1, 1)) - ax2.plot(ekf['t'], ekf['gbz'], color=colors[2], linewidth=0.5, dashes=(1, 1)) - ax2.fill_between(ekf['t'], ekf['gbx'] - ekf['cov_4'], ekf['gbx'] + ekf['cov_4'], facecolor=colors[0], alpha=0.5) - ax2.fill_between(ekf['t'], ekf['gby'] - ekf['cov_5'], ekf['gby'] + ekf['cov_5'], 
facecolor=colors[1], alpha=0.5) - ax2.fill_between(ekf['t'], ekf['gbz'] - ekf['cov_6'], ekf['gbz'] + ekf['cov_6'], facecolor=colors[2], alpha=0.5) - ax.set_title('Bias Terms') - ax.set_xlabel('Time (s)') - ax.set_ylabel('Accelerometer Bias (m/s$^2$)') - ax2.set_ylabel('Gyrometer Bias ($^\circ$/s)') - lines, labels = ax.get_legend_handles_labels() - lines2, labels2 = ax2.get_legend_handles_labels() - ax.legend(lines + lines2, labels + labels2, prop={'size': 6}) - pdf.savefig() - plt.close() - - # covariance - plt.figure() - plt.plot(ekf['t'], - covariance_map(self.ekf, 'cov_13', 'cov_14', 'cov_15'), - colors[0], - linewidth=0.5, - label='Position Covariance') - plt.plot(ekf['t'], - covariance_map(self.ekf, 'cov_7', 'cov_8', 'cov_9'), - colors[1], - linewidth=0.5, - label='Velocity Covariance') - plt.plot(ekf['t'], - covariance_map(self.ekf, 'cov_1', 'cov_2', 'cov_3'), - colors[2], - linewidth=0.5, - label='Orientation Covariance') - plt.title('Std. Deviation') - plt.xlabel('Time (s)') - plt.ylabel('Covariance') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # mahalnobis distance histogram - plt.figure() - plt.hist([item for sublist in self.mahal['boxes'] for item in sublist], bins=200, range=(0, 50), normed=True) - plt.xlabel('Mahalnobis Distance') - plt.ylabel('pdf') - plt.title('Mahalnobis Distance Histogram') - pdf.savefig() - plt.close() - - plt.figure() - plt.imshow(np.transpose(self.vl_heatmap), - cmap='hot', - interpolation='nearest', - vmin=0, - vmax=np.amax(self.vl_heatmap)) - plt.title('Visual Landmarks Density') - pdf.savefig() - plt.close() - - plt.figure() - plt.plot(of['t'], of['oldest'], color=colors[0], linewidth=0.5, label='Oldest') - plt.plot(of['t'], of['median'], color=colors[1], linewidth=0.5, label='Median') - plt.plot(of['t'], of['youngest'], color=colors[2], linewidth=0.5, label='Youngest') - plt.xlabel('Time (s)') - plt.ylabel('Optical Flow Feature Age (s)') - plt.title('Optical Flow Feature Age') - plt.legend(prop={'size': 
6}) - pdf.savefig() - plt.close() - - plt.figure() - plt.imshow(np.transpose(self.of_heatmap), - cmap='hot', - interpolation='nearest', - vmin=0, - vmax=np.amax(self.vl_heatmap)) - plt.title('Optical Flow Density') - pdf.savefig() - plt.close() - - plt.figure() - plt.axis('off') - plt.text(0.0, 0.5, output_text) - pdf.savefig() - plt.close() + self.ekf["cov_" + str(i)] = [] + + self.mahal = {"times": [], "boxes": []} + self.CAMERA_RES = (1280, 960) + self.HEATMAP_SIZE = (self.CAMERA_RES[0] / 32, self.CAMERA_RES[1] / 32) + self.vl_heatmap = np.zeros((self.HEATMAP_SIZE[0] + 1, self.HEATMAP_SIZE[1] + 1)) + self.of_heatmap = np.zeros((self.HEATMAP_SIZE[0] + 1, self.HEATMAP_SIZE[1] + 1)) + + self.vl = { + "t": [], + "count": [], + "x": [], + "y": [], + "z": [], + "angle1": [], + "angle2": [], + "angle3": [], + } + self.of = {"t": [], "count": [], "oldest": [], "youngest": [], "median": []} + self.gt = { + "t": [], + "x": [], + "y": [], + "z": [], + "angle1": [], + "angle2": [], + "angle3": [], + } + + f = open(filename, "r") + for l in f: + p = l.split(" ") + if l.startswith("EKF "): + t = float(p[1]) + if t < start_time or t > end_time: + continue + self.ekf["t"].append(float(p[1])) + self.ekf["x"].append(float(p[2])) + self.ekf["y"].append(float(p[3])) + self.ekf["z"].append(float(p[4])) + self.ekf["angle1"].append(float(p[5]) * 180 / math.pi) + self.ekf["angle2"].append(float(p[6]) * 180 / math.pi) + self.ekf["angle3"].append(float(p[7]) * 180 / math.pi) + self.ekf["vx"].append(float(p[8])) + self.ekf["vy"].append(float(p[9])) + self.ekf["vz"].append(float(p[10])) + self.ekf["ox"].append(float(p[11]) * 180 / math.pi) + self.ekf["oy"].append(float(p[12]) * 180 / math.pi) + self.ekf["oz"].append(float(p[13]) * 180 / math.pi) + self.ekf["ax"].append(float(p[14])) + self.ekf["ay"].append(float(p[15])) + self.ekf["az"].append(float(p[16])) + self.ekf["abx"].append(float(p[17])) + self.ekf["aby"].append(float(p[18])) + self.ekf["abz"].append(float(p[19])) + 
self.ekf["gbx"].append(float(p[20]) * 180 / math.pi) + self.ekf["gby"].append(float(p[21]) * 180 / math.pi) + self.ekf["gbz"].append(float(p[22]) * 180 / math.pi) + self.ekf["c"].append(int(p[23])) + self.ekf["s"].append(int(p[24])) + self.ekf["ml_count"].append(int(p[25])) + if self.ekf["ml_count"][-1] == 0: + self.ekf["ml_count"][-1] = float("nan") + self.ekf["of_count"].append(int(p[26])) + if self.ekf["of_count"][-1] == 0: + self.ekf["of_count"][-1] = float("nan") + for i in range(1, 16): + self.ekf["cov_" + str(i)].append(math.sqrt(abs(float(p[26 + i])))) + # convert quaternion to euler angles + # TODO: shouldn't be using this for covariances + q = ( + 0.5 * self.ekf["cov_1"][-1], + 0.5 * self.ekf["cov_2"][-1], + 0.5 * self.ekf["cov_3"][-1], + 1.0, + ) + euler = quat_to_euler(q) + self.ekf["cov_1"][-1] = euler[0] + self.ekf["cov_2"][-1] = euler[1] + self.ekf["cov_3"][-1] = euler[2] + m = [] + MAHAL_MAX = 30.0 + for i in range(50): + t = float(p[42 + i]) + if not math.isnan(t) and t > 0.0: + m.append(min(t, MAHAL_MAX)) + if len(m) > 0: + self.mahal["times"].append(float(p[1])) + self.mahal["boxes"].append(m) + self.ekf["mahal"].append(m) + elif l.startswith("OF ") and (len(p) - 3) / 4 >= 1: + t = float(p[1]) + if t > 0.0: + self.of["t"].append(t) + self.of["count"].append(int(p[2])) + times = [] + for i in range((len(p) - 3) / 4): + of_id = float(p[3 + 4 * i + 0]) + origt = float(p[3 + 4 * i + 1]) + u = float(p[3 + 4 * i + 2]) + v = float(p[3 + 4 * i + 3]) + times.append(self.of["t"][-1] - origt) + self.of_heatmap[ + int(round(u / self.CAMERA_RES[0] * self.HEATMAP_SIZE[0])) + + self.HEATMAP_SIZE[0] / 2, + int(round(v / self.CAMERA_RES[1] * self.HEATMAP_SIZE[1])) + + self.HEATMAP_SIZE[1] / 2, + ] += 1 + self.of["oldest"].append(np.max(times)) + self.of["youngest"].append(np.min(times)) + self.of["median"].append(np.median(times)) + elif l.startswith("VL ") and (len(p) - 9) / 5 >= 1: + if float(p[1]) > -1.0: + self.vl["t"].append(float(p[1])) + 
self.vl["count"].append(int(p[2])) + self.vl["x"].append(float(p[3])) + self.vl["y"].append(float(p[4])) + self.vl["z"].append(float(p[5])) + self.vl["angle1"].append(float(p[6]) * 180 / math.pi) + self.vl["angle2"].append(float(p[7]) * 180 / math.pi) + self.vl["angle3"].append(float(p[8]) * 180 / math.pi) + for i in range((len(p) - 9) / 5): + u = float(p[9 + 5 * i + 0]) + v = float(p[9 + 5 * i + 1]) + x = float(p[9 + 5 * i + 2]) + y = float(p[9 + 5 * i + 3]) + z = float(p[9 + 5 * i + 4]) + self.vl_heatmap[ + int(round(u / self.CAMERA_RES[0] * self.HEATMAP_SIZE[0])) + + self.HEATMAP_SIZE[0] / 2, + int(round(v / self.CAMERA_RES[1] * self.HEATMAP_SIZE[1])) + + self.HEATMAP_SIZE[1] / 2, + ] += 1 + elif l.startswith("GT "): + self.gt["t"].append(float(p[1])) + self.gt["x"].append(float(p[2])) + self.gt["y"].append(float(p[3])) + self.gt["z"].append(float(p[4])) + self.gt["angle1"].append(float(p[5]) * 180 / math.pi) + self.gt["angle2"].append(float(p[6]) * 180 / math.pi) + self.gt["angle3"].append(float(p[7]) * 180 / math.pi) + for key in list(self.ekf.keys()): + self.ekf[key] = np.array(self.ekf[key]) + # self.vl_heatmap = self.vl_heatmap / np.amax(self.vl_heatmap) + # self.of_heatmap = self.of_heatmap / np.amax(self.of_heatmap) + + def correct_ground_truth(self): + # find the best time offset for ground truth + best_error = float("inf") + best_offset = 0 + for i in range(125): + offset = (i - 62.5) / 62.5 + (pos_err, angle_err) = self.evaluate_error(offset) + err = 100 * pos_err + angle_err # 1 cm = 1 degree error + if err < best_error: + best_error = err + best_offset = offset + + # now actually do the shift + for i in range(len(self.gt["t"])): + self.gt["t"][i] += best_offset + return best_offset + + def evaluate_error(self, time_offset=0.0): + pos_err = 0.0 + angle_err = 0.0 + err_count = 0 + ekf_i = 0 + for gt_i in range(len(self.gt["t"])): + t = self.gt["t"][gt_i] + time_offset + while ekf_i < len(self.ekf["t"]) - 1 and self.ekf["t"][ekf_i] < t: + ekf_i += 1 + 
if ekf_i >= len(self.ekf["t"]) - 1: + break + if ekf_i == 0: + continue + # now gt_i falls between ekf_i - 1 and ekf_i, we will interpolate for position + u = (t - self.ekf["t"][ekf_i - 1]) / ( + self.ekf["t"][ekf_i] - self.ekf["t"][ekf_i - 1] + ) + x1 = np.array( + [ + self.ekf["x"][ekf_i - 1], + self.ekf["y"][ekf_i - 1], + self.ekf["z"][ekf_i - 1], + ] + ) + x2 = np.array( + [self.ekf["x"][ekf_i], self.ekf["y"][ekf_i], self.ekf["z"][ekf_i]] + ) + x = x1 + u * (x2 - x1) + # but just use the older angle, not worth trouble to interpolate in python + a = np.array( + [ + self.ekf["angle1"][ekf_i - 1], + self.ekf["angle2"][ekf_i - 1], + self.ekf["angle3"][ekf_i - 1], + ] + ) + + err_count += 1 + pos_err += ( + float( + norm( + np.array( + [self.gt["x"][gt_i], self.gt["y"][gt_i], self.gt["z"][gt_i]] + ) + - x + ) + ) + ** 2 + ) + # again, not best metric, but good enough for this + angle_err += ( + float( + norm( + np.array( + [ + self.gt["angle1"][gt_i], + self.gt["angle2"][gt_i], + self.gt["angle3"][gt_i], + ] + ) + - a + ) + ) + ** 2 + ) + if err_count == 0: + return (0.0, 0.0) + return (math.sqrt(pos_err / err_count), math.sqrt(angle_err / err_count)) + + def evaluate_features(self): + total_time = self.ekf["t"][len(self.ekf["t"]) - 1] - self.ekf["t"][0] + total_count = sum(self.vl["count"]) + max_gap = 0 + for i in range(1, len(self.vl["t"])): + max_gap = max(max_gap, self.vl["t"][i] - self.vl["t"][i - 1]) + return (total_count / total_time, max_gap) + + def rmse_using_single_image_sparse_mapping(self): + rmse_pos = rmse_timestamped_sequences( + self.ekf["x"], + self.ekf["y"], + self.ekf["z"], + self.ekf["t"], + self.vl["x"], + self.vl["y"], + self.vl["z"], + self.vl["t"], + ) + int_x = integrated_velocities(self.ekf["x"][0], self.ekf["vx"], self.ekf["t"]) + int_y = integrated_velocities(self.ekf["y"][0], self.ekf["vy"], self.ekf["t"]) + int_z = integrated_velocities(self.ekf["z"][0], self.ekf["vz"], self.ekf["t"]) + rmse_int_v = rmse_timestamped_sequences( + 
int_x, + int_y, + int_z, + self.ekf["t"], + self.vl["x"], + self.vl["y"], + self.vl["z"], + self.vl["t"], + ) + rmse_angle = orientation_rmse( + self.ekf["angle1"], + self.ekf["angle2"], + self.ekf["angle3"], + self.vl["angle1"], + self.vl["angle2"], + self.vl["angle3"], + self.ekf["t"], + self.vl["t"], + ) + return rmse_pos, rmse_angle, rmse_int_v + + def get_stats(self, job_id): + stats = [] + stat_names = [] + + # job id + stats.append(job_id) + stat_names.append("job_id") + + # OF stats + of_count_no_nans = [ + 0 if math.isnan(val) else val for val in self.ekf["of_count"] + ] + avg_num_of_features = np.mean(of_count_no_nans) + stats.append(avg_num_of_features) + stat_names.append("avg_num_of_features") + + num_of_features_added = np.sum(of_count_no_nans) + num_of_features_observed = np.sum(self.of["count"]) + ratio_of_features_added = ( + num_of_features_added / num_of_features_observed + if num_of_features_observed > 0 + else 0 + ) + stats.append(ratio_of_features_added) + stat_names.append("ratio_of_features_added") + + # ML stats + ml_count_no_nans = [ + 0 if math.isnan(val) else val for val in self.ekf["ml_count"] + ] + avg_num_ml_features = np.mean(ml_count_no_nans) + stats.append(avg_num_ml_features) + stat_names.append("avg_num_ml_features") + + num_ml_features_added = np.sum(ml_count_no_nans) + num_ml_features_observed = np.sum(self.vl["count"]) + ratio_ml_features_added = ( + num_ml_features_added / num_ml_features_observed + if num_ml_features_observed > 0 + else 0 + ) + stats.append(ratio_ml_features_added) + stat_names.append("ratio_ml_features_added") + + # Mahal + avg_mahal_distance = ( + np.mean(np.concatenate(self.mahal["boxes"])) + if len(self.mahal["boxes"]) > 0 + else 0 + ) + + stats.append(avg_mahal_distance) + stat_names.append("avg_mahal_distance") + + # Covariances + position_covariances = covariance_map(self.ekf, "cov_13", "cov_14", "cov_15") + avg_position_covariance = np.mean(position_covariances) + 
stats.append(avg_position_covariance) + stat_names.append("avg_position_covariance") + + orientation_covariances = covariance_map(self.ekf, "cov_1", "cov_2", "cov_3") + avg_orientation_covariance = np.mean(orientation_covariances) + stats.append(avg_orientation_covariance) + stat_names.append("avg_orientation_covariance") + + # Compare with groundtruth (if available) + # rmse_pos_vs_groundtruth, rmse_angle_vs_groundtruth = self.evaluate_error() + # stats.append(rmse_pos_vs_groundtruth) + # stat_names.append('rmse_pos_vs_groundtruth') + # stats.append(rmse_angle_vs_groundtruth) + # stat_names.append('rmse_orientation_vs_groundtruth') + + # Compare with poses from single-image sparse mapping registations + ( + rmse_pos_vs_single_image_sparse_mapping, + rmse_angle_vs_single_image_sparse_mapping, + rmse_int, + ) = self.rmse_using_single_image_sparse_mapping() + stats.append(rmse_int) + stat_names.append("integrated_rmse") + stats.append(rmse_pos_vs_single_image_sparse_mapping) + stat_names.append("rmse_pos_vs_single_image_sparse_mapping") + stats.append(rmse_angle_vs_single_image_sparse_mapping) + stat_names.append("orientation_rmse") + return stats, stat_names + + def write_results_to_csv(self, job_id, results_csv_output_file, bagfile): + with open(results_csv_output_file, "w") as results_file: + writer = csv.writer(results_file) + stats, stat_names = self.get_stats(job_id) + bag_name = os.path.splitext(os.path.basename(bagfile))[0] + # TODO(rsoussan): better way to do this + stats.append(stats[-2]) + stat_names.append("rmse") + stats.append(bag_name) + stat_names.append("Bag") + writer.writerow(stat_names) + writer.writerow(stats) + + def plot(self, filename, output_text): + colors = ["r", "b", "g"] + ekf = self.ekf + gt = self.gt + vl = self.vl + of = self.of + + with PdfPages(filename) as pdf: + # positions + plt.figure() + plt.plot(ekf["t"], ekf["x"], colors[0], linewidth=0.5, label="EKF Pos. 
(X)") + plt.plot(ekf["t"], ekf["y"], colors[1], linewidth=0.5, label="EKF Pos. (Y)") + plt.plot(ekf["t"], ekf["z"], colors[2], linewidth=0.5, label="EKF Pos. (Z)") + plt.fill_between( + ekf["t"], + ekf["x"] - ekf["cov_13"], + ekf["x"] + ekf["cov_13"], + facecolor=colors[0], + alpha=0.5, + ) + plt.fill_between( + ekf["t"], + ekf["y"] - ekf["cov_14"], + ekf["y"] + ekf["cov_14"], + facecolor=colors[1], + alpha=0.5, + ) + plt.fill_between( + ekf["t"], + ekf["z"] - ekf["cov_15"], + ekf["z"] + ekf["cov_15"], + facecolor=colors[2], + alpha=0.5, + ) + plt.autoscale(False) + plt.plot( + gt["t"], + gt["x"], + color=colors[0], + linewidth=0.5, + dashes=(1, 1), + label="Ground Truth (X)", + ) + plt.plot(gt["t"], gt["y"], color=colors[1], linewidth=0.5, dashes=(1, 1)) + plt.plot(gt["t"], gt["z"], color=colors[2], linewidth=0.5, dashes=(1, 1)) + plt.plot( + vl["t"], + vl["x"], + color=colors[0], + linestyle="None", + marker="o", + markersize=2, + label="Observation (X)", + ) + plt.plot( + vl["t"], + vl["y"], + color=colors[1], + linestyle="None", + marker="o", + markersize=2, + ) + plt.plot( + vl["t"], + vl["z"], + color=colors[2], + linestyle="None", + marker="o", + markersize=2, + ) + plt.xlabel("Time (s)") + plt.ylabel("Position (m)") + plt.title("Position") + plt.legend(prop={"size": 6}) + pdf.savefig() + + # angles + plt.figure() + plt.plot(ekf["t"], ekf["angle1"], colors[0], linewidth=0.5, label="EKF") + plt.plot(ekf["t"], ekf["angle2"], colors[1], linewidth=0.5) + plt.plot(ekf["t"], ekf["angle3"], colors[2], linewidth=0.5) + plt.fill_between( + ekf["t"], + ekf["angle1"] - ekf["cov_1"], + ekf["angle1"] + ekf["cov_1"], + facecolor=colors[0], + alpha=0.5, + ) + plt.fill_between( + ekf["t"], + ekf["angle2"] - ekf["cov_2"], + ekf["angle2"] + ekf["cov_2"], + facecolor=colors[1], + alpha=0.5, + ) + plt.fill_between( + ekf["t"], + ekf["angle3"] - ekf["cov_3"], + ekf["angle3"] + ekf["cov_3"], + facecolor=colors[2], + alpha=0.5, + ) + plt.autoscale(False) + plt.plot( + gt["t"], + 
gt["angle1"], + color=colors[0], + linewidth=0.25, + dashes=(1, 1), + label="Grount Truth", + ) + plt.plot( + gt["t"], gt["angle2"], color=colors[1], linewidth=0.25, dashes=(1, 1) + ) + plt.plot( + gt["t"], gt["angle3"], color=colors[2], linewidth=0.25, dashes=(1, 1) + ) + plt.plot( + vl["t"], + vl["angle1"], + color=colors[0], + linestyle="None", + marker="o", + markersize=2, + label="Observation", + ) + plt.plot( + vl["t"], + vl["angle2"], + color=colors[1], + linestyle="None", + marker="o", + markersize=2, + ) + plt.plot( + vl["t"], + vl["angle3"], + color=colors[2], + linestyle="None", + marker="o", + markersize=2, + ) + plt.xlabel("Time (s)") + plt.ylabel("Angle ($^\circ$)") + plt.title("Orientation") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # positions + plt.figure() + max_y = 100 + + int_x = integrated_velocities(ekf["x"][0], ekf["vx"], ekf["t"]) + int_y = integrated_velocities(ekf["y"][0], ekf["vy"], ekf["t"]) + int_z = integrated_velocities(ekf["z"][0], ekf["vz"], ekf["t"]) + + plt.plot(ekf["t"], int_x, colors[0], linewidth=0.5, label="EKF Pos. (X)") + plt.plot(ekf["t"], int_y, colors[1], linewidth=0.5, label="EKF Pos. (Y)") + plt.plot(ekf["t"], int_z, colors[2], linewidth=0.5, label="EKF Pos. 
(Z)") + plt.plot( + vl["t"], + vl["x"], + color=colors[0], + linestyle="None", + marker="o", + markersize=2, + label="Observation (X)", + ) + plt.plot( + vl["t"], + vl["y"], + color=colors[1], + linestyle="None", + marker="o", + markersize=2, + ) + plt.plot( + vl["t"], + vl["z"], + color=colors[2], + linestyle="None", + marker="o", + markersize=2, + ) + plt.xlabel("Time (s)") + plt.ylabel("Integrated Velocity (m)") + plt.title("Integrated Velocity") + plt.legend(prop={"size": 6}) + pdf.savefig() + + # feature counts + plt.figure() + plt.plot( + ekf["t"], + ekf["ml_count"], + linestyle="None", + marker="x", + markersize=6, + color="#FF0000", + label="Integrated VL Features", + ) + plt.plot( + ekf["t"], + ekf["of_count"], + marker="|", + markersize=2, + color="#0000FF", + label="Integrated OF Features", + ) + plt.plot( + vl["t"], + vl["count"], + linestyle="None", + marker=".", + markersize=2, + color="#B300FF", + label="Observed VL Features (at Reg. Time)", + ) + plt.plot( + of["t"], + of["count"], + linestyle="None", + marker=",", + markersize=2, + color="#00FFb3", + label="Observed OF Features (at Reg. 
Time)", + ) + plt.xlabel("Time (s)") + plt.ylabel("Number of Features") + plt.title("EKF Features") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # mahalnobis distance + plt.figure() + if len(self.mahal["boxes"]) > 0: + boxes = plt.boxplot( + self.mahal["boxes"], + positions=self.mahal["times"], + widths=0.2, + manage_xticks=False, + patch_artist=True, + ) + plt.setp(boxes["whiskers"], color="Black", linewidth=0.25) + plt.setp(boxes["caps"], color="Black", linewidth=0.25) + plt.setp(boxes["medians"], color="Black", linewidth=0.25) + plt.setp(boxes["fliers"], color="r", marker="x", markersize=1) + plt.setp( + boxes["boxes"], color="Black", facecolor="SkyBlue", linewidth=0.25 + ) + plt.title("VL Features Mahalnobis Distances") + plt.xlabel("Time (s)") + plt.ylabel("Mahalnobis Distance") + pdf.savefig() + plt.close() + + # linear velocity and acceleration + plt.figure() + plt.plot( + ekf["t"], ekf["vx"], color=colors[0], linewidth=0.5, label="Velocity" + ) + plt.plot(ekf["t"], ekf["vy"], color=colors[1], linewidth=0.5) + plt.plot(ekf["t"], ekf["vz"], color=colors[2], linewidth=0.5) + plt.fill_between( + ekf["t"], + ekf["vx"] - ekf["cov_7"], + ekf["vx"] + ekf["cov_7"], + facecolor=colors[0], + alpha=0.5, + ) + plt.fill_between( + ekf["t"], + ekf["vy"] - ekf["cov_8"], + ekf["vy"] + ekf["cov_8"], + facecolor=colors[1], + alpha=0.5, + ) + plt.fill_between( + ekf["t"], + ekf["vz"] - ekf["cov_9"], + ekf["vz"] + ekf["cov_9"], + facecolor=colors[2], + alpha=0.5, + ) + plt.title("Velocity") + plt.xlabel("Time (s)") + plt.ylabel("Velocity (m/s)") + plt.ylim(-0.5, 0.5) + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + plt.figure() + plt.plot( + ekf["t"], + ekf["ax"], + color=colors[0], + dashes=(1, 1), + linewidth=0.5, + label="Acceleration", + ) + plt.plot(ekf["t"], ekf["ay"], color=colors[1], dashes=(1, 1), linewidth=0.5) + plt.plot(ekf["t"], ekf["az"], color=colors[2], dashes=(1, 1), linewidth=0.5) + plt.title("Acceleration") + 
plt.xlabel("Time (s)") + plt.ylabel("Acceleration (m/s$^2$)") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # angle and angular velocity + plt.figure() + ax = plt.gca() + ax.plot(ekf["t"], ekf["angle1"], colors[0], linewidth=0.5, label="Angle") + ax.plot(ekf["t"], ekf["angle2"], colors[1], linewidth=0.5) + ax.plot(ekf["t"], ekf["angle3"], colors[2], linewidth=0.5) + ax2 = ax.twinx() + ax2.plot( + ekf["t"], + ekf["ox"], + color=colors[0], + linewidth=0.5, + dashes=(1, 1), + label="Angular Velocity", + ) + ax2.plot(ekf["t"], ekf["oy"], color=colors[1], linewidth=0.5, dashes=(1, 1)) + ax2.plot(ekf["t"], ekf["oz"], color=colors[2], linewidth=0.5, dashes=(1, 1)) + ax.set_title("Angular Velocity") + ax.set_xlabel("Time (s)") + ax.set_ylabel("Angle ($^\circ$)") + ax2.set_ylabel("Angular Velocity ($^\circ$/s)") + lines, labels = ax.get_legend_handles_labels() + lines2, labels2 = ax2.get_legend_handles_labels() + ax.legend(lines + lines2, labels + labels2, prop={"size": 6}) + pdf.savefig() + plt.close() + + # bias + plt.figure() + ax = plt.gca() + ax.plot( + ekf["t"], + ekf["abx"], + colors[0], + linewidth=0.5, + label="Accelerometer Bias", + ) + ax.plot(ekf["t"], ekf["aby"], colors[1], linewidth=0.5) + ax.plot(ekf["t"], ekf["abz"], colors[2], linewidth=0.5) + ax.fill_between( + ekf["t"], + ekf["abx"] - ekf["cov_10"], + ekf["abx"] + ekf["cov_10"], + facecolor=colors[0], + alpha=0.5, + ) + ax.fill_between( + ekf["t"], + ekf["aby"] - ekf["cov_11"], + ekf["aby"] + ekf["cov_11"], + facecolor=colors[1], + alpha=0.5, + ) + ax.fill_between( + ekf["t"], + ekf["abz"] - ekf["cov_12"], + ekf["abz"] + ekf["cov_12"], + facecolor=colors[2], + alpha=0.5, + ) + ax2 = ax.twinx() + ax2.plot( + ekf["t"], + ekf["gbx"], + color=colors[0], + linewidth=0.5, + dashes=(1, 1), + label="Gyrometer Bias", + ) + ax2.plot( + ekf["t"], ekf["gby"], color=colors[1], linewidth=0.5, dashes=(1, 1) + ) + ax2.plot( + ekf["t"], ekf["gbz"], color=colors[2], linewidth=0.5, dashes=(1, 1) + ) + 
ax2.fill_between( + ekf["t"], + ekf["gbx"] - ekf["cov_4"], + ekf["gbx"] + ekf["cov_4"], + facecolor=colors[0], + alpha=0.5, + ) + ax2.fill_between( + ekf["t"], + ekf["gby"] - ekf["cov_5"], + ekf["gby"] + ekf["cov_5"], + facecolor=colors[1], + alpha=0.5, + ) + ax2.fill_between( + ekf["t"], + ekf["gbz"] - ekf["cov_6"], + ekf["gbz"] + ekf["cov_6"], + facecolor=colors[2], + alpha=0.5, + ) + ax.set_title("Bias Terms") + ax.set_xlabel("Time (s)") + ax.set_ylabel("Accelerometer Bias (m/s$^2$)") + ax2.set_ylabel("Gyrometer Bias ($^\circ$/s)") + lines, labels = ax.get_legend_handles_labels() + lines2, labels2 = ax2.get_legend_handles_labels() + ax.legend(lines + lines2, labels + labels2, prop={"size": 6}) + pdf.savefig() + plt.close() + + # covariance + plt.figure() + plt.plot( + ekf["t"], + covariance_map(self.ekf, "cov_13", "cov_14", "cov_15"), + colors[0], + linewidth=0.5, + label="Position Covariance", + ) + plt.plot( + ekf["t"], + covariance_map(self.ekf, "cov_7", "cov_8", "cov_9"), + colors[1], + linewidth=0.5, + label="Velocity Covariance", + ) + plt.plot( + ekf["t"], + covariance_map(self.ekf, "cov_1", "cov_2", "cov_3"), + colors[2], + linewidth=0.5, + label="Orientation Covariance", + ) + plt.title("Std. 
Deviation") + plt.xlabel("Time (s)") + plt.ylabel("Covariance") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # mahalnobis distance histogram + plt.figure() + plt.hist( + [item for sublist in self.mahal["boxes"] for item in sublist], + bins=200, + range=(0, 50), + normed=True, + ) + plt.xlabel("Mahalnobis Distance") + plt.ylabel("pdf") + plt.title("Mahalnobis Distance Histogram") + pdf.savefig() + plt.close() + + plt.figure() + plt.imshow( + np.transpose(self.vl_heatmap), + cmap="hot", + interpolation="nearest", + vmin=0, + vmax=np.amax(self.vl_heatmap), + ) + plt.title("Visual Landmarks Density") + pdf.savefig() + plt.close() + + plt.figure() + plt.plot( + of["t"], of["oldest"], color=colors[0], linewidth=0.5, label="Oldest" + ) + plt.plot( + of["t"], of["median"], color=colors[1], linewidth=0.5, label="Median" + ) + plt.plot( + of["t"], + of["youngest"], + color=colors[2], + linewidth=0.5, + label="Youngest", + ) + plt.xlabel("Time (s)") + plt.ylabel("Optical Flow Feature Age (s)") + plt.title("Optical Flow Feature Age") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + plt.figure() + plt.imshow( + np.transpose(self.of_heatmap), + cmap="hot", + interpolation="nearest", + vmin=0, + vmax=np.amax(self.vl_heatmap), + ) + plt.title("Optical Flow Density") + pdf.savefig() + plt.close() + + plt.figure() + plt.axis("off") + plt.text(0.0, 0.5, output_text) + pdf.savefig() + plt.close() class RunEKFOptions(object): - - def __init__(self, bag_file, map_file, ekf_output_file, pdf_output_file, results_csv_output_file): - self.bag_file = bag_file - self.map_file = map_file - self.ekf_output_file = ekf_output_file - self.pdf_output_file = pdf_output_file - self.results_csv_output_file = results_csv_output_file - self.image_topic = None - self.job_id = 0 - self.gnc_config = 'gnc.config' - self.robot_name = None - self.start_time = float('-inf') - self.end_time = float('inf') - self.ekf_in_bag = False - self.features_in_bag = False - self.cached 
= False - self.save_stats = False - self.make_plots = True - - def set_bag_sweep_params(self, args): - self.image_topic = args.image_topic - self.robot_name = args.robot_name - self.gnc_config = args.gnc_config - self.save_stats = args.save_stats - self.make_plots = args.make_plots - - def set_command_line_params(self, args): - self.image_topic = args.image_topic - self.robot_name = args.robot_name - self.start_time = args.start_time - self.end_time = args.end_time - self.ekf_in_bag = args.ekf_in_bag - self.features_in_bag = args.features_in_bag - self.cached = args.cached - self.save_stats = args.save_stats - self.make_plots = args.make_plots - - def set_param_sweep_params(self, image_topic, job_id, gnc_config, save_stats=True, make_plots=True): - self.image_topic = image_topic - self.job_id = job_id - self.gnc_config = gnc_config - self.save_stats = save_stats - self.make_plots = make_plots + def __init__( + self, + bag_file, + map_file, + ekf_output_file, + pdf_output_file, + results_csv_output_file, + ): + self.bag_file = bag_file + self.map_file = map_file + self.ekf_output_file = ekf_output_file + self.pdf_output_file = pdf_output_file + self.results_csv_output_file = results_csv_output_file + self.image_topic = None + self.job_id = 0 + self.gnc_config = "gnc.config" + self.robot_name = None + self.start_time = float("-inf") + self.end_time = float("inf") + self.ekf_in_bag = False + self.features_in_bag = False + self.cached = False + self.save_stats = False + self.make_plots = True + + def set_bag_sweep_params(self, args): + self.image_topic = args.image_topic + self.robot_name = args.robot_name + self.gnc_config = args.gnc_config + self.save_stats = args.save_stats + self.make_plots = args.make_plots + + def set_command_line_params(self, args): + self.image_topic = args.image_topic + self.robot_name = args.robot_name + self.start_time = args.start_time + self.end_time = args.end_time + self.ekf_in_bag = args.ekf_in_bag + self.features_in_bag = 
args.features_in_bag + self.cached = args.cached + self.save_stats = args.save_stats + self.make_plots = args.make_plots + + def set_param_sweep_params( + self, image_topic, job_id, gnc_config, save_stats=True, make_plots=True + ): + self.image_topic = image_topic + self.job_id = job_id + self.gnc_config = gnc_config + self.save_stats = save_stats + self.make_plots = make_plots def run_ekf_and_save_stats(options): - (astrobee_map, astrobee_bag, robot_config) = environment.initialize_environment(options.map_file, options.bag_file, - options.robot_name) - - print("Running EKF for job_id: " + str(options.job_id)) - - start = time.time() - if not options.cached: - run_ekf(astrobee_map, astrobee_bag, options.ekf_output_file, options.ekf_in_bag, options.features_in_bag, - options.image_topic, options.gnc_config) - run_time = time.time() - start - output_text = 'Run time: %g\n' % (run_time) - - print("Finished running EKF for job_id " + str(options.job_id) + " in " + str(run_time) + " seconds") - - log = EkfLog(options.ekf_output_file, options.start_time, options.end_time) - - if options.save_stats: - log.write_results_to_csv(options.job_id, options.results_csv_output_file, astrobee_bag) - print("Printed csv reults for job_id: " + str(options.job_id)) - - if options.make_plots: - offset = log.correct_ground_truth() - (pos_err, angle_err) = log.evaluate_error() - (fps, max_gap) = log.evaluate_features() - output_text += 'Ground Truth Time Shift: %g\n' % (offset) - output_text += 'RMSE Position Error: %g Angular Error: %g\n' % (pos_err, angle_err) - output_text += 'VL Feature Throughput: %g VLs / s Max Gap: %g s' % (fps, max_gap) - print output_text - log.plot(options.pdf_output_file, output_text) - print 'Saved results to %s' % (options.pdf_output_file) + (astrobee_map, astrobee_bag, robot_config) = environment.initialize_environment( + options.map_file, options.bag_file, options.robot_name + ) + + print(("Running EKF for job_id: " + str(options.job_id))) + + start = 
time.time() + if not options.cached: + run_ekf( + astrobee_map, + astrobee_bag, + options.ekf_output_file, + options.ekf_in_bag, + options.features_in_bag, + options.image_topic, + options.gnc_config, + ) + run_time = time.time() - start + output_text = "Run time: %g\n" % (run_time) + + print( + ( + "Finished running EKF for job_id " + + str(options.job_id) + + " in " + + str(run_time) + + " seconds" + ) + ) + + log = EkfLog(options.ekf_output_file, options.start_time, options.end_time) + + if options.save_stats: + log.write_results_to_csv( + options.job_id, options.results_csv_output_file, astrobee_bag + ) + print(("Printed csv reults for job_id: " + str(options.job_id))) + + if options.make_plots: + offset = log.correct_ground_truth() + (pos_err, angle_err) = log.evaluate_error() + (fps, max_gap) = log.evaluate_features() + output_text += "Ground Truth Time Shift: %g\n" % (offset) + output_text += "RMSE Position Error: %g Angular Error: %g\n" % ( + pos_err, + angle_err, + ) + output_text += "VL Feature Throughput: %g VLs / s Max Gap: %g s" % ( + fps, + max_gap, + ) + print(output_text) + log.plot(options.pdf_output_file, output_text) + print(("Saved results to %s" % (options.pdf_output_file))) diff --git a/tools/ekf_bag/scripts/environment.py b/tools/ekf_bag/scripts/environment.py index 008a81f343..634b2675d5 100755 --- a/tools/ekf_bag/scripts/environment.py +++ b/tools/ekf_bag/scripts/environment.py @@ -2,64 +2,66 @@ # # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import rospkg - -import sys import os import os.path +import sys + +import rospkg + # sets all needed environment variables. returns map and bag specified # by environment variables, if any def initialize_environment(astrobee_map, astrobee_bag, astrobee_robot): - if astrobee_map == None and 'ASTROBEE_MAP' in os.environ: - astrobee_map = os.environ['ASTROBEE_MAP'] - if astrobee_map == None: - print >> sys.stderr, 'ASTROBEE_MAP not set.' - sys.exit(0) - if astrobee_bag == None and 'ASTROBEE_BAG' in os.environ: - astrobee_bag = os.environ['ASTROBEE_BAG'] - if astrobee_bag == None: - print >> sys.stderr, 'ASTROBEE_BAG not set.' - sys.exit(0) + if astrobee_map == None and "ASTROBEE_MAP" in os.environ: + astrobee_map = os.environ["ASTROBEE_MAP"] + if astrobee_map == None: + print("ASTROBEE_MAP not set.", file=sys.stderr) + sys.exit(0) + if astrobee_bag == None and "ASTROBEE_BAG" in os.environ: + astrobee_bag = os.environ["ASTROBEE_BAG"] + if astrobee_bag == None: + print("ASTROBEE_BAG not set.", file=sys.stderr) + sys.exit(0) + + rospack = rospkg.RosPack() + astrobee_path = "" + try: + astrobee_path = rospack.get_path("astrobee") + except: + print("Failed to find package astrobee.", file=sys.stderr) + sys.exit(0) - rospack = rospkg.RosPack() - astrobee_path = '' - try: - astrobee_path = rospack.get_path('astrobee') - except: - print >> sys.stderr, 'Failed to find package astrobee.' 
- sys.exit(0) + os.environ["ASTROBEE_RESOURCE_DIR"] = astrobee_path + "/resources" + os.environ["ASTROBEE_CONFIG_DIR"] = astrobee_path + "/config" - os.environ['ASTROBEE_RESOURCE_DIR'] = astrobee_path + '/resources' - os.environ['ASTROBEE_CONFIG_DIR'] = astrobee_path + '/config' + if astrobee_robot == None: + robot_config = os.path.dirname(os.path.abspath(astrobee_bag)) + robot_config += "/robot.config" + else: + robot_config = astrobee_path + "/config/robots/" + astrobee_robot + ".config" - if astrobee_robot == None: - robot_config = os.path.dirname(os.path.abspath(astrobee_bag)) - robot_config += '/robot.config' - else: - robot_config = astrobee_path + '/config/robots/' + astrobee_robot + '.config' - - if not os.path.isfile(robot_config): - print >> sys.stderr, 'Please create the robot config file %s.' % (robot_config) - sys.exit(0) + if not os.path.isfile(robot_config): + print( + "Please create the robot config file %s." % (robot_config), file=sys.stderr + ) + sys.exit(0) - os.environ['ASTROBEE_ROBOT'] = robot_config - os.environ['ASTROBEE_WORLD'] = 'granite' - - return (astrobee_map, astrobee_bag, robot_config) + os.environ["ASTROBEE_ROBOT"] = robot_config + os.environ["ASTROBEE_WORLD"] = "granite" + return (astrobee_map, astrobee_bag, robot_config) diff --git a/tools/ekf_bag/scripts/parameter_sweep.py b/tools/ekf_bag/scripts/parameter_sweep.py index f20905205f..75856d0352 100644 --- a/tools/ekf_bag/scripts/parameter_sweep.py +++ b/tools/ekf_bag/scripts/parameter_sweep.py @@ -22,11 +22,11 @@ import itertools import math import multiprocessing -import numpy as np import os import config_creator import ekf_graph +import numpy as np import utilities @@ -35,150 +35,185 @@ # some errors are suppressed due to the multiprocessing # library call @utilities.full_traceback -def test_values(values, job_id, value_names, output_dir, bag_file, map_file, image_topic, gnc_config): - new_gnc_config_filepath = os.path.join(output_dir, str(job_id) + "_gnc.config") - 
config_creator.make_config(values, value_names, gnc_config, new_gnc_config_filepath) - - ekf_output_file = os.path.join(output_dir, str(job_id) + "_ekf_results.txt") - pdf_output_file = os.path.join(output_dir, str(job_id) + "_plots.pdf") - results_csv_output_file = os.path.join(output_dir, str(job_id) + "_results.csv") - - values_file_name = str(job_id) + '_values.csv' - with open(os.path.join(output_dir, values_file_name), 'w') as values_file: - id_and_values = (job_id,) + values - id_name_and_value_names = ['job_id'] + value_names - writer = csv.writer(values_file) - writer.writerow(id_name_and_value_names) - writer.writerow(id_and_values) - - options = ekf_graph.RunEKFOptions(bag_file, map_file, ekf_output_file, pdf_output_file, results_csv_output_file) - options.set_param_sweep_params(image_topic, job_id, new_gnc_config_filepath) - ekf_graph.run_ekf_and_save_stats(options) +def test_values( + values, job_id, value_names, output_dir, bag_file, map_file, image_topic, gnc_config +): + new_gnc_config_filepath = os.path.join(output_dir, str(job_id) + "_gnc.config") + config_creator.make_config(values, value_names, gnc_config, new_gnc_config_filepath) + + ekf_output_file = os.path.join(output_dir, str(job_id) + "_ekf_results.txt") + pdf_output_file = os.path.join(output_dir, str(job_id) + "_plots.pdf") + results_csv_output_file = os.path.join(output_dir, str(job_id) + "_results.csv") + + values_file_name = str(job_id) + "_values.csv" + with open(os.path.join(output_dir, values_file_name), "w") as values_file: + id_and_values = (job_id,) + values + id_name_and_value_names = ["job_id"] + value_names + writer = csv.writer(values_file) + writer.writerow(id_name_and_value_names) + writer.writerow(id_and_values) + + options = ekf_graph.RunEKFOptions( + bag_file, map_file, ekf_output_file, pdf_output_file, results_csv_output_file + ) + options.set_param_sweep_params(image_topic, job_id, new_gnc_config_filepath) + ekf_graph.run_ekf_and_save_stats(options) # Helper that 
unpacks arguments and calls original function # Aides running jobs in parallel as pool only supports # passing a single argument to workers def test_values_helper(zipped_vals): - return test_values(*zipped_vals) - - -def parameter_sweep(all_value_combos, value_names, output_dir, bag_file, map_file, image_topic, gnc_config): - job_ids = list(range(len(all_value_combos))) - num_processes = 6 - pool = multiprocessing.Pool(num_processes) - # izip arguments so we can pass as one argument to pool worker - pool.map( - test_values_helper, - itertools.izip(all_value_combos, job_ids, itertools.repeat(value_names), itertools.repeat(output_dir), - itertools.repeat(bag_file), itertools.repeat(map_file), itertools.repeat(image_topic), - itertools.repeat(gnc_config))) + return test_values(*zipped_vals) + + +def parameter_sweep( + all_value_combos, + value_names, + output_dir, + bag_file, + map_file, + image_topic, + gnc_config, +): + job_ids = list(range(len(all_value_combos))) + num_processes = 6 + pool = multiprocessing.Pool(num_processes) + # izip arguments so we can pass as one argument to pool worker + pool.map( + test_values_helper, + list( + zip( + all_value_combos, + job_ids, + itertools.repeat(value_names), + itertools.repeat(output_dir), + itertools.repeat(bag_file), + itertools.repeat(map_file), + itertools.repeat(image_topic), + itertools.repeat(gnc_config), + ) + ), + ) def make_all_value_combinations(value_ranges): - return list(itertools.product(*value_ranges)) + return list(itertools.product(*value_ranges)) def make_value_ranges(): - value_ranges = [] - value_names = [] - steps = 50 + value_ranges = [] + value_names = [] + steps = 50 - #tune_ase_Q_imu + # tune_ase_Q_imu - #q_gyro - # .001 -> 2 deg - #q_gyro_degrees_range = np.logspace(-3, .3, steps, endpoint=True) - #q_gyro_squared_rads_range = [math.radians(deg)**2 for deg in q_gyro_degrees_range] - #value_ranges.append(q_gyro_squared_rads_range) - #value_names.append('q_gyro') + # q_gyro + # .001 -> 2 deg + # 
q_gyro_degrees_range = np.logspace(-3, .3, steps, endpoint=True) + # q_gyro_squared_rads_range = [math.radians(deg)**2 for deg in q_gyro_degrees_range] + # value_ranges.append(q_gyro_squared_rads_range) + # value_names.append('q_gyro') - #q_gyro_bias - #q_gyro_bias_degrees_range = np.logspace(-8, 0, steps, endpoint=True) - #q_gyro_bias_squared_rads_range = [math.radians(bias)**2 for bias in q_gyro_bias_degrees_range] - #value_ranges.append(q_gyro_bias_squared_rads_range) - #value_names.append('q_gyro_bias') + # q_gyro_bias + # q_gyro_bias_degrees_range = np.logspace(-8, 0, steps, endpoint=True) + # q_gyro_bias_squared_rads_range = [math.radians(bias)**2 for bias in q_gyro_bias_degrees_range] + # value_ranges.append(q_gyro_bias_squared_rads_range) + # value_names.append('q_gyro_bias') - #q_accel (logspace) - #value_ranges.append(list(np.logspace(-9, -5, steps, endpoint=True))) - #value_names.append('q_accel') + # q_accel (logspace) + # value_ranges.append(list(np.logspace(-9, -5, steps, endpoint=True))) + # value_names.append('q_accel') - #q_accel_bias - #value_ranges.append(list(np.logspace(-11, -5, steps, endpoint=True))) - #value_names.append('q_accel_bias') + # q_accel_bias + # value_ranges.append(list(np.logspace(-11, -5, steps, endpoint=True))) + # value_names.append('q_accel_bias') - #Tune Image Params + # Tune Image Params - #tun_ase_map_error - #value_ranges.append(list(np.linspace(0, 0.1, steps, endpoint=True))) - #value_names.append('tun_ase_map_error') + # tun_ase_map_error + # value_ranges.append(list(np.linspace(0, 0.1, steps, endpoint=True))) + # value_names.append('tun_ase_map_error') - #tun_ase_vis_r_mag - #value_ranges.append(list(np.linspace(2, 6, steps, endpoint=True))) - #value_names.append('tun_ase_vis_r_mag') - #tun_ase_of_r_mag - #value_ranges.append(list(np.linspace(0, 3, steps, endpoint=True))) - #value_names.append('tun_ase_of_r_mag') + # tun_ase_vis_r_mag + # value_ranges.append(list(np.linspace(2, 6, steps, endpoint=True))) + # 
value_names.append('tun_ase_vis_r_mag') + # tun_ase_of_r_mag + # value_ranges.append(list(np.linspace(0, 3, steps, endpoint=True))) + # value_names.append('tun_ase_of_r_mag') - #tun_ase_dock_r_mag - #value_ranges.append(list(np.linspace(1, 3, steps, endpoint=True))) - #value_names.append('tun_ase_dock_r_mag') + # tun_ase_dock_r_mag + # value_ranges.append(list(np.linspace(1, 3, steps, endpoint=True))) + # value_names.append('tun_ase_dock_r_mag') - # Other Options + # Other Options - # tun_ase_mahal_distance_max - value_ranges.append(list(np.linspace(0, 40, steps, endpoint=True))) - value_names.append('tun_ase_mahal_distance_max') + # tun_ase_mahal_distance_max + value_ranges.append(list(np.linspace(0, 40, steps, endpoint=True))) + value_names.append("tun_ase_mahal_distance_max") - return value_ranges, value_names + return value_ranges, value_names def save_values(value_names, values, filename, output_dir): - with open(os.path.join(output_dir, filename), 'w') as values_file: - writer = csv.writer(values_file) - writer.writerow(value_names) - writer.writerows(values) - -def make_values_and_parameter_sweep(output_dir, bag_file, map_file, image_topic, gnc_config): - output_dir = utilities.create_directory(output_dir) - print('Output directory for results is {}'.format(output_dir)) - - value_ranges, value_names = make_value_ranges() - save_values(value_names, value_ranges, 'individual_value_ranges.csv', output_dir) - - all_value_combos = make_all_value_combinations(value_ranges) - save_values(value_names, all_value_combos, 'all_value_combos.csv', output_dir) - - parameter_sweep(all_value_combos, value_names, output_dir, bag_file, map_file, image_topic, gnc_config) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('bag_file', help='Full path to bagfile.') - parser.add_argument('map_file', help='Full path to map file.') - - parser.add_argument( - '--directory', - default=None, - help= - 'Full path to output directory where files 
will be saved. If not specified, timestamped directory will be created in current path.' - ) - parser.add_argument('--image_topic', - '-i', - default=None, - help='Image topic for images in bagfile. Default topic will be used if not specified.') - - parser.add_argument( - '--gnc_config', - '-g', - default=None, - help= - 'GNC config to use as a base for the parameter sweep. Parameters not swept over will be taken from this file. Default will be gnc.config in astrobee/config' - ) - #TODO(rsoussan): pass number of processes to run and parallel and parameter ranges as arguments - - args = parser.parse_args() - - gnc_config = utilities.get_gnc_config(args.gnc_config) - - make_values_and_parameter_sweep(args.directory, args.bag_file, args.map_file, args.image_topic, gnc_config) + with open(os.path.join(output_dir, filename), "w") as values_file: + writer = csv.writer(values_file) + writer.writerow(value_names) + writer.writerows(values) + + +def make_values_and_parameter_sweep( + output_dir, bag_file, map_file, image_topic, gnc_config +): + output_dir = utilities.create_directory(output_dir) + print(("Output directory for results is {}".format(output_dir))) + + value_ranges, value_names = make_value_ranges() + save_values(value_names, value_ranges, "individual_value_ranges.csv", output_dir) + + all_value_combos = make_all_value_combinations(value_ranges) + save_values(value_names, all_value_combos, "all_value_combos.csv", output_dir) + + parameter_sweep( + all_value_combos, + value_names, + output_dir, + bag_file, + map_file, + image_topic, + gnc_config, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("bag_file", help="Full path to bagfile.") + parser.add_argument("map_file", help="Full path to map file.") + + parser.add_argument( + "--directory", + default=None, + help="Full path to output directory where files will be saved. 
If not specified, timestamped directory will be created in current path.", + ) + parser.add_argument( + "--image_topic", + "-i", + default=None, + help="Image topic for images in bagfile. Default topic will be used if not specified.", + ) + + parser.add_argument( + "--gnc_config", + "-g", + default=None, + help="GNC config to use as a base for the parameter sweep. Parameters not swept over will be taken from this file. Default will be gnc.config in astrobee/config", + ) + # TODO(rsoussan): pass number of processes to run and parallel and parameter ranges as arguments + + args = parser.parse_args() + + gnc_config = utilities.get_gnc_config(args.gnc_config) + + make_values_and_parameter_sweep( + args.directory, args.bag_file, args.map_file, args.image_topic, gnc_config + ) diff --git a/tools/ekf_bag/scripts/plot_creator.py b/tools/ekf_bag/scripts/plot_creator.py index 04502d2b3b..44424dc58e 100644 --- a/tools/ekf_bag/scripts/plot_creator.py +++ b/tools/ekf_bag/scripts/plot_creator.py @@ -18,48 +18,52 @@ # under the License. 
import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages import pandas as pd +from matplotlib.backends.backend_pdf import PdfPages -def create_pdf(dataframes, pdf_filename, write_values=False, directory='None'): - with PdfPages(pdf_filename) as pdf: - for dataframe in dataframes: - for index in range(len(dataframe.columns)): - plot = create_histogram(dataframe, index) - pdf.savefig(plot) - if write_values and dataframe.columns.name == 'values': - pdf.savefig(create_table(dataframe)) - if directory != 'None': - pdf.savefig(create_directory_page(directory)) +def create_pdf(dataframes, pdf_filename, write_values=False, directory="None"): + with PdfPages(pdf_filename) as pdf: + for dataframe in dataframes: + for index in range(len(dataframe.columns)): + plot = create_histogram(dataframe, index) + pdf.savefig(plot) + if write_values and dataframe.columns.name == "values": + pdf.savefig(create_table(dataframe)) + if directory != "None": + pdf.savefig(create_directory_page(directory)) def create_directory_page(directory): - directory_page = plt.figure() - directory_page.text(0.1, 0.9, "Output directory: " + directory) - return directory_page + directory_page = plt.figure() + directory_page.text(0.1, 0.9, "Output directory: " + directory) + return directory_page def create_table(dataframe): - sorted_dataframe = dataframe.sort_values('job_id') - figure = plt.figure() - ax = plt.subplot(111) - ax.axis('off') - ax.table(cellText=sorted_dataframe.values, colLabels=sorted_dataframe.columns, bbox=[0, 0, 1, 1]) - return figure + sorted_dataframe = dataframe.sort_values("job_id") + figure = plt.figure() + ax = plt.subplot(111) + ax.axis("off") + ax.table( + cellText=sorted_dataframe.values, + colLabels=sorted_dataframe.columns, + bbox=[0, 0, 1, 1], + ) + return figure def create_histogram(dataframe, index): - figure = plt.figure() - job_ids = dataframe['job_id'] - y = dataframe.iloc[:, index] - plt.scatter(job_ids, y, c=y, marker='+', s=150, 
linewidths=4, cmap=plt.cm.coolwarm) - column_name = dataframe.columns[index] - plt.title(column_name) - return figure + figure = plt.figure() + job_ids = dataframe["job_id"] + y = dataframe.iloc[:, index] + plt.scatter(job_ids, y, c=y, marker="+", s=150, linewidths=4, cmap=plt.cm.coolwarm) + column_name = dataframe.columns[index] + plt.title(column_name) + return figure def load_dataframe(files): - dataframes = [pd.read_csv(file) for file in files] - dataframe = pd.concat(dataframes) - return dataframe + dataframes = [pd.read_csv(file) for file in files] + dataframe = pd.concat(dataframes) + return dataframe diff --git a/tools/ekf_bag/scripts/rosbag_to_csv b/tools/ekf_bag/scripts/rosbag_to_csv index 4082fa8735..1d5fff5463 100755 --- a/tools/ekf_bag/scripts/rosbag_to_csv +++ b/tools/ekf_bag/scripts/rosbag_to_csv @@ -17,11 +17,11 @@ # License for the specific language governing permissions and limitations # under the License. -import environment - import os import sys +import environment + astrobee_map = None astrobee_bag = None if len(sys.argv) >= 2: diff --git a/tools/ekf_bag/scripts/sparse_map_eval b/tools/ekf_bag/scripts/sparse_map_eval index 92c2b41ee9..1a42cf1f23 100755 --- a/tools/ekf_bag/scripts/sparse_map_eval +++ b/tools/ekf_bag/scripts/sparse_map_eval @@ -17,12 +17,13 @@ # License for the specific language governing permissions and limitations # under the License. -import environment - -import sys import os +import sys import time +import environment + + def run_sparse_map_bag(astrobee_map, astrobee_bag): os.system('rosrun ekf_bag sparse_map_bag %s %s' % (astrobee_map, astrobee_bag)) diff --git a/tools/ekf_bag/scripts/streamlit_server.py b/tools/ekf_bag/scripts/streamlit_server.py index 670c29e272..ddb63fbb83 100644 --- a/tools/ekf_bag/scripts/streamlit_server.py +++ b/tools/ekf_bag/scripts/streamlit_server.py @@ -16,13 +16,15 @@ # under the License. 
-import streamlit as st +import glob import os +import subprocess + import numpy as np import pandas as pd +import streamlit as st from pdf2image import convert_from_path -import glob -import subprocess + def PDFtoJPEG(filename, folder, start=0): pages = convert_from_path(filename + ".pdf", 500, size=1000) @@ -30,7 +32,7 @@ def PDFtoJPEG(filename, folder, start=0): for i in range(len(pages)): pageList.append(folder + "/" + str(start + i) + ".jpg") pages[i].save(pageList[-1], "JPEG") - os.system("rm " + filename + "\".pdf\"") + os.system("rm " + filename + '".pdf"') return pageList @@ -71,13 +73,23 @@ def ekf_video(mapFile, bagFile, flags, folder): os.remove(f) cmd = "rosrun ekf_video ekf_video " + bagFile + " " + mapFile + flags os.system(cmd) - os.system("ffmpeg -i " + bagFile[:-4] + ".mkv -c copy " + folder + "/" + bagFile.split("/")[-1][:-4] + ".mp4") + os.system( + "ffmpeg -i " + + bagFile[:-4] + + ".mkv -c copy " + + folder + + "/" + + bagFile.split("/")[-1][:-4] + + ".mp4" + ) os.system("rm " + bagFile[:-4] + ".mkv") return folder + "/" + bagFile.split("/")[-1][:-4] + ".mp4" st.title("Astrobee Pose Estimation Evaluation") -script = st.selectbox("Script to Run:", ("ekf_diff", "ekf_graph", "ekf_video", "change branch")) +script = st.selectbox( + "Script to Run:", ("ekf_diff", "ekf_graph", "ekf_video", "change branch") +) if script == "ekf_diff": flags = "" @@ -110,7 +122,7 @@ def ekf_video(mapFile, bagFile, flags, folder): for ekfFile in ekfFiles: fileNames.append(ekfFile.split("/")[-1]) os.system("cp " + ekfFile + " " + fileNames[-1]) - folder = "cache/ekf_diff__" + "__".join(fileNames + flags.split()) + folder = "cache/ekf_diff__" + "__".join(fileNames + flags.split()) os.system("mkdir -p cache") os.system("mkdir -p " + folder) @@ -137,19 +149,23 @@ def ekf_video(mapFile, bagFile, flags, folder): if st.sidebar.checkbox("Do Not Rerun EKF"): flags += " --cached" if st.sidebar.checkbox("Define Start Time"): - flags += " --start " + \ - 
str(st.sidebar.number_input("", key=1, format="%f")) + flags += " --start " + str(st.sidebar.number_input("", key=1, format="%f")) if st.sidebar.checkbox("Define Stop Time"): - flags += " --end " + \ - str(st.sidebar.number_input("", key=2, format="%f")) + flags += " --end " + str(st.sidebar.number_input("", key=2, format="%f")) if st.sidebar.checkbox("Specify Robot"): - flags += " -r " + \ - st.sidebar.text_input("Enter Robot Name Without Extension/Path", key=1) - gitHash = str(subprocess.check_output(["git", "rev-parse", "origin/master"])[:-1], 'utf-8') - folder = "cache/ekf_graph__" + "__".join([gitHash, mapFile[5:], bagFile[5:]] + flags.split()) + flags += " -r " + st.sidebar.text_input( + "Enter Robot Name Without Extension/Path", key=1 + ) + gitHash = str( + subprocess.check_output(["git", "rev-parse", "origin/master"])[:-1], "utf-8" + ) + folder = "cache/ekf_graph__" + "__".join( + [gitHash, mapFile[5:], bagFile[5:]] + flags.split() + ) if st.sidebar.checkbox("Specify Image Topic", value=True): - flags += " -i " + \ - st.sidebar.text_input("", value="/mgt/img_sampler/nav_cam/image_record", key=2) + flags += " -i " + st.sidebar.text_input( + "", value="/mgt/img_sampler/nav_cam/image_record", key=2 + ) if st.button("Run"): os.system("mkdir -p cache") @@ -157,7 +173,9 @@ def ekf_video(mapFile, bagFile, flags, folder): imageList = ekf_graphJPEGs(mapFile, bagFile, flags, folder) - os.system("mv bags/" + bagFile[5:-4] + ".txt " + folder + "/" + bagFile[5:-4] + ".txt") + os.system( + "mv bags/" + bagFile[5:-4] + ".txt " + folder + "/" + bagFile[5:-4] + ".txt" + ) for image in imageList: st.image(image) @@ -177,28 +195,35 @@ def ekf_video(mapFile, bagFile, flags, folder): if st.sidebar.checkbox("Use JEM Mode: Top and Side View"): flags += " -j" if st.sidebar.checkbox("Specify Robot"): - flags += " -r " + \ - st.sidebar.text_input("Enter Robot Name Without Extension/Path") - folder = "cache/ekf_video__" + "__".join([bagFile.split("/")[-1], mapFile.split("/")[-1]] + 
flags.split()) + flags += " -r " + st.sidebar.text_input( + "Enter Robot Name Without Extension/Path" + ) + folder = "cache/ekf_video__" + "__".join( + [bagFile.split("/")[-1], mapFile.split("/")[-1]] + flags.split() + ) os.system("mkdir -p " + folder) if st.sidebar.checkbox("Specify Image Topic", value=True): - flags += " -i " + \ - st.sidebar.text_input("", value="/mgt/img_sampler/nav_cam/image_record") + flags += " -i " + st.sidebar.text_input( + "", value="/mgt/img_sampler/nav_cam/image_record" + ) if st.button("Run"): videoFile = ekf_video(mapFile, bagFile, flags, folder) - video_file = open(videoFile, 'rb') + video_file = open(videoFile, "rb") video_bytes = video_file.read() st.video(video_bytes) elif script == "change branch": - allBranches = str(subprocess.check_output(["git", "branch", "-a"]), 'utf-8').split("\n") + allBranches = str(subprocess.check_output(["git", "branch", "-a"]), "utf-8").split( + "\n" + ) branches = [] - for branch in allBranches: - if branch[2:17] == "remotes/origin/": branches.append(branch[17:]) + for branch in allBranches: + if branch[2:17] == "remotes/origin/": + branches.append(branch[17:]) branch = st.selectbox("Branch", branches) - currBranch = str(subprocess.check_output(["git", "branch"]), 'utf-8').split()[1] + currBranch = str(subprocess.check_output(["git", "branch"]), "utf-8").split()[1] if branch != currBranch: os.system("git checkout " + branch) if st.button("Build"): - os.system("make -C ../../../../../astrobee_build/native") + os.system("make -C ../../../../../astrobee_build/native") diff --git a/tools/ekf_bag/scripts/test_ekf_graph.py b/tools/ekf_bag/scripts/test_ekf_graph.py index 605995a71c..257b681585 100644 --- a/tools/ekf_bag/scripts/test_ekf_graph.py +++ b/tools/ekf_bag/scripts/test_ekf_graph.py @@ -19,72 +19,82 @@ import copy import math -import numpy as np import unittest -import ekf_graph +import ekf_graph +import numpy as np class TestRMSESequence(unittest.TestCase): + def 
test_prune_missing_timestamps_beginning_set(self): + a_times = np.arange(10.0) + a_xs = np.arange(10.0) + a_ys = np.arange(10.0) + 1.0 + a_zs = np.arange(10.0) + 2.0 + b_times = np.arange(5.0) + pruned_a = ekf_graph.prune_missing_timestamps( + a_xs, a_ys, a_zs, a_times, b_times + ) + self.assertEqual(len(pruned_a), len(b_times)) + self.assertTrue(np.allclose(pruned_a[:, 0], b_times, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) - def test_prune_missing_timestamps_beginning_set(self): - a_times = np.arange(10.0) - a_xs = np.arange(10.0) - a_ys = np.arange(10.0) + 1.0 - a_zs = np.arange(10.0) + 2.0 - b_times = np.arange(5.0) - pruned_a = ekf_graph.prune_missing_timestamps(a_xs, a_ys, a_zs, a_times, b_times) - self.assertEqual(len(pruned_a), len(b_times)) - self.assertTrue(np.allclose(pruned_a[:, 0], b_times, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) + def test_prune_missing_timestamps_middle_set(self): + a_times = np.arange(10.0) + a_xs = np.arange(10.0) + a_ys = np.arange(10.0) + 1.0 + a_zs = np.arange(10.0) + 2.0 + b_times = np.arange(3.0, 7.0) + pruned_a = ekf_graph.prune_missing_timestamps( + a_xs, a_ys, a_zs, a_times, b_times + ) + self.assertEqual(len(pruned_a), len(b_times)) + self.assertTrue(np.allclose(pruned_a[:, 0], b_times, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) - def test_prune_missing_timestamps_middle_set(self): - a_times = np.arange(10.0) - a_xs = np.arange(10.0) - a_ys = np.arange(10.0) + 1.0 - a_zs = np.arange(10.0) + 2.0 - b_times = np.arange(3.0, 7.0) - pruned_a = ekf_graph.prune_missing_timestamps(a_xs, a_ys, a_zs, a_times, b_times) - self.assertEqual(len(pruned_a), len(b_times)) - self.assertTrue(np.allclose(pruned_a[:, 0], 
b_times, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) + def test_prune_missing_timestamps_end_set(self): + a_times = np.arange(10.0) + a_xs = np.arange(10.0) + a_ys = np.arange(10.0) + 1.0 + a_zs = np.arange(10.0) + 2.0 + b_times = np.arange(7.0, 10.0) + pruned_a = ekf_graph.prune_missing_timestamps( + a_xs, a_ys, a_zs, a_times, b_times + ) + self.assertEqual(len(pruned_a), len(b_times)) + self.assertTrue(np.allclose(pruned_a[:, 0], b_times, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) - def test_prune_missing_timestamps_end_set(self): - a_times = np.arange(10.0) - a_xs = np.arange(10.0) - a_ys = np.arange(10.0) + 1.0 - a_zs = np.arange(10.0) + 2.0 - b_times = np.arange(7.0, 10.0) - pruned_a = ekf_graph.prune_missing_timestamps(a_xs, a_ys, a_zs, a_times, b_times) - self.assertEqual(len(pruned_a), len(b_times)) - self.assertTrue(np.allclose(pruned_a[:, 0], b_times, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) + def test_prune_missing_timestamps_scattered_set(self): + a_times = np.arange(10.0) + a_xs = np.arange(10.0) + a_ys = np.arange(10.0) + 1.0 + a_zs = np.arange(10.0) + 2.0 + b_times = np.array([1.0, 5.0, 6.0, 9.0]) + pruned_a = ekf_graph.prune_missing_timestamps( + a_xs, a_ys, a_zs, a_times, b_times + ) + self.assertEqual(len(pruned_a), len(b_times)) + self.assertTrue(np.allclose(pruned_a[:, 0], b_times, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) + self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) - def test_prune_missing_timestamps_scattered_set(self): - a_times = np.arange(10.0) - a_xs = np.arange(10.0) - a_ys = np.arange(10.0) + 1.0 - a_zs = np.arange(10.0) + 2.0 - b_times = np.array([1.0, 5.0, 6.0, 
9.0]) - pruned_a = ekf_graph.prune_missing_timestamps(a_xs, a_ys, a_zs, a_times, b_times) - self.assertEqual(len(pruned_a), len(b_times)) - self.assertTrue(np.allclose(pruned_a[:, 0], b_times, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 1], b_times + 1.0, rtol=0)) - self.assertTrue(np.allclose(pruned_a[:, 2], b_times + 2.0, rtol=0)) + def test_rmse_matrix_ones(self): + ones_mat = np.ones(shape=(5, 3)) + self.assertTrue( + np.isclose(ekf_graph.rmse_matrix(ones_mat), math.sqrt(3.0), rtol=0) + ) - def test_rmse_matrix_ones(self): - ones_mat = np.ones(shape=(5, 3)) - self.assertTrue(np.isclose(ekf_graph.rmse_matrix(ones_mat), math.sqrt(3.0), rtol=0)) + def test_rmse_matrix_1_2_2(self): + col_0 = np.ones(5) + col_1 = np.full(5, 2.0) + col_2 = np.full(5, 2.0) + mat = np.column_stack((col_0, col_1, col_2)) + self.assertTrue(np.isclose(ekf_graph.rmse_matrix(mat), 3.0, rtol=0)) - def test_rmse_matrix_1_2_2(self): - col_0 = np.ones(5) - col_1 = np.full(5,2.0) - col_2 = np.full(5,2.0) - mat = np.column_stack((col_0, col_1, col_2)) - self.assertTrue(np.isclose(ekf_graph.rmse_matrix(mat), 3.0, rtol=0)) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/tools/ekf_bag/scripts/utilities.py b/tools/ekf_bag/scripts/utilities.py index e75e526ae0..ccbccab013 100644 --- a/tools/ekf_bag/scripts/utilities.py +++ b/tools/ekf_bag/scripts/utilities.py @@ -18,44 +18,52 @@ import datetime import glob import os + import rospkg + # Forward errors so we can recover failures # even when running commands through multiprocessing # pooling def full_traceback(func): - import traceback, functools + import functools + import traceback + + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + msg = "{}\n\nOriginal {}".format(e, traceback.format_exc()) + raise type(e)(msg) - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except 
Exception as e: - msg = "{}\n\nOriginal {}".format(e, traceback.format_exc()) - raise type(e)(msg) + return wrapper - return wrapper def get_files(directory, file_string): - return glob.glob(os.path.join(directory, file_string)) + return glob.glob(os.path.join(directory, file_string)) + def create_directory(directory=None): - if directory == None: - directory = os.path.join(os.getcwd(), datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) - if os.path.exists(directory): - print(directory + " already exists!") - exit() - os.makedirs(directory) - return directory + if directory == None: + directory = os.path.join( + os.getcwd(), datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ) + if os.path.exists(directory): + print((directory + " already exists!")) + exit() + os.makedirs(directory) + return directory + def get_gnc_config(gnc_config): - if gnc_config == None: - #Load from astrobee/config/gnc.confg - astrobee_path = rospkg.RosPack().get_path('astrobee') - gnc_config = os.path.join(astrobee_path, 'config/gnc.config') - else: - if not os.path.exists(gnc_config): - print("GNC config does not exists! {}".format(gnc_config)) - exit() - - return gnc_config + if gnc_config == None: + # Load from astrobee/config/gnc.confg + astrobee_path = rospkg.RosPack().get_path("astrobee") + gnc_config = os.path.join(astrobee_path, "config/gnc.config") + else: + if not os.path.exists(gnc_config): + print(("GNC config does not exists! {}".format(gnc_config))) + exit() + + return gnc_config diff --git a/tools/ekf_video/scripts/ekf_video b/tools/ekf_video/scripts/ekf_video index 1fa6a37de6..9445ab6465 100755 --- a/tools/ekf_video/scripts/ekf_video +++ b/tools/ekf_video/scripts/ekf_video @@ -18,15 +18,16 @@ # under the License. 
import argparse -import sys +import math import os import os.path -import math +import sys import time sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/../../ekf_bag/scripts') import environment + def run_ekf(astrobee_map, astrobee_bag, output_file, ekf_in_bag = False, features_in_bag=False, image_topic=None, use_jem=False): cmd = '' if ekf_in_bag else '--run_ekf %s' % ('' if features_in_bag else '--gen_features') diff --git a/tools/faults_during_bag.py b/tools/faults_during_bag.py index c89d4c8dca..0bcba6147d 100644 --- a/tools/faults_during_bag.py +++ b/tools/faults_during_bag.py @@ -1,17 +1,27 @@ import sys -import rospy + import rosbag +import rospy +from ff_msgs.msg import Fault, FaultState -from ff_msgs.msg import FaultState -from ff_msgs.msg import Fault def process(msg, start): for f in msg.faults: # Fault 21 is perching arm node missing --> ignore if f.id != 21: elapsed = f.time_of_fault - start - print("secs_from_start=%d fault_id=%d timestamp=%d.%09d" % (int(elapsed.secs), f.id, f.time_of_fault.secs, f.time_of_fault.nsecs)) - + print( + ( + "secs_from_start=%d fault_id=%d timestamp=%d.%09d" + % ( + int(elapsed.secs), + f.id, + f.time_of_fault.secs, + f.time_of_fault.nsecs, + ) + ) + ) + if len(sys.argv) < 3: print("Usage: faults_during_bag.py short_bag_for_period ars_default_bag") @@ -20,18 +30,24 @@ def process(msg, start): short_bag_fn = sys.argv[1] default_bag_fn = sys.argv[2] -print("reading time bounds of %s" % short_bag_fn) +print(("reading time bounds of %s" % short_bag_fn)) short_bag = rosbag.Bag(short_bag_fn) start_ts = short_bag.get_start_time() end_ts = short_bag.get_end_time() short_bag.close() -print( "will filter events of %s starting at %f to %f" % - (default_bag_fn, start_ts, end_ts) ) - -default_bag = rosbag.Bag(default_bag_fn) +print( + ( + "will filter events of %s starting at %f to %f" + % (default_bag_fn, start_ts, end_ts) + ) +) -for topic, msg, time in default_bag.read_messages(topics=["/mgt/sys_monitor/state"], 
start_time=rospy.Time(start_ts), end_time=rospy.Time(end_ts)): - process(msg, rospy.Time(start_ts)) +default_bag = rosbag.Bag(default_bag_fn) - +for topic, msg, time in default_bag.read_messages( + topics=["/mgt/sys_monitor/state"], + start_time=rospy.Time(start_ts), + end_time=rospy.Time(end_ts), +): + process(msg, rospy.Time(start_ts)) diff --git a/tools/gds_helper/src/gds_simulator.py b/tools/gds_helper/src/gds_simulator.py index 331a699bf1..daf87674ce 100755 --- a/tools/gds_helper/src/gds_simulator.py +++ b/tools/gds_helper/src/gds_simulator.py @@ -17,21 +17,32 @@ # License for the specific language governing permissions and limitations # under the License. -import rospy -import rosgraph +import _thread +import getpass import os import pwd -import time -import getpass -import socket -import thread import signal +import socket +import time +from os import name, system -from ff_msgs.msg import AckStamped, GuestScienceState, GuestScienceConfig, \ - GuestScienceData, AccessControlStateStamped, CommandStamped, CommandArg, \ - GuestScienceApk, GuestScienceCommand, AckCompletedStatus, AckStatus +import rosgraph +import rospy +from ff_msgs.msg import ( + AccessControlStateStamped, + AckCompletedStatus, + AckStamped, + AckStatus, + CommandArg, + CommandStamped, + GuestScienceApk, + GuestScienceCommand, + GuestScienceConfig, + GuestScienceData, + GuestScienceState, +) from std_msgs.msg import Header -from os import system, name + class Queue: def __init__(self): @@ -41,7 +52,7 @@ def isEmpty(self): return self.items == [] def enqueue(self, item): - self.items.insert(0,item) + self.items.insert(0, item) def dequeue(self): return self.items.pop() @@ -49,9 +60,10 @@ def dequeue(self): def size(self): return len(self.items) -pub = rospy.Publisher('comm/dds/command', CommandStamped, queue_size=10) -base_id = 'LocalParticipant' +pub = rospy.Publisher("comm/dds/command", CommandStamped, queue_size=10) + +base_id = "LocalParticipant" count = 0 requesting = False state = None @@ 
-70,27 +82,31 @@ def size(self): ACTION_GO_BACK = 1 ACTION_EXIT = 2 + def get_user(): # TODO Check portability user = getpass.getuser() machine = socket.gethostname() - return user + '@' + machine + return user + "@" + machine + def clear(): # for windows - if name == 'nt': - _ = system('cls') + if name == "nt": + _ = system("cls") # for mac and linux(here, os.name is 'posix') else: - _ = system('clear') + _ = system("clear") time.sleep(0.5) - print "\n ------- Ground Data System Local Simulator -------\n\n" + print("\n ------- Ground Data System Local Simulator -------\n\n") + def request_control(): global requesting requesting = True - send_command('requestControl') + send_command("requestControl") + def grab_control(msg): global requesting @@ -99,33 +115,36 @@ def grab_control(msg): arg.data_type = 5 arg.s = msg.cookie - send_command('grabControl', [arg]) + send_command("grabControl", [arg]) requesting = False -def send_command(name, args = []): + +def send_command(name, args=[]): global count, last_command_id, new_ack, data_response new_ack = False - #data_response = None + # data_response = None cmd = CommandStamped() cmd.header = Header() cmd.header.stamp = rospy.Time.now() - cmd.header.frame_id = 'world' + cmd.header.frame_id = "world" cmd.cmd_name = name cmd.args = args last_command_id = str(count) + base_id cmd.cmd_id = last_command_id cmd.cmd_src = user - cmd.cmd_origin = 'ground' - cmd.subsys_name = '' + cmd.cmd_origin = "ground" + cmd.subsys_name = "" pub.publish(cmd) count = count + 1 + def get_manager_config(): pass + def access_control_callback(data): global current_controller @@ -134,56 +153,72 @@ def access_control_callback(data): else: current_controller = data.controller + def ack_callback(data): global fault_state, new_ack, ack_response if data.cmd_id == last_command_id: - if (data.status.status == AckStatus.COMPLETED and - data.completed_status.status != AckCompletedStatus.OK): + if ( + data.status.status == AckStatus.COMPLETED + and 
data.completed_status.status != AckCompletedStatus.OK + ): fault_state = True ack_response = data new_ack = True + def gs_state_callback(data): global state state = data + def gs_config_callback(data): global config config = data + def gs_data_callback(data): global data_response data_response.enqueue(data) - #data_response = data + # data_response = data + def start_subscribers(): - rospy.init_node('gds_gs_simulator') + rospy.init_node("gds_gs_simulator") rospy.Subscriber("gs/gs_manager/state", GuestScienceState, gs_state_callback) rospy.Subscriber("gs/gs_manager/config", GuestScienceConfig, gs_config_callback) rospy.Subscriber("gs/data", GuestScienceData, gs_data_callback) rospy.Subscriber("mgt/ack", AckStamped, ack_callback) - rospy.Subscriber("mgt/access_control/state", AccessControlStateStamped, access_control_callback) + rospy.Subscriber( + "mgt/access_control/state", AccessControlStateStamped, access_control_callback + ) # Wait for master to register subs and pubs - rospy.sleep(2.) + rospy.sleep(2.0) + def gain_control(): timer = 0 if current_controller == None: - print "Astrobee's current controller is undetermined. We cannot proceed" + print("Astrobee's current controller is undetermined. 
We cannot proceed") return False elif current_controller == user: - print ("Astrobee's controller is: " + current_controller + "\n" - + "You are the current controller") - raw_input("Press any key to continue") + print( + ( + "Astrobee's controller is: " + + current_controller + + "\n" + + "You are the current controller" + ) + ) + eval(input("Press any key to continue")) return True else: - print "Astrobee's controller is: " + current_controller + "\n" - raw_input("Press any key to grab control of the robot") + print(("Astrobee's controller is: " + current_controller + "\n")) + eval(input("Press any key to grab control of the robot")) # Request and grab control - print ' > Requesting control' + print(" > Requesting control") request_control() while requesting and timer < 20: @@ -191,71 +226,75 @@ def gain_control(): timer += 1 if fault_state: - print " > Request failed with message: " + ack_response.message + print((" > Request failed with message: " + ack_response.message)) return False elif timer >= 20: - print ' > Timeout' + print(" > Timeout") return False timer = 0 - print ' > Grabbing control' + print(" > Grabbing control") while current_controller != user: time.sleep(0.5) timer += 1 if fault_state: - print " > Request failed with message: " + ack_response.message + print((" > Request failed with message: " + ack_response.message)) return False elif timer >= 20: - print ' > Timeout' + print(" > Timeout") return False return True + def get_apk_info(): # Wait until the GS_manager shows up timer = 0 - print (" > Waiting for Guest Science Manager communication." - " Make sure the app is running in the android device and that you " - "can ping it from this computer") + print( + " > Waiting for Guest Science Manager communication." 
+ " Make sure the app is running in the android device and that you " + "can ping it from this computer" + ) while (state == None or config == None) and timer < 30: time.sleep(0.5) timer += 1 if timer >= 30: - print ' > Timeout' + print(" > Timeout") return False if state.serial != config.serial: - print ' > Guest Science state and config do not match' + print(" > Guest Science state and config do not match") return False - print ' > Guest Science Manager found!' + print(" > Guest Science Manager found!") return True + def select_app(): global apps, current_app # Show available apps and states apps = config.apks - print '\nAvailable Guest Science applications in HLP' + print("\nAvailable Guest Science applications in HLP") for i, app in enumerate(apps): - app_state = 'Running' if state.runningApks[i] else 'Stopped' - print str(i + 1) + ') ' + app.short_name + ' ' + app_state + app_state = "Running" if state.runningApks[i] else "Stopped" + print((str(i + 1) + ") " + app.short_name + " " + app_state)) - print str(len(apps) + 1) + ') ' + 'Exit' + print((str(len(apps) + 1) + ") " + "Exit")) # Choose an app try: - selection = input("\nType the number of app you want to select: ") + selection = eval(input("\nType the number of app you want to select: ")) except: - print ' > Invalid entry' + print(" > Invalid entry") time.sleep(1) return None @@ -263,109 +302,127 @@ def select_app(): return -1 if selection < 1 or selection > len(apps): - print ' > Invalid entry' + print(" > Invalid entry") time.sleep(1) return None current_app = apps[selection - 1] - return (selection - 1) + return selection - 1 + def select_action(): - print ("a) See available commands\n" - "b) Start application\n" - "c) Stop application\n" - "d) Send Custom Guest Science command\n" - "e) Go back to apps menu\n" - "f) Exit") - - option = raw_input("\nType an option: ") + print( + "a) See available commands\n" + "b) Start application\n" + "c) Stop application\n" + "d) Send Custom Guest Science command\n" 
+ "e) Go back to apps menu\n" + "f) Exit" + ) + + option = eval(input("\nType an option: ")) return option + def input_thread(a_list): - raw_input() + eval(input()) a_list.append(True) def print_gs_feedback(): global data_response - print 'Waiting for feedback (command execution).\n' + print("Waiting for feedback (command execution).\n") # Print ACK while new_ack == False: time.sleep(0.5) - print '> Execution response' - if (ack_response.status.status == AckStatus.COMPLETED and - ack_response.completed_status.status == AckCompletedStatus.OK): - print " Execution was successful!\n" + print("> Execution response") + if ( + ack_response.status.status == AckStatus.COMPLETED + and ack_response.completed_status.status == AckCompletedStatus.OK + ): + print(" Execution was successful!\n") else: - print " Something went wrong\n" - print ack_response.message + print(" Something went wrong\n") + print((ack_response.message)) # Print GS Data - print 'Waiting for feedback (app response).\n' - print ("Please note that some apps may send a confirmation when receiving" - " a new command and then data feedback. Since we cannot know when" - " the app will send feedback, we will listen until you manually" - " stop it.\n You can stop listening by pressing ENTER") + print("Waiting for feedback (app response).\n") + print( + "Please note that some apps may send a confirmation when receiving" + " a new command and then data feedback. 
Since we cannot know when" + " the app will send feedback, we will listen until you manually" + " stop it.\n You can stop listening by pressing ENTER" + ) # Variable and thread used to stop the loop a_list = [] - thread.start_new_thread(input_thread, (a_list,)) + _thread.start_new_thread(input_thread, (a_list,)) while not a_list: if not data_response.isEmpty(): qsize = data_response.size() data = data_response.items[qsize - 1] if data.apk_name == current_app.apk_name: - print ('\n> Data response\n Topic: ' + data.topic + - '\n Data: ' + str(data.data)) + print( + ( + "\n> Data response\n Topic: " + + data.topic + + "\n Data: " + + str(data.data) + ) + ) data_response.dequeue() def execute_action(selection): final_act = None - print '\nYou selected ' + apps[selection].short_name + '. Choose an option\n' + print(("\nYou selected " + apps[selection].short_name + ". Choose an option\n")) option = select_action() arg = CommandArg() arg.data_type = 5 arg.s = apps[selection].apk_name - command = "" # for when an the user supplies the command index - command_str = "" # For when the user enters the actual command - - if option == 'a': + command = "" # for when an the user supplies the command index + command_str = "" # For when the user enters the actual command + + if option == "a": # List commands clear() print_app_cmd(selection) final_act = ACTION_CONTINUE - elif option == 'b': + elif option == "b": # Start app clear() - send_command('startGuestScience', [arg]) + send_command("startGuestScience", [arg]) print_gs_feedback() final_act = ACTION_CONTINUE - elif option == 'c': + elif option == "c": # Stop app clear() if state.runningApks[selection] == False: - print '\n > App already stopped' + print("\n > App already stopped") else: - send_command('stopGuestScience', [arg]) + send_command("stopGuestScience", [arg]) print_gs_feedback() final_act = ACTION_CONTINUE - elif option == 'd': + elif option == "d": # Execute command command = None while True: clear() num_cmds = 
len(apps[selection].commands) print_app_cmd(selection) - print str(num_cmds + 1) + ') Exit program' + print((str(num_cmds + 1) + ") Exit program")) - command_str = raw_input('\nType the number of the command you wish to send or the command string: ') + command_str = eval( + input( + "\nType the number of the command you wish to send or the command string: " + ) + ) try: # Convert to an integer, the number of the command command = int(command_str) @@ -374,18 +431,18 @@ def execute_action(selection): # Not an integer if command_str is None or command_str == "": # The user did not enter anything at all, so try again - print ' > Invalid entry' + print(" > Invalid entry") time.sleep(1) continue else: # Pass the full command and exit the loop break - + if command == num_cmds + 1: return ACTION_EXIT if command < 1 or command > len(apps[selection].commands): - print ' > Invalid entry' + print(" > Invalid entry") time.sleep(1) else: command -= 1 @@ -398,40 +455,43 @@ def execute_action(selection): arg2.s = apps[selection].commands[command].command else: arg2.s = command_str - + clear() - send_command('customGuestScience', [arg, arg2]) + send_command("customGuestScience", [arg, arg2]) print_gs_feedback() final_act = ACTION_CONTINUE - elif option == 'e': + elif option == "e": # Go back final_act = ACTION_GO_BACK - elif option == 'f': + elif option == "f": # Exit final_act = ACTION_EXIT else: - print ' > Invalid entry' + print(" > Invalid entry") final_act = ACTION_CONTINUE if final_act != ACTION_GO_BACK and final_act != ACTION_EXIT: - raw_input("\nPress any key to continue") + eval(input("\nPress any key to continue")) return final_act + def print_app_cmd(selection): - print '\nAvailable commands' + print("\nAvailable commands") for i, cmd in enumerate(apps[selection].commands): - print str(i + 1) + ') ' + cmd.name + '\n\t' + cmd.command + print((str(i + 1) + ") " + cmd.name + "\n\t" + cmd.command)) + def is_ros_running(): try: - rosgraph.Master('/rostopic').getPid() + 
rosgraph.Master("/rostopic").getPid() except socket.error: return False return True + def main(): global user timer = 0 @@ -439,20 +499,20 @@ def main(): clear() # Check ROS master presence - print ' > Waiting for ROS communication...\n' + print(" > Waiting for ROS communication...\n") while not is_ros_running(): if timer == 0: - print ' > Are you running Astrobee Robot Software?\n' + print(" > Are you running Astrobee Robot Software?\n") elif timer == 30: - print ' > Timeout. Shutting down...' + print(" > Timeout. Shutting down...") time.sleep(1) exit() timer += 1 time.sleep(1) - print ' > ROS Master has been found!\n' + print(" > ROS Master has been found!\n") # Get the user user = get_user() @@ -462,14 +522,16 @@ def main(): # Grab control if gain_control(): - print '\nCongrats! You are now the Astrobee controller\n' + print("\nCongrats! You are now the Astrobee controller\n") else: - print '\nUnable to grab control of Astrobee. Shutting down...' + print("\nUnable to grab control of Astrobee. Shutting down...") exit() # Get info from Guest Science Manager if not get_apk_info(): - print '\nUnable to communicate with the Guest Science Manager. Shutting down...' + print( + "\nUnable to communicate with the Guest Science Manager. Shutting down..." + ) exit() time.sleep(3) @@ -497,10 +559,12 @@ def main(): if return_val == ACTION_EXIT: break + def handler(signum, frame): - print '\nShutting down gracefully...' 
+ print("\nShutting down gracefully...") exit() -if __name__ == '__main__': + +if __name__ == "__main__": signal.signal(signal.SIGINT, handler) main() diff --git a/tools/gnc_visualizer/dds/reader.py b/tools/gnc_visualizer/dds/reader.py index f9ced58094..9516019ab2 100644 --- a/tools/gnc_visualizer/dds/reader.py +++ b/tools/gnc_visualizer/dds/reader.py @@ -5,29 +5,27 @@ ############################################################################## """Samples's reader.""" -from __future__ import print_function -from sys import path as sysPath + from os import path as osPath +from sys import path as sysPath from time import sleep + filepath = osPath.dirname(osPath.realpath(__file__)) sysPath.append(filepath + "../") import rticonnextdds_connector as rti - - -connector = rti.Connector("MyParticipantLibrary::Zero", - filepath + "/PositionTest.xml") +connector = rti.Connector("MyParticipantLibrary::Zero", filepath + "/PositionTest.xml") inputDDS = connector.getInput("MySubscriber::MyTextReader") for i in range(1, 500): print("i loop") inputDDS.take() numOfSamples = inputDDS.samples.getLength() - for j in range(1, numOfSamples+1): - print("j loop") + for j in range(1, numOfSamples + 1): + print("j loop") if inputDDS.infos.isValid(j): # Or you can just access the field directly - pose = inputDDS.samples.getDictionary(j) + pose = inputDDS.samples.getDictionary(j) toPrint = "pose: %s" % repr(pose) print(toPrint) diff --git a/tools/gnc_visualizer/dds/writer.py b/tools/gnc_visualizer/dds/writer.py index 9d5c978f1c..4d48f99160 100644 --- a/tools/gnc_visualizer/dds/writer.py +++ b/tools/gnc_visualizer/dds/writer.py @@ -5,15 +5,15 @@ ############################################################################## """Samples's writer.""" -from sys import path as sysPath from os import path as osPath +from sys import path as sysPath from time import sleep + filepath = osPath.dirname(osPath.realpath(__file__)) sysPath.append(filepath + "../") import rticonnextdds_connector as rti 
-connector = rti.Connector("MyParticipantLibrary::Zero", - filepath + "/PositionTest.xml") +connector = rti.Connector("MyParticipantLibrary::Zero", filepath + "/PositionTest.xml") outputDDS = connector.getOutput("MyPublisher::MyTextWriter") for i in range(1, 500): diff --git a/tools/gnc_visualizer/scripts/communications/com_manager.py b/tools/gnc_visualizer/scripts/communications/com_manager.py index b69cd9dbc2..749a947d23 100644 --- a/tools/gnc_visualizer/scripts/communications/com_manager.py +++ b/tools/gnc_visualizer/scripts/communications/com_manager.py @@ -17,11 +17,13 @@ # License for the specific language governing permissions and limitations # under the License. -import configuration_support as configuration import sys -DDS_COM = 'dds' -ROS_COM = 'ros' +import configuration_support as configuration + +DDS_COM = "dds" +ROS_COM = "ros" + class ComManager: default_com_method = ROS_COM @@ -33,11 +35,11 @@ class ComManager: executor = None config = configuration.Preferences() - reset_ekf = lambda self : self.executor.reset_ekf() - initialize_bias = lambda self : self.executor.initialize_bias() - toggle_pmc = lambda self, current_value : self.executor.toggle_pmc(current_value) + reset_ekf = lambda self: self.executor.reset_ekf() + initialize_bias = lambda self: self.executor.initialize_bias() + toggle_pmc = lambda self, current_value: self.executor.toggle_pmc(current_value) - def __init__(self, com_method = None): + def __init__(self, com_method=None): if com_method != None: self.set_com_method(self.com_method) @@ -69,7 +71,7 @@ def was_shutdown(self): return com.is_shutdown() - def set_com_method(self, com_method, args = None): + def set_com_method(self, com_method, args=None): if com_method == DDS_COM: self.current_com_method = com_method partition_name = None @@ -87,12 +89,20 @@ def set_com_method(self, com_method, args = None): if "public_ip" in args: public_ip = args["public_ip"] - if self.config.set_preferences(partition_name, given_peer, domain, 
public_ip): + if self.config.set_preferences( + partition_name, given_peer, domain, public_ip + ): # Print result - print >> sys.stderr, (self.config.get_all_warnings() + self.config.get_all_info()) + print >>sys.stderr, ( + self.config.get_all_warnings() + self.config.get_all_info() + ) else: # Print result and exit - print >> sys.stderr, (self.config.get_all_errors() + self.config.get_all_warnings() + self.config.get_all_info()) + print >>sys.stderr, ( + self.config.get_all_errors() + + self.config.get_all_warnings() + + self.config.get_all_info() + ) return False elif com_method == ROS_COM: @@ -101,45 +111,101 @@ def set_com_method(self, com_method, args = None): self.current_com_method = self.default_com_method global com - com = __import__(self.current_com_method + '_support', globals(), locals()) + com = __import__(self.current_com_method + "_support", globals(), locals()) return True def __start_ros_com(self): - self.subs_manager = com.RosSubscriberManager('gnc_visualizer', self.viz.print_to_log) - self.subs_manager.add_subscriber('rosout', "/rosout", self.viz.log_callback, com.Log) - self.subs_manager.add_subscriber('truth', "/loc/truth", self.viz.ground_truth_callback, com.PoseStamped) - self.subs_manager.add_subscriber('ml_pose', "/sparse_mapping/pose", self.viz.ml_pose_callback, com.PoseStamped) - self.subs_manager.add_subscriber('ekf', "/gnc/ekf", self.viz.ekf_callback, com.EkfState) - self.subs_manager.add_subscriber('ctl_cmd', "/gnc/ctl/command", self.viz.command_callback, com.FamCommand) - self.subs_manager.add_subscriber('ctl_traj', "/gnc/ctl/traj", self.viz.traj_callback, com.ControlState) - self.subs_manager.add_subscriber('ctl_shaper', "/gnc/ctl/shaper", self.viz.shaper_callback, \ - com.ControlState) - self.subs_manager.add_subscriber('pmc_cmd', "/hw/pmc/command", self.viz.pmc_callback, com.PmcCommand) + self.subs_manager = com.RosSubscriberManager( + "gnc_visualizer", self.viz.print_to_log + ) + self.subs_manager.add_subscriber( + "rosout", 
"/rosout", self.viz.log_callback, com.Log + ) + self.subs_manager.add_subscriber( + "truth", "/loc/truth", self.viz.ground_truth_callback, com.PoseStamped + ) + self.subs_manager.add_subscriber( + "ml_pose", + "/sparse_mapping/pose", + self.viz.ml_pose_callback, + com.PoseStamped, + ) + self.subs_manager.add_subscriber( + "ekf", "/gnc/ekf", self.viz.ekf_callback, com.EkfState + ) + self.subs_manager.add_subscriber( + "ctl_cmd", "/gnc/ctl/command", self.viz.command_callback, com.FamCommand + ) + self.subs_manager.add_subscriber( + "ctl_traj", "/gnc/ctl/traj", self.viz.traj_callback, com.ControlState + ) + self.subs_manager.add_subscriber( + "ctl_shaper", "/gnc/ctl/shaper", self.viz.shaper_callback, com.ControlState + ) + self.subs_manager.add_subscriber( + "pmc_cmd", "/hw/pmc/command", self.viz.pmc_callback, com.PmcCommand + ) self.executor = com.RosCommandExecutor() def __start_dds_com(self): self.subs_manager = com.DdsSubscriberManager() self.subs_manager.add_subscriber( - "sparse_mapping_pose_sub", com.DdsSubscriber( - "SparseMappingPoseStampedSubscriber::SparseMappingPoseStampedReader", self.viz.ml_pose_callback, com.PoseStamped), True) + "sparse_mapping_pose_sub", + com.DdsSubscriber( + "SparseMappingPoseStampedSubscriber::SparseMappingPoseStampedReader", + self.viz.ml_pose_callback, + com.PoseStamped, + ), + True, + ) self.subs_manager.add_subscriber( - "ekf_sub", com.DdsSubscriber( - "EkfSubscriber::EkfReader", self.viz.ekf_callback, com.EkfState), True) + "ekf_sub", + com.DdsSubscriber( + "EkfSubscriber::EkfReader", self.viz.ekf_callback, com.EkfState + ), + True, + ) self.subs_manager.add_subscriber( - "fam_sub", com.DdsSubscriber( - "FamCmdSubscriber::FamCmdReader", self.viz.command_callback, com.FamCommand), True) + "fam_sub", + com.DdsSubscriber( + "FamCmdSubscriber::FamCmdReader", + self.viz.command_callback, + com.FamCommand, + ), + True, + ) self.subs_manager.add_subscriber( - "shaper_sub", com.DdsSubscriber( - 
"GncControlStateSubscriber::ShaperReader", self.viz.shaper_callback, com.ControlState), True) + "shaper_sub", + com.DdsSubscriber( + "GncControlStateSubscriber::ShaperReader", + self.viz.shaper_callback, + com.ControlState, + ), + True, + ) self.subs_manager.add_subscriber( - "traj_sub", com.DdsSubscriber( - "GncControlStateSubscriber::TrajReader", self.viz.traj_callback, com.ControlState), True) + "traj_sub", + com.DdsSubscriber( + "GncControlStateSubscriber::TrajReader", + self.viz.traj_callback, + com.ControlState, + ), + True, + ) self.subs_manager.add_subscriber( - "pmc_sub", com.DdsSubscriber( - "PmcCmdSubscriber::PmcReader", self.viz.pmc_callback, com.PmcCommand), True) + "pmc_sub", + com.DdsSubscriber( + "PmcCmdSubscriber::PmcReader", self.viz.pmc_callback, com.PmcCommand + ), + True, + ) self.subs_manager.add_subscriber( - "log_sub", com.DdsSubscriber( - "LogSubscriber::LogReader", self.viz.log_callback, com.Log), True) + "log_sub", + com.DdsSubscriber( + "LogSubscriber::LogReader", self.viz.log_callback, com.Log + ), + True, + ) self.executor = com.DdsCommandExecutor() diff --git a/tools/gnc_visualizer/scripts/communications/configuration_support.py b/tools/gnc_visualizer/scripts/communications/configuration_support.py index cb7caa9e7e..4b89e014e0 100644 --- a/tools/gnc_visualizer/scripts/communications/configuration_support.py +++ b/tools/gnc_visualizer/scripts/communications/configuration_support.py @@ -1,22 +1,26 @@ #!/usr/bin/env python -import xml.dom.minidom as minidom +import datetime import socket import string -import datetime -import ConfigParser +import xml.dom.minidom as minidom +from collections import OrderedDict from os import path as osPath from os import remove -from collections import OrderedDict + +import ConfigParser filepath = osPath.dirname(osPath.realpath(__file__)) -BASE_DDS_PROFILE_FILE=filepath + "/dds_types/BaseDDSProfile.xml" -DDS_PROFILE_FILE=filepath + "/dds_types/CurrentDDSProfile.xml" -CONFIG_FILE=filepath + "/config.ini" 
+BASE_DDS_PROFILE_FILE = filepath + "/dds_types/BaseDDSProfile.xml" +DDS_PROFILE_FILE = filepath + "/dds_types/CurrentDDSProfile.xml" +CONFIG_FILE = filepath + "/config.ini" + class Preferences: - def __init__(self, partition_name = None, given_peer = None, domain = None, public_ip = None): + def __init__( + self, partition_name=None, given_peer=None, domain=None, public_ip=None + ): self.config = ConfigParser.ConfigParser() self.dom = None self.partition_name = partition_name @@ -29,7 +33,7 @@ def __init__(self, partition_name = None, given_peer = None, domain = None, publ self.info = OrderedDict() def is_valid_ipv4(self, ip): - if ip != None and ip.count('.') == 3: + if ip != None and ip.count(".") == 3: try: socket.inet_aton(ip) except socket.error: @@ -47,19 +51,23 @@ def validate_initial_peer(self, peer): dictionary[peer] = peer return dictionary - self.add_warning("The given peer is NOT a valid IP, valid profiles " + - "will be checked") + self.add_warning( + "The given peer is NOT a valid IP, valid profiles " + "will be checked" + ) # Check if input is a valid profile for key, value in self.initial_peers.items(): if string.lower(peer) == key: - self.add_warning("The given peer IS a valid profile. IP: " + - value + " will be used.") + self.add_warning( + "The given peer IS a valid profile. IP: " + value + " will be used." 
+ ) dictionary[key] = value return dictionary - self.add_warning("The given peer is NOT a valid profile, IPs from " + - "config file will be used") + self.add_warning( + "The given peer is NOT a valid profile, IPs from " + + "config file will be used" + ) return None def validate_public_ip(self, ip): @@ -69,8 +77,8 @@ def validate_public_ip(self, ip): return -1 def get_robot_name(self): - if self.partition_name == None: # Robot name not given at start - self.partition_name = self.config.get('RobotName', 'name') + if self.partition_name == None: # Robot name not given at start + self.partition_name = self.config.get("RobotName", "name") if self.partition_name.strip() == "": self.partition_name = None @@ -79,14 +87,14 @@ def get_robot_name(self): def get_initial_peers(self): if len(self.initial_peers) == 0: - peers_items = self.config.items('DiscoveryPeers') + peers_items = self.config.items("DiscoveryPeers") for key, ip in peers_items: if key.strip() != "" and ip.strip() != "" and self.is_valid_ipv4(ip): self.initial_peers[key] = ip else: self.add_warning("There are unvalid profile/IPs in the config file") - if self.given_peer != None: # Robot IP was given at start + if self.given_peer != None: # Robot IP was given at start validated_peer = self.validate_initial_peer(self.given_peer) if validated_peer != None: self.initial_peers.clear() @@ -102,17 +110,19 @@ def get_initial_peers(self): def get_domain_id(self): fallback = False - if self.domain == None: # Domain id not given at start + if self.domain == None: # Domain id not given at start # Read from config file - self.domain = self.config.get('DdsDomain', 'domain') + self.domain = self.config.get("DdsDomain", "domain") self.add_warning("Domain ID will be read from config file") fallback = True if not self.domain.strip().isdigit(): self.domain = None if not fallback: - self.add_warning("Argument Domain ID is not valid, we will " + - "fallback to the value in the configuration file") + self.add_warning( + "Argument 
Domain ID is not valid, we will " + + "fallback to the value in the configuration file" + ) self.get_domain_id() return self.domain @@ -121,11 +131,11 @@ def get_public_ip(self): returnValue = None # Get ip from config file and put in a temp variable - tmp_ip = self.config.get('TRek', 'public_ip') + tmp_ip = self.config.get("TRek", "public_ip") if self.public_ip == None: # IP not given at startup if tmp_ip == None or tmp_ip == "": - pass # No public IP anywhere, ignore quietly + pass # No public IP anywhere, ignore quietly else: # Validate config file value returnValue = self.validate_public_ip(tmp_ip) @@ -147,8 +157,11 @@ def write_node_list(self, node_list, sub_tag_name, children): children = [children] if isinstance(children, basestring) else children for child in children: new_node = node_list.ownerDocument.createElement(sub_tag_name) - new_node.appendChild(new_node.ownerDocument.createTextNode( - children[child] if isinstance(children, dict) else child)) + new_node.appendChild( + new_node.ownerDocument.createTextNode( + children[child] if isinstance(children, dict) else child + ) + ) node_list.appendChild(new_node) def override_node_children(self, node_list, sub_tag_name, children): @@ -156,44 +169,49 @@ def override_node_children(self, node_list, sub_tag_name, children): self.write_node_list(node_list, sub_tag_name, children) def write_xml_file(self, file_out): - self.dom.writexml( open(file_out, 'w'), - indent="", - addindent="", - newl='') + self.dom.writexml(open(file_out, "w"), indent="", addindent="", newl="") def replace_initial_peers(self): new_peers = self.get_initial_peers() if new_peers == None or len(new_peers) == 0: - self.add_error("No valid robot IPs were provided from arguments " + - "neither config file. We cannot continue. Please review your " + - "arguments and/or configuration file") + self.add_error( + "No valid robot IPs were provided from arguments " + + "neither config file. We cannot continue. 
Please review your " + + "arguments and/or configuration file" + ) return False else: - self.override_node_children(self.dom.getElementsByTagName( - 'initial_peers')[0],'element', new_peers) + self.override_node_children( + self.dom.getElementsByTagName("initial_peers")[0], "element", new_peers + ) return True def replace_partition_name(self): new_name = self.get_robot_name() if new_name == None: - self.add_error("Robot name is empty. We cannot continue. " + - "Please review your arguments and/or configuration file") + self.add_error( + "Robot name is empty. We cannot continue. " + + "Please review your arguments and/or configuration file" + ) return False else: subscriber_partitions = self.dom.getElementsByTagName( - 'domain_participant_library')[0].getElementsByTagName('name') + "domain_participant_library" + )[0].getElementsByTagName("name") for sub_part in subscriber_partitions: - self.override_node_children(sub_part, 'element', new_name) + self.override_node_children(sub_part, "element", new_name) return True def replace_domain_id(self): new_domain = self.get_domain_id() if new_domain == None: - self.add_error("Domain is not valid. No valid domain ids were " + - "found in the arguments or config file") + self.add_error( + "Domain is not valid. No valid domain ids were " + + "found in the arguments or config file" + ) return False else: - domain_node = self.dom.getElementsByTagName('domain')[0] + domain_node = self.dom.getElementsByTagName("domain")[0] domain_node.attributes["domain_id"].value = new_domain return True @@ -202,30 +220,39 @@ def insert_public_ip(self): if new_public_ip == None: return True elif new_public_ip == -1: - self.add_error("Public IP is not a valid IPv4. We cannot continue." + - " Please review your arguments and/or configuration file") + self.add_error( + "Public IP is not a valid IPv4. We cannot continue." 
+ + " Please review your arguments and/or configuration file" + ) return False else: - parents = self.dom.getElementsByTagName('qos_library') + parents = self.dom.getElementsByTagName("qos_library") for child in parents: if child.getAttribute("name") == "RapidQosLibrary": - parent = child.getElementsByTagName('property')[0] \ - .getElementsByTagName("value")[0] + parent = child.getElementsByTagName("property")[ + 0 + ].getElementsByTagName("value")[0] node_element = parent.ownerDocument.createElement("element") n_name = node_element.ownerDocument.createElement("name") - n_name.appendChild(n_name.ownerDocument.createTextNode( \ - "dds.transport.UDPv4.builtin.public_address")) + n_name.appendChild( + n_name.ownerDocument.createTextNode( + "dds.transport.UDPv4.builtin.public_address" + ) + ) n_value = node_element.ownerDocument.createElement("value") - n_value.appendChild(n_value.ownerDocument \ - .createTextNode(new_public_ip)) + n_value.appendChild( + n_value.ownerDocument.createTextNode(new_public_ip) + ) node_element.appendChild(n_name) node_element.appendChild(n_value) sibling_node = None elements = parent.getElementsByTagName("element") for element in elements: - if element.getElementsByTagName('name')[0].firstChild.nodeValue \ - == "dds.transport.UDPv4.builtin.parent.message_size_max": + if ( + element.getElementsByTagName("name")[0].firstChild.nodeValue + == "dds.transport.UDPv4.builtin.parent.message_size_max" + ): sibling_node = element break @@ -253,7 +280,9 @@ def get_info(self): return self.info def get_all_warnings(self): - warnings_text = "\nThe configuration proccess produced the following warnings:\n" + warnings_text = ( + "\nThe configuration proccess produced the following warnings:\n" + ) for key, value in self.warn.items(): warnings_text += "\n" + key + " : " + value warnings_text += "\n ----" @@ -275,15 +304,15 @@ def get_all_info(self): def validate_config_file(self): return ( - self.config.sections() and - self.config.has_section("RobotName") 
and - self.config.has_section("DiscoveryPeers") and - self.config.has_section("DdsDomain") and - self.config.has_section("TRek") and - self.config.has_option("RobotName", "name") and - self.config.options("DiscoveryPeers") and - self.config.has_option("DdsDomain", "domain") and - self.config.has_option("TRek", "public_ip") + self.config.sections() + and self.config.has_section("RobotName") + and self.config.has_section("DiscoveryPeers") + and self.config.has_section("DdsDomain") + and self.config.has_section("TRek") + and self.config.has_option("RobotName", "name") + and self.config.options("DiscoveryPeers") + and self.config.has_option("DdsDomain", "domain") + and self.config.has_option("TRek", "public_ip") ) def destroy_dom(self): @@ -291,8 +320,9 @@ def destroy_dom(self): self.dom.unlink() remove(DDS_PROFILE_FILE) - def set_preferences(self, partition_name = None, given_peer = None, - domain = None, public_ip = None): + def set_preferences( + self, partition_name=None, given_peer=None, domain=None, public_ip=None + ): # Override preferences with argument values if partition_name != None: @@ -308,8 +338,9 @@ def set_preferences(self, partition_name = None, given_peer = None, try: self.dom = minidom.parse(BASE_DDS_PROFILE_FILE) except Exception as e: - self.add_error("DDS profile was NOT found or is corrupted." + - " We cannot continue.") + self.add_error( + "DDS profile was NOT found or is corrupted." + " We cannot continue." + ) self.add_info("Configuration process failed. See warnings and errors") return False @@ -317,25 +348,34 @@ def set_preferences(self, partition_name = None, given_peer = None, self.config.read(CONFIG_FILE) if not self.validate_config_file(): - self.add_error("""Config file was NOT found or is corrupted. - We cannot continue""") + self.add_error( + """Config file was NOT found or is corrupted. + We cannot continue""" + ) self.add_info("Configuration process failed. 
See warnings and errors") return False - if not (self.replace_initial_peers() and - self.replace_partition_name() and - self.replace_domain_id() and - self.insert_public_ip()): - self.add_error("DDS Profile could not be configured." + - " We cannot continue") + if not ( + self.replace_initial_peers() + and self.replace_partition_name() + and self.replace_domain_id() + and self.insert_public_ip() + ): + self.add_error( + "DDS Profile could not be configured." + " We cannot continue" + ) self.add_info("Configuration process failed. See warnings and errors") return False self.write_xml_file(DDS_PROFILE_FILE) - info_text = "Configuration process was SUCCESSFUL. Following values" + \ - " will be used:" + "\n\nRobot Name: " + self.get_robot_name() + \ - "\nInitial Peers:\n" + info_text = ( + "Configuration process was SUCCESSFUL. Following values" + + " will be used:" + + "\n\nRobot Name: " + + self.get_robot_name() + + "\nInitial Peers:\n" + ) for key, value in self.get_initial_peers().items(): info_text += " - " + value + "\n" @@ -348,9 +388,10 @@ def set_preferences(self, partition_name = None, given_peer = None, self.add_info(info_text) return True + # Usage -#config = Preferences() -#config.set_preferences() -#print config.get_all_warnings() -#print config.get_all_errors() -#print config.get_all_info() +# config = Preferences() +# config.set_preferences() +# print config.get_all_warnings() +# print config.get_all_errors() +# print config.get_all_info() diff --git a/tools/gnc_visualizer/scripts/communications/data_support.py b/tools/gnc_visualizer/scripts/communications/data_support.py index 2f5156ca30..67d8f1caad 100644 --- a/tools/gnc_visualizer/scripts/communications/data_support.py +++ b/tools/gnc_visualizer/scripts/communications/data_support.py @@ -19,66 +19,80 @@ import numpy as np + def todict(obj): if hasattr(obj, "__iter__"): return [todict(v) for v in obj] elif hasattr(obj, "__dict__"): - return dict([(key, todict(value)) - for key, value in 
obj.__dict__.iteritems() - if not callable(value) and not key.startswith('_')]) + return dict( + [ + (key, todict(value)) + for key, value in obj.__dict__.iteritems() + if not callable(value) and not key.startswith("_") + ] + ) else: return obj + class Common: def asDict(self): return todict(self) class Header(Common): - def __init__(self, seq = None, stamp = None, frame_id = None): + def __init__(self, seq=None, stamp=None, frame_id=None): self.seq = seq self.stamp = stamp self.frame_id = frame_id + class Point(Common): - def __init__(self, x = 0.0, y = 0.0, z = 0.0): + def __init__(self, x=0.0, y=0.0, z=0.0): self.x = x self.y = y self.z = z + class Quaternion(Common): - def __init__(self, x = 0.0, y = 0.0, z = 0.0, w = 0.0): + def __init__(self, x=0.0, y=0.0, z=0.0, w=0.0): self.x = x self.y = y self.z = z self.w = w + class Pose(Common): - def __init__(self, position = Point(), orientation = Quaternion()): + def __init__(self, position=Point(), orientation=Quaternion()): self.position = position self.orientation = orientation + class Vector3(Common): - def __init__(self, x = 0.0, y = 0.0, z = 0.0): + def __init__(self, x=0.0, y=0.0, z=0.0): self.x = x self.y = y self.z = z + class Wrench(Common): - def __init__(self, force = Vector3(), torque = Vector3()): + def __init__(self, force=Vector3(), torque=Vector3()): self.force = force self.torque = torque + class Twist(Common): - def __init__(self, linear = Vector3(), angular = Vector3()): + def __init__(self, linear=Vector3(), angular=Vector3()): self.linear = linear self.angular = angular + class PoseStamped(Common): def __init__(self): self.header = Header() self.pose = Pose() + class EkfState(Common): def __init__(self): self.header = Header() @@ -89,7 +103,7 @@ def __init__(self): self.gyro_bias = Vector3() self.accel = Vector3() self.accel_bias = Vector3() - #self.cov_diag = [0.0] * 15 + # self.cov_diag = [0.0] * 15 self.cov_diag = np.zeros(15) self.confidence = 0 self.aug_state_enum = 0 @@ -97,33 +111,34 @@ 
def __init__(self): self.of_count = 0 self.ml_count = 0 self.hr_global_pose = Pose() - #self.ml_mahal_dists = [0.0] * 50 + # self.ml_mahal_dists = [0.0] * 50 self.ml_mahal_dists = np.zeros(50) class PmcCommand(Common): - def __init__(self, header = Header()): + def __init__(self, header=Header()): self.header = header self.goals = [] class PmcGoal(Common): - def __init__(self, motor_speed = None): + def __init__(self, motor_speed=None): self.motor_speed = motor_speed self.nozzle_positions = [] def to_dict(self): dic = dict() - dic['hdr'] = self.header - dic['goals'] = [] + dic["hdr"] = self.header + dic["goals"] = [] for i in range(0, len(self.goals)): - goal = {'motorSpeed': self.goals[i].motor_speed, 'nozzlePositions': []} + goal = {"motorSpeed": self.goals[i].motor_speed, "nozzlePositions": []} for j in range(0, len(self.goals[i].nozzle_positions)): - goal['nozzlePositions'].append(self.goals[i].nozzle_positions[j]) - dic['goals'].append(goal) + goal["nozzlePositions"].append(self.goals[i].nozzle_positions[j]) + dic["goals"].append(goal) return dic + class FamCommand(Common): def __init__(self): self.header = Header() @@ -138,6 +153,7 @@ def __init__(self): self.attitude_error_mag = 0.0 self.control_mode = 0 + class ControlState(Common): def __init__(self): self.when = None @@ -145,6 +161,7 @@ def __init__(self): self.twist = Twist() self.accel = Twist() + class Log(Common): def __init__(self): self.header = Header() diff --git a/tools/gnc_visualizer/scripts/communications/dds_support.py b/tools/gnc_visualizer/scripts/communications/dds_support.py index 7fae7e33c5..e4a06b8beb 100644 --- a/tools/gnc_visualizer/scripts/communications/dds_support.py +++ b/tools/gnc_visualizer/scripts/communications/dds_support.py @@ -17,31 +17,46 @@ # License for the specific language governing permissions and limitations # under the License. 
-from data_support import PmcCommand, PoseStamped, EkfState, FamCommand, ControlState, Quaternion, Vector3, Log - -from sys import path as sysPath from os import path as osPath +from sys import path as sysPath from time import sleep -from pkg_resources import parse_version, get_distribution + +from data_support import ( + ControlState, + EkfState, + FamCommand, + Log, + PmcCommand, + PoseStamped, + Quaternion, + Vector3, +) +from pkg_resources import get_distribution, parse_version try: import rticonnextdds_connector as rti except Exception as e: - raise ImportError("\nYou haven't installed a needed RTI DDS library. \n\nFirst, ensure you have" + - " pip installed:\n\n\tsudo apt-get install python-pip\n\n" + - "Then install the library:\n\n\tpip install rticonnextdds-connector\n") + raise ImportError( + "\nYou haven't installed a needed RTI DDS library. \n\nFirst, ensure you have" + + " pip installed:\n\n\tsudo apt-get install python-pip\n\n" + + "Then install the library:\n\n\tpip install rticonnextdds-connector\n" + ) -import threading import math +import threading filepath = osPath.dirname(osPath.realpath(__file__)) -connector = rti.Connector("MyParticipantLibrary::Zero", filepath + "/dds_types/CurrentDDSProfile.xml") +connector = rti.Connector( + "MyParticipantLibrary::Zero", filepath + "/dds_types/CurrentDDSProfile.xml" +) rti_version = get_distribution("rticonnextdds-connector").version -is_old_version = parse_version(rti_version) < parse_version('0.4.1') +is_old_version = parse_version(rti_version) < parse_version("0.4.1") + def is_shutdown(): return False + class DdsSubscriberManager: subscribers = dict() sem = None @@ -57,7 +72,6 @@ def add_subscriber(self, key, subscriber, auto_start): else: self.subscribers[key].sem = self.sem - def get_subscriber(self, key): return self.subscribers.get(key, None) @@ -81,6 +95,7 @@ def start_all(self): for key, value in self.subscribers.itemitems(): value.start_sync(self.sem) + class DdsSubscriber(threading.Thread): 
inputDDS = None @@ -105,8 +120,11 @@ def run(self): self.sem.acquire(True) connector.wait(self.timeout) self.inputDDS.take() - numOfSamples = (self.inputDDS.samples.getLength() if not is_old_version - else self.inputDDS.samples.getLength() + 1) + numOfSamples = ( + self.inputDDS.samples.getLength() + if not is_old_version + else self.inputDDS.samples.getLength() + 1 + ) for j in range(self.start_index, numOfSamples): if self.inputDDS.infos.isValid(j): @@ -122,27 +140,28 @@ def start_sync(self, sem): self.start() def stop(self): - self.stopper.set() + self.stopper.set() + class Dict2RosMsgTranslator: supported_ros_types = { - PoseStamped: lambda self, data: self.__dictionary_to_pose_stamped_msg(data), - EkfState: lambda self, data: self.__dictionary_to_ekf_msg(data), - FamCommand: lambda self, data: self.__dictionary_to_fam_msg(data), - ControlState: lambda self, data: self.__dictionary_to_control_msg(data), - PmcCommand: lambda self, data: self.__dictionary_to_pmc_msg(data), - Log: lambda self, data: self.__dictionary_to_log_msg(data) + PoseStamped: lambda self, data: self.__dictionary_to_pose_stamped_msg(data), + EkfState: lambda self, data: self.__dictionary_to_ekf_msg(data), + FamCommand: lambda self, data: self.__dictionary_to_fam_msg(data), + ControlState: lambda self, data: self.__dictionary_to_control_msg(data), + PmcCommand: lambda self, data: self.__dictionary_to_pmc_msg(data), + Log: lambda self, data: self.__dictionary_to_log_msg(data), } ros_type = None def __init__(self, ros_type): - self.ros_type = ros_type + self.ros_type = ros_type def translate(self, dictionary): - ros_msg = self.supported_ros_types[self.ros_type](self, dictionary) - return ros_msg + ros_msg = self.supported_ros_types[self.ros_type](self, dictionary) + return ros_msg def __array_to_vector3d(self, vector_array): vector3d = Vector3() @@ -164,114 +183,119 @@ def __array_to_quaternion(self, rot_array): def __dictionary_to_control_msg(self, dic): msg = ControlState() - msg.when = 
dic['hdr']['timeStamp'] * 1000 + msg.when = dic["hdr"]["timeStamp"] * 1000 - msg.pose.position = self.__array_to_vector3d(dic['pose']['xyz']) - msg.pose.orientation = self.__array_to_quaternion(dic['pose']['rot']) + msg.pose.position = self.__array_to_vector3d(dic["pose"]["xyz"]) + msg.pose.orientation = self.__array_to_quaternion(dic["pose"]["rot"]) - msg.twist.linear = self.__array_to_vector3d(dic['twist']['linear']) - msg.twist.angular = self.__array_to_vector3d(dic['twist']['angular']) + msg.twist.linear = self.__array_to_vector3d(dic["twist"]["linear"]) + msg.twist.angular = self.__array_to_vector3d(dic["twist"]["angular"]) - msg.accel.linear = self.__array_to_vector3d(dic['accel']['linear']) - msg.accel.angular = self.__array_to_vector3d(dic['accel']['angular']) + msg.accel.linear = self.__array_to_vector3d(dic["accel"]["linear"]) + msg.accel.angular = self.__array_to_vector3d(dic["accel"]["angular"]) return msg def __dictionary_to_fam_msg(self, dic): msg = FamCommand() - msg.header.stamp = dic['hdr']['timeStamp'] - msg.wrench.force = self.__array_to_vector3d(dic['wrench']['force']) - msg.wrench.torque = self.__array_to_vector3d(dic['wrench']['torque']) - msg.accel = self.__array_to_vector3d(dic['accel']) - msg.alpha = self.__array_to_vector3d(dic['alpha']) - msg.status = dic['status'] - msg.position_error = self.__array_to_vector3d(dic['position_error']) - msg.position_error_integrated = self.__array_to_vector3d(dic['position_error_integrated']) - msg.attitude_error = self.__array_to_vector3d(dic['attitude_error']) - msg.attitude_error_integrated = self.__array_to_vector3d(dic['attitude_error_integrated']) - msg.attitude_error_mag = dic['attitude_error_mag'] - msg.control_mode = dic['control_mode'] - - #print msg.asDict() + msg.header.stamp = dic["hdr"]["timeStamp"] + msg.wrench.force = self.__array_to_vector3d(dic["wrench"]["force"]) + msg.wrench.torque = self.__array_to_vector3d(dic["wrench"]["torque"]) + msg.accel = self.__array_to_vector3d(dic["accel"]) 
+ msg.alpha = self.__array_to_vector3d(dic["alpha"]) + msg.status = dic["status"] + msg.position_error = self.__array_to_vector3d(dic["position_error"]) + msg.position_error_integrated = self.__array_to_vector3d( + dic["position_error_integrated"] + ) + msg.attitude_error = self.__array_to_vector3d(dic["attitude_error"]) + msg.attitude_error_integrated = self.__array_to_vector3d( + dic["attitude_error_integrated"] + ) + msg.attitude_error_mag = dic["attitude_error_mag"] + msg.control_mode = dic["control_mode"] + + # print msg.asDict() return msg - def __dictionary_to_pose_stamped_msg(self, dic): msg = PoseStamped() - msg.header.stamp = dic['hdr']['timeStamp'] - msg.pose.position = self.__array_to_vector3d(dic['pose']['xyz']) - msg.pose.orientation = self.__array_to_quaternion(dic['pose']['rot']) + msg.header.stamp = dic["hdr"]["timeStamp"] + msg.pose.position = self.__array_to_vector3d(dic["pose"]["xyz"]) + msg.pose.orientation = self.__array_to_quaternion(dic["pose"]["rot"]) return msg - def __dictionary_to_ekf_msg(self, dic): - #print dic + # print dic msg = EkfState() - msg.header.stamp = dic['hdr']['timeStamp'] + msg.header.stamp = dic["hdr"]["timeStamp"] - msg.pose.position = self.__array_to_vector3d(dic['pose']['xyz']) + msg.pose.position = self.__array_to_vector3d(dic["pose"]["xyz"]) - msg.pose.orientation = self.__array_to_quaternion(dic['pose']['rot']) + msg.pose.orientation = self.__array_to_quaternion(dic["pose"]["rot"]) - msg.velocity = self.__array_to_vector3d(dic['velocity']) - msg.omega = self.__array_to_vector3d(dic['omega']) - msg.gyro_bias = self.__array_to_vector3d(dic['gyro_bias']) - msg.accel = self.__array_to_vector3d(dic['accel']) - msg.accel_bias = self.__array_to_vector3d(dic['accel_bias']) + msg.velocity = self.__array_to_vector3d(dic["velocity"]) + msg.omega = self.__array_to_vector3d(dic["omega"]) + msg.gyro_bias = self.__array_to_vector3d(dic["gyro_bias"]) + msg.accel = self.__array_to_vector3d(dic["accel"]) + msg.accel_bias = 
self.__array_to_vector3d(dic["accel_bias"]) - for i in range(0, len(dic['cov_diag'])): - msg.cov_diag[i] = dic['cov_diag'][i] + for i in range(0, len(dic["cov_diag"])): + msg.cov_diag[i] = dic["cov_diag"][i] - msg.confidence = dic['confidence'] - msg.status = dic['status'] - msg.of_count = dic['of_count'] - msg.ml_count = dic['ml_count'] + msg.confidence = dic["confidence"] + msg.status = dic["status"] + msg.of_count = dic["of_count"] + msg.ml_count = dic["ml_count"] - msg.hr_global_pose.position = self.__array_to_vector3d(dic['hr_global_pose']['xyz']) + msg.hr_global_pose.position = self.__array_to_vector3d( + dic["hr_global_pose"]["xyz"] + ) - msg.hr_global_pose.orientation = self.__array_to_quaternion(dic['hr_global_pose']['rot']) + msg.hr_global_pose.orientation = self.__array_to_quaternion( + dic["hr_global_pose"]["rot"] + ) - for i in range(0, len(dic['ml_mahal_dists'])): - msg.ml_mahal_dists[i] = dic['ml_mahal_dists'][i] + for i in range(0, len(dic["ml_mahal_dists"])): + msg.ml_mahal_dists[i] = dic["ml_mahal_dists"][i] - #print msg.asDict() + # print msg.asDict() return msg def __dictionary_to_pmc_msg(self, dic): msg = PmcCommand() - msg.header.stamp = dic['hdr']['timeStamp'] + msg.header.stamp = dic["hdr"]["timeStamp"] - for i in range(0, len(dic['goals'])): - goal = PmcCommand.PmcGoal(dic['goals'][i]['motorSpeed']) - for j in range(0, len(dic['goals'][i]['nozzlePositions'])): - goal.nozzle_positions.append(chr(dic['goals'][i]['nozzlePositions'][j])) + for i in range(0, len(dic["goals"])): + goal = PmcCommand.PmcGoal(dic["goals"][i]["motorSpeed"]) + for j in range(0, len(dic["goals"][i]["nozzlePositions"])): + goal.nozzle_positions.append(chr(dic["goals"][i]["nozzlePositions"][j])) msg.goals.append(goal) - #print msg.asDict() + # print msg.asDict() return msg def __dictionary_to_log_msg(self, dic): msg = Log() - msg.header.stamp = dic['hdr']['timeStamp'] - msg.level = math.pow(2, dic['level']) - msg.name = dic['name'] - msg.msg = dic['msg'] - #print 
msg.asDict() + msg.header.stamp = dic["hdr"]["timeStamp"] + msg.level = math.pow(2, dic["level"]) + msg.name = dic["name"] + msg.msg = dic["msg"] + # print msg.asDict() return msg -class DdsCommandExecutor: +class DdsCommandExecutor: def __init__(self): pass def reset_ekf(self): - print "reset_ekf function is not yet implemented on dds" - + print("reset_ekf function is not yet implemented on dds") def initialize_bias(self): - print "initialize_bias function is not yet implemented on dds" + print("initialize_bias function is not yet implemented on dds") def toggle_pmc(self): - print "toggle_pmc function is not yet implemented on dds" + print("toggle_pmc function is not yet implemented on dds") diff --git a/tools/gnc_visualizer/scripts/communications/ros_support.py b/tools/gnc_visualizer/scripts/communications/ros_support.py index 3f93af627a..17bd12f6f9 100644 --- a/tools/gnc_visualizer/scripts/communications/ros_support.py +++ b/tools/gnc_visualizer/scripts/communications/ros_support.py @@ -17,30 +17,32 @@ # License for the specific language governing permissions and limitations # under the License. 
-import rospy -import rosgraph import os +import signal import socket -import time import subprocess -import signal +import time +import rosgraph +import rospy from ff_hw_msgs.msg import PmcCommand -from ff_msgs.msg import EkfState, FamCommand, ControlState, CommandStamped +from ff_msgs.msg import CommandStamped, ControlState, EkfState, FamCommand from ff_msgs.srv import SetBool from geometry_msgs.msg import PoseStamped from rosgraph_msgs.msg import Log from std_srvs.srv import Empty + def is_shutdown(): return rospy.is_shutdown() + class RosSubscriberManager: subscribers = dict() rosmaster = None logger_function = None - def __init__(self, node_name, logger_function = None): + def __init__(self, node_name, logger_function=None): self.logger_function = logger_function self.__ensure_ros_master() rospy.init_node(node_name, anonymous=False, disable_signals=True) @@ -53,13 +55,18 @@ def get_subscriber(self, key): def __ensure_ros_master(self): try: - rosgraph.Master('/rostopic').getPid() + rosgraph.Master("/rostopic").getPid() except socket.error: if self.logger_function != None: - self.logger_function('Starting roscore.', '#FFB266') + self.logger_function("Starting roscore.", "#FFB266") - self.rosmaster = subprocess.Popen("roscore", preexec_fn=os.setsid, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + self.rosmaster = subprocess.Popen( + "roscore", + preexec_fn=os.setsid, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) time.sleep(1) def stop_all(self): @@ -68,31 +75,31 @@ def stop_all(self): os.killpg(os.getpgid(self.rosmaster.pid), signal.SIGINT) rosmaster.wait() -class RosCommandExecutor: +class RosCommandExecutor: def __init__(self): pass def reset_ekf(self): try: - reset = rospy.ServiceProxy('gnc/ekf/reset', Empty) + reset = rospy.ServiceProxy("gnc/ekf/reset", Empty) reset() - except rospy.ServiceException, e: - print "Service call failed: %s" % e + except rospy.ServiceException as e: + print("Service call failed: %s" % e) def 
initialize_bias(self): try: - initialize = rospy.ServiceProxy('/gnc/ekf/init_bias', Empty) + initialize = rospy.ServiceProxy("/gnc/ekf/init_bias", Empty) initialize() - except rospy.ServiceException, e: - print "Service call failed: %s" % e + except rospy.ServiceException as e: + print(("Service call failed: %s" % e)) def toggle_pmc(self, current_value): try: - pmc_enable = rospy.ServiceProxy('/hw/pmc/enable', SetBool) + pmc_enable = rospy.ServiceProxy("/hw/pmc/enable", SetBool) new_value = not current_value pmc_enable(new_value) return new_value - except rospy.ServiceException, e: - print "Service call failed: %s" % e + except rospy.ServiceException as e: + print(("Service call failed: %s" % e)) return current_value diff --git a/tools/gnc_visualizer/scripts/plot_types.py b/tools/gnc_visualizer/scripts/plot_types.py index 04c325f417..e8ddaf9ffc 100644 --- a/tools/gnc_visualizer/scripts/plot_types.py +++ b/tools/gnc_visualizer/scripts/plot_types.py @@ -2,62 +2,68 @@ # # Copyright (c) 2017, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. -# +# # All rights reserved. -# +# # The Astrobee platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-from pyqtgraph.Qt import QtGui, QtCore -import pyqtgraph as pg -import numpy as np - import time -DISPLAY_TIME = 10 +import numpy as np +import pyqtgraph as pg +from pyqtgraph.Qt import QtCore, QtGui -colors = [pg.mkColor(65, 105, 225), - pg.mkColor(255, 100, 100), - pg.mkColor(255, 255, 100), - pg.mkColor(32, 178, 170), - pg.mkColor(153, 50, 204), - pg.mkColor(255, 250, 250) - ] +DISPLAY_TIME = 10 -colors2 = [pg.mkColor(135, 206, 235), - pg.mkColor(255, 160, 122), - pg.mkColor(255, 255, 150)] +colors = [ + pg.mkColor(65, 105, 225), + pg.mkColor(255, 100, 100), + pg.mkColor(255, 255, 100), + pg.mkColor(32, 178, 170), + pg.mkColor(153, 50, 204), + pg.mkColor(255, 250, 250), +] + +colors2 = [ + pg.mkColor(135, 206, 235), + pg.mkColor(255, 160, 122), + pg.mkColor(255, 255, 150), +] start_time = time.time() + class VisualizerPlot(pg.PlotItem): def __init__(self): super(VisualizerPlot, self).__init__() + def update_plot(self, data): pass + class GraphPlot(VisualizerPlot): def __init__(self, title, y_label, show_x_axis=False): super(GraphPlot, self).__init__() self.setTitle(title) - self.setLabel('left', y_label) - self.setLabel('bottom', 'Time', 's') + self.setLabel("left", y_label) + self.setLabel("bottom", "Time", "s") self.hideButtons() - self.setDownsampling(mode='subsample') - #self.setClipToView(True) + self.setDownsampling(mode="subsample") + # self.setClipToView(True) self.show_x_axis(False) def show_x_axis(self, show): - self.showAxis('bottom', show) + self.showAxis("bottom", show) def update_plot(self, data): super(GraphPlot, self).update_plot(data) @@ -65,27 +71,32 @@ def update_plot(self, data): self.setXRange(max(0, t - DISPLAY_TIME), t) self.setLimits(xMin=max(0, t - DISPLAY_TIME), xMax=t) + class PathItem(pg.QtGui.QGraphicsPathItem): def __init__(self, pen): - self.path = pg.arrayToQPath(np.zeros(0), np.zeros(0), 'all') + self.path = pg.arrayToQPath(np.zeros(0), np.zeros(0), "all") pg.QtGui.QGraphicsPathItem.__init__(self, self.path) self.setPen(pen) 
self.last_time = None - def shape(self): # override because QGraphicsPathItem.shape is too expensive. + + def shape(self): # override because QGraphicsPathItem.shape is too expensive. return pg.QtGui.QGraphicsItem.shape(self) + def boundingRect(self): return self.path.boundingRect() + def setData(self, x_values, y_values): - self.path = pg.arrayToQPath(x_values, y_values, 'all') + self.path = pg.arrayToQPath(x_values, y_values, "all") self.setPath(self.path) + class VectorPlot(GraphPlot): def __init__(self, name, y_axis, time_names, y_names, legend_names=None, pens=None): a = time.time() super(VectorPlot, self).__init__(name, y_axis) self.y_names = y_names self.time_names = time_names - if type(self.time_names) != list: # if all the same, can just specify one + if type(self.time_names) != list: # if all the same, can just specify one self.time_names = [time_names] * len(y_names) self.plot_data = [] if legend_names != None: @@ -93,7 +104,11 @@ def __init__(self, name, y_axis, time_names, y_names, legend_names=None, pens=No if pens == None: pens = colors for i in range(len(self.y_names)): - name = None if legend_names == None or len(legend_names) <= i else legend_names[i] + name = ( + None + if legend_names == None or len(legend_names) <= i + else legend_names[i] + ) self.plot_data.append(PathItem(pens[i])) self.addItem(self.plot_data[-1], name=name) if name != None: @@ -104,91 +119,154 @@ def update_plot(self, data): super(VectorPlot, self).update_plot(data) for i in range(len(self.y_names)): s = data[1][self.time_names[i]] - self.plot_data[i].setData(data[0][self.time_names[i]][:s], data[0][self.y_names[i]][:s]) + self.plot_data[i].setData( + data[0][self.time_names[i]][:s], data[0][self.y_names[i]][:s] + ) + class PositionPlot(GraphPlot): def __init__(self): - super(PositionPlot, self).__init__('Position', 'Position (m)') + super(PositionPlot, self).__init__("Position", "Position (m)") self.pos_x = self.plot(pen=colors[0]) self.pos_y = self.plot(pen=colors[1]) 
self.pos_z = self.plot(pen=colors[2]) - self.truth_x = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5) - self.truth_y = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5) - self.truth_z = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5) - self.ml_pose_x = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5) - self.ml_pose_y = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5) - self.ml_pose_z = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5) - + self.truth_x = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5 + ) + self.truth_y = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5 + ) + self.truth_z = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5 + ) + self.ml_pose_x = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5 + ) + self.ml_pose_y = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5 + ) + self.ml_pose_z = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5 + ) def update_plot(self, data): super(PositionPlot, self).update_plot(data) - s = data[1]['ekf_time'] - self.pos_x.setData(data[0]['ekf_time'][:s], data[0]['ekf_position_x'][:s]) - self.pos_y.setData(data[0]['ekf_time'][:s], data[0]['ekf_position_y'][:s]) - self.pos_z.setData(data[0]['ekf_time'][:s], data[0]['ekf_position_z'][:s]) - s = data[1]['truth_time'] - self.truth_x.setData(data[0]['truth_time'][:s], data[0]['truth_position_x'][:s]) - self.truth_y.setData(data[0]['truth_time'][:s], data[0]['truth_position_y'][:s]) - self.truth_z.setData(data[0]['truth_time'][:s], data[0]['truth_position_z'][:s]) - s = data[1]['ml_pose_time'] - 
self.ml_pose_x.setData(data[0]['ml_pose_time'][:s], data[0]['ml_pose_position_x'][:s]) - self.ml_pose_y.setData(data[0]['ml_pose_time'][:s], data[0]['ml_pose_position_y'][:s]) - self.ml_pose_z.setData(data[0]['ml_pose_time'][:s], data[0]['ml_pose_position_z'][:s]) + s = data[1]["ekf_time"] + self.pos_x.setData(data[0]["ekf_time"][:s], data[0]["ekf_position_x"][:s]) + self.pos_y.setData(data[0]["ekf_time"][:s], data[0]["ekf_position_y"][:s]) + self.pos_z.setData(data[0]["ekf_time"][:s], data[0]["ekf_position_z"][:s]) + s = data[1]["truth_time"] + self.truth_x.setData(data[0]["truth_time"][:s], data[0]["truth_position_x"][:s]) + self.truth_y.setData(data[0]["truth_time"][:s], data[0]["truth_position_y"][:s]) + self.truth_z.setData(data[0]["truth_time"][:s], data[0]["truth_position_z"][:s]) + s = data[1]["ml_pose_time"] + self.ml_pose_x.setData( + data[0]["ml_pose_time"][:s], data[0]["ml_pose_position_x"][:s] + ) + self.ml_pose_y.setData( + data[0]["ml_pose_time"][:s], data[0]["ml_pose_position_y"][:s] + ) + self.ml_pose_z.setData( + data[0]["ml_pose_time"][:s], data[0]["ml_pose_position_z"][:s] + ) + class VelocityPlot(VectorPlot): def __init__(self): - super(VelocityPlot, self).__init__('Velocity', 'Velocity (m/s)', - 'ekf_time', ['ekf_vel_x', 'ekf_vel_y', 'ekf_vel_z']) + super(VelocityPlot, self).__init__( + "Velocity", + "Velocity (m/s)", + "ekf_time", + ["ekf_vel_x", "ekf_vel_y", "ekf_vel_z"], + ) + class AccelerationPlot(VectorPlot): def __init__(self): - super(AccelerationPlot, self).__init__('Acceleration', 'Acceleration (m/s^2)', - 'ekf_time', ['ekf_accel_x', 'ekf_accel_y', 'ekf_accel_z']) + super(AccelerationPlot, self).__init__( + "Acceleration", + "Acceleration (m/s^2)", + "ekf_time", + ["ekf_accel_x", "ekf_accel_y", "ekf_accel_z"], + ) + class AccelerationBiasPlot(VectorPlot): def __init__(self): - super(AccelerationBiasPlot, self).__init__('Accel Bias', 'Accel Bias (m/s^2)', - 'ekf_time', ['ekf_accel_bias_x', 'ekf_accel_bias_y', 'ekf_accel_bias_z']) + 
super(AccelerationBiasPlot, self).__init__( + "Accel Bias", + "Accel Bias (m/s^2)", + "ekf_time", + ["ekf_accel_bias_x", "ekf_accel_bias_y", "ekf_accel_bias_z"], + ) + class OrientationPlot(GraphPlot): def __init__(self): - super(OrientationPlot, self).__init__('Orientation', 'Euler Angles (degrees)') + super(OrientationPlot, self).__init__("Orientation", "Euler Angles (degrees)") self.rot_x = self.plot(pen=colors[0]) self.rot_y = self.plot(pen=colors[1]) self.rot_z = self.plot(pen=colors[2]) - self.truth_x = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5) - self.truth_y = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5) - self.truth_z = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5) - self.ml_pose_x = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5) - self.ml_pose_y = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5) - self.ml_pose_z = self.plot(symbol='d', pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5) - + self.truth_x = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5 + ) + self.truth_y = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5 + ) + self.truth_z = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5 + ) + self.ml_pose_x = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[0], symbolSize=5 + ) + self.ml_pose_y = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[1], symbolSize=5 + ) + self.ml_pose_z = self.plot( + symbol="d", pen=None, symbolPen=None, symbolBrush=colors[2], symbolSize=5 + ) def update_plot(self, data): super(OrientationPlot, self).update_plot(data) - s = data[1]['ekf_time'] - self.rot_x.setData(data[0]['ekf_time'][:s], data[0]['ekf_rot_x'][:s]) - self.rot_y.setData(data[0]['ekf_time'][:s], 
data[0]['ekf_rot_y'][:s]) - self.rot_z.setData(data[0]['ekf_time'][:s], data[0]['ekf_rot_z'][:s]) - s = data[1]['truth_time'] - self.truth_x.setData(data[0]['truth_time'][:s], data[0]['truth_rot_x'][:s]) - self.truth_y.setData(data[0]['truth_time'][:s], data[0]['truth_rot_y'][:s]) - self.truth_z.setData(data[0]['truth_time'][:s], data[0]['truth_rot_z'][:s]) - s = data[1]['ml_pose_time'] - self.ml_pose_x.setData(data[0]['ml_pose_time'][:s], data[0]['ml_pose_rot_x'][:s]) - self.ml_pose_y.setData(data[0]['ml_pose_time'][:s], data[0]['ml_pose_rot_y'][:s]) - self.ml_pose_z.setData(data[0]['ml_pose_time'][:s], data[0]['ml_pose_rot_z'][:s]) + s = data[1]["ekf_time"] + self.rot_x.setData(data[0]["ekf_time"][:s], data[0]["ekf_rot_x"][:s]) + self.rot_y.setData(data[0]["ekf_time"][:s], data[0]["ekf_rot_y"][:s]) + self.rot_z.setData(data[0]["ekf_time"][:s], data[0]["ekf_rot_z"][:s]) + s = data[1]["truth_time"] + self.truth_x.setData(data[0]["truth_time"][:s], data[0]["truth_rot_x"][:s]) + self.truth_y.setData(data[0]["truth_time"][:s], data[0]["truth_rot_y"][:s]) + self.truth_z.setData(data[0]["truth_time"][:s], data[0]["truth_rot_z"][:s]) + s = data[1]["ml_pose_time"] + self.ml_pose_x.setData( + data[0]["ml_pose_time"][:s], data[0]["ml_pose_rot_x"][:s] + ) + self.ml_pose_y.setData( + data[0]["ml_pose_time"][:s], data[0]["ml_pose_rot_y"][:s] + ) + self.ml_pose_z.setData( + data[0]["ml_pose_time"][:s], data[0]["ml_pose_rot_z"][:s] + ) class OmegaPlot(VectorPlot): def __init__(self): - super(OmegaPlot, self).__init__('Angular Velocity', 'Omega (degrees)', - 'ekf_time', ['ekf_omega_x', 'ekf_omega_y', 'ekf_omega_z']) + super(OmegaPlot, self).__init__( + "Angular Velocity", + "Omega (degrees)", + "ekf_time", + ["ekf_omega_x", "ekf_omega_y", "ekf_omega_z"], + ) + class GyroBiasPlot(VectorPlot): def __init__(self): - super(GyroBiasPlot, self).__init__('Gyro Bias', 'Gyro Bias (degrees)', - 'ekf_time', ['ekf_gyro_bias_x', 'ekf_gyro_bias_y', 'ekf_gyro_bias_z']) + super(GyroBiasPlot, 
self).__init__( + "Gyro Bias", + "Gyro Bias (degrees)", + "ekf_time", + ["ekf_gyro_bias_x", "ekf_gyro_bias_y", "ekf_gyro_bias_z"], + ) + class ModePlot(GraphPlot): def __init__(self, name, x_value, y_value, colormap): @@ -214,7 +292,11 @@ def update_plot(self, data): new_items = len(data[0][self.x_value]) else: for r in data[0][self.x_value]: - if r > self.confidence_rects[-1].rect().x() + self.confidence_rects[-1].rect().width(): + if ( + r + > self.confidence_rects[-1].rect().x() + + self.confidence_rects[-1].rect().width() + ): new_items += 1 else: break @@ -226,7 +308,9 @@ def update_plot(self, data): self.last_value = val if len(self.confidence_rects) > 0: r = self.confidence_rects[-1].rect() - self.confidence_rects[-1].setRect(r.x(), 0, data[0][self.x_value][i] - r.x(), 1) + self.confidence_rects[-1].setRect( + r.x(), 0, data[0][self.x_value][i] - r.x(), 1 + ) start_t = data[0][self.x_value][i] r = QtGui.QGraphicsRectItem(start_t, 0, 0, 1) r.setBrush(self.colormap[int(val)]) @@ -237,48 +321,91 @@ def update_plot(self, data): self.setYRange(0, 1) self.setLimits(yMin=0, yMax=1) + class ConfidencePlot(ModePlot): def __init__(self): - super(ConfidencePlot, self).__init__('Confidence', 'ekf_time', 'ekf_confidence', - [pg.mkColor(0, 255, 0), pg.mkColor(255, 255, 0), pg.mkColor(255, 0, 0)]) + super(ConfidencePlot, self).__init__( + "Confidence", + "ekf_time", + "ekf_confidence", + [pg.mkColor(0, 255, 0), pg.mkColor(255, 255, 0), pg.mkColor(255, 0, 0)], + ) + class CovPlot(VectorPlot): def __init__(self): - super(CovPlot, self).__init__('Covariance Magnitudes', 'Covariance', - 'ekf_time', ['ekf_cov_rot_m', 'ekf_cov_gbias_m', 'ekf_cov_vel_m', 'ekf_cov_abias_m', 'ekf_cov_pos_m'], - ['Rotation', 'Gyro Bias', 'Velocity', 'Accel Bias', 'Position']) + super(CovPlot, self).__init__( + "Covariance Magnitudes", + "Covariance", + "ekf_time", + [ + "ekf_cov_rot_m", + "ekf_cov_gbias_m", + "ekf_cov_vel_m", + "ekf_cov_abias_m", + "ekf_cov_pos_m", + ], + ["Rotation", "Gyro Bias", 
"Velocity", "Accel Bias", "Position"], + ) + def update_plot(self, data): super(CovPlot, self).update_plot(data) - #self.plot.setLogMode(y=True) + # self.plot.setLogMode(y=True) + class CovRotPlot(VectorPlot): def __init__(self): - super(CovRotPlot, self).__init__('Rotation Covariance', 'Rotation Covariance', - 'ekf_time', ['ekf_cov_rot_x', 'ekf_cov_rot_y', 'ekf_cov_rot_z']) + super(CovRotPlot, self).__init__( + "Rotation Covariance", + "Rotation Covariance", + "ekf_time", + ["ekf_cov_rot_x", "ekf_cov_rot_y", "ekf_cov_rot_z"], + ) + class CovGBiasPlot(VectorPlot): def __init__(self): - super(CovGBiasPlot, self).__init__('Gyro Bias Covariance', 'Gyro Bias Covariance', - 'ekf_time', ['ekf_cov_gbias_x', 'ekf_cov_gbias_y', 'ekf_cov_gbias_z']) + super(CovGBiasPlot, self).__init__( + "Gyro Bias Covariance", + "Gyro Bias Covariance", + "ekf_time", + ["ekf_cov_gbias_x", "ekf_cov_gbias_y", "ekf_cov_gbias_z"], + ) + class CovVelPlot(VectorPlot): def __init__(self): - super(CovVelPlot, self).__init__('Velocity Covariance', 'Velocity Covariance', - 'ekf_time', ['ekf_cov_vel_x', 'ekf_cov_vel_y', 'ekf_cov_vel_z']) + super(CovVelPlot, self).__init__( + "Velocity Covariance", + "Velocity Covariance", + "ekf_time", + ["ekf_cov_vel_x", "ekf_cov_vel_y", "ekf_cov_vel_z"], + ) + class CovABiasPlot(VectorPlot): def __init__(self): - super(CovABiasPlot, self).__init__('Accel Bias Covariance', 'Accel Bias Covariance', - 'ekf_time', ['ekf_cov_abias_x', 'ekf_cov_abias_y', 'ekf_cov_abias_z']) - + super(CovABiasPlot, self).__init__( + "Accel Bias Covariance", + "Accel Bias Covariance", + "ekf_time", + ["ekf_cov_abias_x", "ekf_cov_abias_y", "ekf_cov_abias_z"], + ) + + class CovPosPlot(VectorPlot): def __init__(self): - super(CovPosPlot, self).__init__('Position Covariance', 'Position Covariance', - 'ekf_time', ['ekf_cov_pos_x', 'ekf_cov_pos_y', 'ekf_cov_pos_z']) + super(CovPosPlot, self).__init__( + "Position Covariance", + "Position Covariance", + "ekf_time", + ["ekf_cov_pos_x", 
"ekf_cov_pos_y", "ekf_cov_pos_z"], + ) + class FeatureCountPlot(GraphPlot): def __init__(self): - super(FeatureCountPlot, self).__init__('Observations', 'Number of Features') + super(FeatureCountPlot, self).__init__("Observations", "Number of Features") self.setMaximumSize(5000, 200) self.features = [] @@ -293,18 +420,18 @@ def update_plot(self, data): break new_ml = 0 if len(self.features) == 0: - new_ml = len(filter(lambda x: x > 1e-2, data[0]['ml_time'])) + new_ml = len(filter(lambda x: x > 1e-2, data[0]["ml_time"])) else: - for r in data[0]['ml_time']: + for r in data[0]["ml_time"]: if r > self.features[-1].x() + 1e-5 and r > 1e-2: new_ml += 1 else: break new_of = 0 if len(self.features) == 0: - new_of = len(filter(lambda x: x > 1e-2, data[0]['of_time'])) + new_of = len(filter(lambda x: x > 1e-2, data[0]["of_time"])) else: - for r in data[0]['of_time']: + for r in data[0]["of_time"]: if r > self.features[-1].x() + 1e-5 and r > 1e-2: new_of += 1 else: @@ -315,11 +442,21 @@ def update_plot(self, data): add_ml = True if cur_ml < 0: add_ml = False - elif cur_of >= 0 and data[0]['of_time'][cur_of] < data[0]['ml_time'][cur_ml]: + elif ( + cur_of >= 0 and data[0]["of_time"][cur_of] < data[0]["ml_time"][cur_ml] + ): add_ml = False - t = data[0]['ml_time'][cur_ml] if add_ml else data[0]['of_time'][cur_of] - v = data[0]['ml_landmarks'][cur_ml] if add_ml else data[0]['of_landmarks'][cur_of] - r = QtGui.QGraphicsEllipseItem(-5, -5, 10, 10) if add_ml else QtGui.QGraphicsEllipseItem(-3, -3, 6, 6) + t = data[0]["ml_time"][cur_ml] if add_ml else data[0]["of_time"][cur_of] + v = ( + data[0]["ml_landmarks"][cur_ml] + if add_ml + else data[0]["of_landmarks"][cur_of] + ) + r = ( + QtGui.QGraphicsEllipseItem(-5, -5, 10, 10) + if add_ml + else QtGui.QGraphicsEllipseItem(-3, -3, 6, 6) + ) r.setPos(t, v) r.setBrush(colors[1] if add_ml else colors[0]) r.setFlag(QtGui.QGraphicsItem.ItemIgnoresTransformations) @@ -330,156 +467,310 @@ def update_plot(self, data): else: cur_of -= 1 + class 
MLMahalPlot(GraphPlot): def __init__(self): - super(MLMahalPlot, self).__init__('Mahalanobis Distance', 'Mahalanobis Distance') - self.mahal_min = pg.PlotDataItem([], [], pen=None, symbol='t') - self.mahal_max = pg.PlotDataItem([], [], pen=None, symbol='t') - self.mahal_mean = pg.PlotDataItem([], [], pen=None, symbol='o', symbolBrush=colors[1]) + super(MLMahalPlot, self).__init__( + "Mahalanobis Distance", "Mahalanobis Distance" + ) + self.mahal_min = pg.PlotDataItem([], [], pen=None, symbol="t") + self.mahal_max = pg.PlotDataItem([], [], pen=None, symbol="t") + self.mahal_mean = pg.PlotDataItem( + [], [], pen=None, symbol="o", symbolBrush=colors[1] + ) self.addItem(self.mahal_min) self.addItem(self.mahal_max) self.addItem(self.mahal_mean) def update_plot(self, data): super(MLMahalPlot, self).update_plot(data) - s = data[1]['ml_time'] - self.mahal_min.setData( data[0]['ml_time'][:s], data[0]['ml_mahal_min'][:s]) - self.mahal_max.setData( data[0]['ml_time'][:s], data[0]['ml_mahal_max'][:s]) - self.mahal_mean.setData(data[0]['ml_time'][:s], data[0]['ml_mahal_mean'][:s]) + s = data[1]["ml_time"] + self.mahal_min.setData(data[0]["ml_time"][:s], data[0]["ml_mahal_min"][:s]) + self.mahal_max.setData(data[0]["ml_time"][:s], data[0]["ml_mahal_max"][:s]) + self.mahal_mean.setData(data[0]["ml_time"][:s], data[0]["ml_mahal_mean"][:s]) + class CommandForcePlot(VectorPlot): def __init__(self): - super(CommandForcePlot, self).__init__('CTL Force', 'Force (N)', - 'command_time', ['command_force_x', 'command_force_y', 'command_force_z']) + super(CommandForcePlot, self).__init__( + "CTL Force", + "Force (N)", + "command_time", + ["command_force_x", "command_force_y", "command_force_z"], + ) + class CommandTorquePlot(VectorPlot): def __init__(self): - super(CommandTorquePlot, self).__init__('CTL Torque', 'Torque(N m)', - 'command_time', ['command_torque_x', 'command_torque_y', 'command_torque_z']) + super(CommandTorquePlot, self).__init__( + "CTL Torque", + "Torque(N m)", + 
"command_time", + ["command_torque_x", "command_torque_y", "command_torque_z"], + ) + class CommandPosErrPlot(VectorPlot): def __init__(self): - super(CommandPosErrPlot, self).__init__('CTL Position Error', 'Position Error (m)', - 'command_time', ['command_pos_err_x', 'command_pos_err_y', 'command_pos_err_z']) + super(CommandPosErrPlot, self).__init__( + "CTL Position Error", + "Position Error (m)", + "command_time", + ["command_pos_err_x", "command_pos_err_y", "command_pos_err_z"], + ) + class CommandPosErrIntPlot(VectorPlot): def __init__(self): - super(CommandPosErrIntPlot, self).__init__('CTL Position Error Integrated', 'Position Error Integrated', - 'command_time', ['command_pos_err_int_x', 'command_pos_err_int_y', 'command_pos_err_int_z']) + super(CommandPosErrIntPlot, self).__init__( + "CTL Position Error Integrated", + "Position Error Integrated", + "command_time", + ["command_pos_err_int_x", "command_pos_err_int_y", "command_pos_err_int_z"], + ) + class CommandAttErrPlot(VectorPlot): def __init__(self): - super(CommandAttErrPlot, self).__init__('CTL Attitude Error', 'Attitude Error', - 'command_time', ['command_att_err_x', 'command_att_err_y', 'command_att_err_z']) + super(CommandAttErrPlot, self).__init__( + "CTL Attitude Error", + "Attitude Error", + "command_time", + ["command_att_err_x", "command_att_err_y", "command_att_err_z"], + ) + class CommandAttErrIntPlot(VectorPlot): def __init__(self): - super(CommandAttErrIntPlot, self).__init__('CTL Attitude Error Integrated', 'Attitude Error Integrated', - 'command_time', ['command_att_err_int_x', 'command_att_err_int_y', 'command_att_err_int_z']) + super(CommandAttErrIntPlot, self).__init__( + "CTL Attitude Error Integrated", + "Attitude Error Integrated", + "command_time", + ["command_att_err_int_x", "command_att_err_int_y", "command_att_err_int_z"], + ) + class CommandStatusPlot(ModePlot): def __init__(self): - super(CommandStatusPlot, self).__init__('CTL Status', 'command_time', 'command_status', - 
[pg.mkColor(0, 0, 255), pg.mkColor(255, 255, 0), pg.mkColor(0, 255, 0), pg.mkColor(255, 0, 0)]) + super(CommandStatusPlot, self).__init__( + "CTL Status", + "command_time", + "command_status", + [ + pg.mkColor(0, 0, 255), + pg.mkColor(255, 255, 0), + pg.mkColor(0, 255, 0), + pg.mkColor(255, 0, 0), + ], + ) + class CtlPosPlot(VectorPlot): def __init__(self): - super(CtlPosPlot, self).__init__('Control Position', 'Position (m)', - ['ekf_time', 'truth_time', 'traj_time', 'shaper_time', - 'ekf_time', 'truth_time', 'traj_time', 'shaper_time', - 'ekf_time', 'truth_time', 'traj_time', 'shaper_time'], - ['ekf_position_x', 'truth_position_x', 'traj_position_x', 'shaper_position_x', - 'ekf_position_y', 'truth_position_y', 'traj_position_y', 'shaper_position_y', - 'ekf_position_z', 'truth_position_z', 'traj_position_z', 'shaper_position_z'], - legend_names=['EKF X', 'Truth X', 'Trajectory X', 'Shaper X'], - pens=[pg.mkPen(colors[0]), pg.mkPen(colors[0], style=QtCore.Qt.DotLine, width=2), pg.mkPen(colors[0], style=QtCore.Qt.DotLine), pg.mkPen(colors[0], style=QtCore.Qt.DashLine), - pg.mkPen(colors[1]), pg.mkPen(colors[1], style=QtCore.Qt.DotLine, width=2), pg.mkPen(colors[1], style=QtCore.Qt.DotLine), pg.mkPen(colors[1], style=QtCore.Qt.DashLine), - pg.mkPen(colors[2]), pg.mkPen(colors[2], style=QtCore.Qt.DotLine, width=2), pg.mkPen(colors[2], style=QtCore.Qt.DotLine), pg.mkPen(colors[2], style=QtCore.Qt.DashLine)] - ) + super(CtlPosPlot, self).__init__( + "Control Position", + "Position (m)", + [ + "ekf_time", + "truth_time", + "traj_time", + "shaper_time", + "ekf_time", + "truth_time", + "traj_time", + "shaper_time", + "ekf_time", + "truth_time", + "traj_time", + "shaper_time", + ], + [ + "ekf_position_x", + "truth_position_x", + "traj_position_x", + "shaper_position_x", + "ekf_position_y", + "truth_position_y", + "traj_position_y", + "shaper_position_y", + "ekf_position_z", + "truth_position_z", + "traj_position_z", + "shaper_position_z", + ], + legend_names=["EKF X", 
"Truth X", "Trajectory X", "Shaper X"], + pens=[ + pg.mkPen(colors[0]), + pg.mkPen(colors[0], style=QtCore.Qt.DotLine, width=2), + pg.mkPen(colors[0], style=QtCore.Qt.DotLine), + pg.mkPen(colors[0], style=QtCore.Qt.DashLine), + pg.mkPen(colors[1]), + pg.mkPen(colors[1], style=QtCore.Qt.DotLine, width=2), + pg.mkPen(colors[1], style=QtCore.Qt.DotLine), + pg.mkPen(colors[1], style=QtCore.Qt.DashLine), + pg.mkPen(colors[2]), + pg.mkPen(colors[2], style=QtCore.Qt.DotLine, width=2), + pg.mkPen(colors[2], style=QtCore.Qt.DotLine), + pg.mkPen(colors[2], style=QtCore.Qt.DashLine), + ], + ) + class CtlRotPlot(VectorPlot): def __init__(self): - super(CtlRotPlot, self).__init__('Control Rotation', 'Euler Angle (deg)', - ['ekf_time', 'truth_time', 'traj_time', 'shaper_time', - 'ekf_time', 'truth_time', 'traj_time', 'shaper_time', - 'ekf_time', 'truth_time', 'traj_time', 'shaper_time'], - ['ekf_rot_x', 'truth_rot_x', 'traj_rot_x', 'shaper_rot_x', - 'ekf_rot_y', 'truth_rot_y', 'traj_rot_y', 'shaper_rot_y', - 'ekf_rot_z', 'truth_rot_z', 'traj_rot_z', 'shaper_rot_z'], - legend_names=['EKF X Rot', 'Truth X Rot', 'Trajectory X Rot', 'Shaper X Rot'], - pens=[pg.mkPen(colors[0]), pg.mkPen(colors[0], style=QtCore.Qt.DotLine, width=2), pg.mkPen(colors[0], style=QtCore.Qt.DotLine), pg.mkPen(colors[0], style=QtCore.Qt.DashLine), - pg.mkPen(colors[1]), pg.mkPen(colors[1], style=QtCore.Qt.DotLine, width=2), pg.mkPen(colors[1], style=QtCore.Qt.DotLine), pg.mkPen(colors[1], style=QtCore.Qt.DashLine), - pg.mkPen(colors[2]), pg.mkPen(colors[2], style=QtCore.Qt.DotLine, width=2), pg.mkPen(colors[2], style=QtCore.Qt.DotLine), pg.mkPen(colors[2], style=QtCore.Qt.DashLine)] - ) + super(CtlRotPlot, self).__init__( + "Control Rotation", + "Euler Angle (deg)", + [ + "ekf_time", + "truth_time", + "traj_time", + "shaper_time", + "ekf_time", + "truth_time", + "traj_time", + "shaper_time", + "ekf_time", + "truth_time", + "traj_time", + "shaper_time", + ], + [ + "ekf_rot_x", + "truth_rot_x", + 
"traj_rot_x", + "shaper_rot_x", + "ekf_rot_y", + "truth_rot_y", + "traj_rot_y", + "shaper_rot_y", + "ekf_rot_z", + "truth_rot_z", + "traj_rot_z", + "shaper_rot_z", + ], + legend_names=[ + "EKF X Rot", + "Truth X Rot", + "Trajectory X Rot", + "Shaper X Rot", + ], + pens=[ + pg.mkPen(colors[0]), + pg.mkPen(colors[0], style=QtCore.Qt.DotLine, width=2), + pg.mkPen(colors[0], style=QtCore.Qt.DotLine), + pg.mkPen(colors[0], style=QtCore.Qt.DashLine), + pg.mkPen(colors[1]), + pg.mkPen(colors[1], style=QtCore.Qt.DotLine, width=2), + pg.mkPen(colors[1], style=QtCore.Qt.DotLine), + pg.mkPen(colors[1], style=QtCore.Qt.DashLine), + pg.mkPen(colors[2]), + pg.mkPen(colors[2], style=QtCore.Qt.DotLine, width=2), + pg.mkPen(colors[2], style=QtCore.Qt.DotLine), + pg.mkPen(colors[2], style=QtCore.Qt.DashLine), + ], + ) + class Pmc1NozzlePlot(VectorPlot): def __init__(self): - super(Pmc1NozzlePlot, self).__init__('PMC1 Nozzle Command', 'Ticks', - 'pmc_time', ['pmc_1_nozzle_1', 'pmc_1_nozzle_2', 'pmc_1_nozzle_3', 'pmc_1_nozzle_4', 'pmc_1_nozzle_5', 'pmc_1_nozzle_6' ]) + super(Pmc1NozzlePlot, self).__init__( + "PMC1 Nozzle Command", + "Ticks", + "pmc_time", + [ + "pmc_1_nozzle_1", + "pmc_1_nozzle_2", + "pmc_1_nozzle_3", + "pmc_1_nozzle_4", + "pmc_1_nozzle_5", + "pmc_1_nozzle_6", + ], + ) + class Pmc2NozzlePlot(VectorPlot): def __init__(self): - super(Pmc2NozzlePlot, self).__init__('PMC2 Nozzle Command', 'Ticks', - 'pmc_time', ['pmc_2_nozzle_1', 'pmc_2_nozzle_2', 'pmc_2_nozzle_3', 'pmc_2_nozzle_4', 'pmc_2_nozzle_5', 'pmc_2_nozzle_6' ]) + super(Pmc2NozzlePlot, self).__init__( + "PMC2 Nozzle Command", + "Ticks", + "pmc_time", + [ + "pmc_2_nozzle_1", + "pmc_2_nozzle_2", + "pmc_2_nozzle_3", + "pmc_2_nozzle_4", + "pmc_2_nozzle_5", + "pmc_2_nozzle_6", + ], + ) + class Pmc1BlowerPlot(VectorPlot): def __init__(self): - super(Pmc1BlowerPlot, self).__init__('PMC1 Blower speed', 'Ticks', - 'pmc_time', ['pmc_1_motor_speed']) + super(Pmc1BlowerPlot, self).__init__( + "PMC1 Blower speed", "Ticks", 
"pmc_time", ["pmc_1_motor_speed"] + ) + class Pmc2BlowerPlot(VectorPlot): def __init__(self): - super(Pmc2BlowerPlot, self).__init__('PMC2 Blower speed', 'Ticks', - 'pmc_time', ['pmc_2_motor_speed']) + super(Pmc2BlowerPlot, self).__init__( + "PMC2 Blower speed", "Ticks", "pmc_time", ["pmc_2_motor_speed"] + ) + plot_types = { - 'EKF' : { - 'Position' : PositionPlot, - 'Velocity' : VelocityPlot, - 'Acceleration' : AccelerationPlot, - 'Accel Bias' : AccelerationBiasPlot, - 'Orientation' : OrientationPlot, - 'Angular Velocity' : OmegaPlot, - 'Gyro Bias' : GyroBiasPlot, - 'Feature Counts' : FeatureCountPlot, - 'Confidence' : ConfidencePlot, - 'Mahalanobis Distances' : MLMahalPlot, - 'Covariance' : { - 'All' : CovPlot, - 'Rotation' : CovRotPlot, - 'Gyro Bias' : CovGBiasPlot, - 'Velocity' : CovVelPlot, - 'Accel Bias' : CovABiasPlot, - 'Position' : CovPosPlot - } + "EKF": { + "Position": PositionPlot, + "Velocity": VelocityPlot, + "Acceleration": AccelerationPlot, + "Accel Bias": AccelerationBiasPlot, + "Orientation": OrientationPlot, + "Angular Velocity": OmegaPlot, + "Gyro Bias": GyroBiasPlot, + "Feature Counts": FeatureCountPlot, + "Confidence": ConfidencePlot, + "Mahalanobis Distances": MLMahalPlot, + "Covariance": { + "All": CovPlot, + "Rotation": CovRotPlot, + "Gyro Bias": CovGBiasPlot, + "Velocity": CovVelPlot, + "Accel Bias": CovABiasPlot, + "Position": CovPosPlot, + }, + }, + "CTL": { + "Force": CommandForcePlot, + "Torque": CommandTorquePlot, + "Position": CtlPosPlot, + "Position Error": CommandPosErrPlot, + "Position Error Integ.": CommandPosErrIntPlot, + "Attitude Error": CommandAttErrPlot, + "Attitude Error Integ.": CommandAttErrIntPlot, + "Rotation": CtlRotPlot, + "Status": CommandStatusPlot, }, - 'CTL' : { - 'Force' : CommandForcePlot, - 'Torque' : CommandTorquePlot, - 'Position' : CtlPosPlot, - 'Position Error' : CommandPosErrPlot, - 'Position Error Integ.' : CommandPosErrIntPlot, - 'Attitude Error' : CommandAttErrPlot, - 'Attitude Error Integ.' 
: CommandAttErrIntPlot, - 'Rotation' : CtlRotPlot, - 'Status' : CommandStatusPlot + "PMC": { + "PMC1:Nozzle": Pmc1NozzlePlot, + "PMC1:Blower": Pmc1BlowerPlot, + "PMC2:Nozzle": Pmc2NozzlePlot, + "PMC2:Blower": Pmc2BlowerPlot, }, - 'PMC' : { - 'PMC1:Nozzle' : Pmc1NozzlePlot, - 'PMC1:Blower' : Pmc1BlowerPlot, - 'PMC2:Nozzle' : Pmc2NozzlePlot, - 'PMC2:Blower' : Pmc2BlowerPlot - } } plot_display_names = dict() + + def __generate_display_names(plot_types, base_name): for n in plot_types.keys(): v = plot_types[n] - name = base_name + ' ' + n + name = base_name + " " + n if type(v) == dict: __generate_display_names(v, name) else: plot_display_names[v] = name -__generate_display_names(plot_types, "") + +__generate_display_names(plot_types, "") diff --git a/tools/gnc_visualizer/scripts/visualizer.py b/tools/gnc_visualizer/scripts/visualizer.py index e659a010fe..65070e24a1 100755 --- a/tools/gnc_visualizer/scripts/visualizer.py +++ b/tools/gnc_visualizer/scripts/visualizer.py @@ -17,22 +17,22 @@ # License for the specific language governing permissions and limitations # under the License. 
-from pyqtgraph.Qt import QtGui, QtCore -import pyqtgraph as pg -import numpy as np - import argparse import atexit import functools -from math import asin, atan2, isnan, pi, sqrt import os import re import select import signal import socket import subprocess -import time import sys +import time +from math import asin, atan2, isnan, pi, sqrt + +import numpy as np +import pyqtgraph as pg +from pyqtgraph.Qt import QtCore, QtGui filepath = os.path.dirname(os.path.realpath(__file__)) sys.path.append(filepath + "/communications") @@ -42,70 +42,94 @@ ARRAY_SIZE = plot_types.DISPLAY_TIME * 65 -ASTROBEE_ROOT = os.path.dirname(os.path.realpath(__file__)) + '/../../../' +ASTROBEE_ROOT = os.path.dirname(os.path.realpath(__file__)) + "/../../../" # Creating communication manager with default value com_manager = com.ComManager() + def quat_to_eulers(quat): - return [atan2(2 * (quat.w * quat.x + quat.y * quat.z), 1 - 2 * (quat.x**2 + quat.y**2)), - asin(2 * (quat.w * quat.y - quat.z * quat.x)), - atan2(2 * (quat.w * quat.z + quat.x * quat.y), 1 - 2 * (quat.y**2 + quat.z**2))] + return [ + atan2( + 2 * (quat.w * quat.x + quat.y * quat.z), 1 - 2 * (quat.x ** 2 + quat.y ** 2) + ), + asin(2 * (quat.w * quat.y - quat.z * quat.x)), + atan2( + 2 * (quat.w * quat.z + quat.x * quat.y), 1 - 2 * (quat.y ** 2 + quat.z ** 2) + ), + ] + def mean(array): return sum(array) / len(array) + start_time = time.time() + class VisualizerCallback: def __init__(self, time_key, callbacks): self.time_key = time_key self.callbacks = callbacks -ekf_data = {'ekf_time': lambda x: time.time() - start_time, - 'ekf_position_x': lambda x: x.pose.position.x, - 'ekf_position_y': lambda x: x.pose.position.y, - 'ekf_position_z': lambda x: x.pose.position.z, - 'ekf_vel_x': lambda x: x.velocity.x, - 'ekf_vel_y': lambda x: x.velocity.y, - 'ekf_vel_z': lambda x: x.velocity.z, - 'ekf_accel_x': lambda x: x.accel.x, - 'ekf_accel_y': lambda x: x.accel.y, - 'ekf_accel_z': lambda x: x.accel.z, - 'ekf_accel_bias_x': lambda x: 
x.accel_bias.x, - 'ekf_accel_bias_y': lambda x: x.accel_bias.y, - 'ekf_accel_bias_z': lambda x: x.accel_bias.z, - 'ekf_rot_x': lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, - 'ekf_rot_y': lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, - 'ekf_rot_z': lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi, - 'ekf_omega_x': lambda x: x.omega.x, - 'ekf_omega_y': lambda x: x.omega.y, - 'ekf_omega_z': lambda x: x.omega.z, - 'ekf_gyro_bias_x': lambda x: x.gyro_bias.x, - 'ekf_gyro_bias_y': lambda x: x.gyro_bias.y, - 'ekf_gyro_bias_z': lambda x: x.gyro_bias.z, - 'ekf_confidence': lambda x: x.confidence, - 'ekf_cov_rot_x': lambda x: x.cov_diag[0], - 'ekf_cov_rot_y': lambda x: x.cov_diag[1], - 'ekf_cov_rot_z': lambda x: x.cov_diag[2], - 'ekf_cov_rot_m': lambda x: sqrt(x.cov_diag[0]**2 + x.cov_diag[1]**2 + x.cov_diag[2]**2), - 'ekf_cov_gbias_x': lambda x: x.cov_diag[3], - 'ekf_cov_gbias_y': lambda x: x.cov_diag[4], - 'ekf_cov_gbias_z': lambda x: x.cov_diag[5], - 'ekf_cov_gbias_m': lambda x: sqrt(x.cov_diag[3]**2 + x.cov_diag[4]**2 + x.cov_diag[5]**2), - 'ekf_cov_vel_x': lambda x: x.cov_diag[6], - 'ekf_cov_vel_y': lambda x: x.cov_diag[7], - 'ekf_cov_vel_z': lambda x: x.cov_diag[8], - 'ekf_cov_vel_m': lambda x: sqrt(x.cov_diag[6]**2 + x.cov_diag[7]**2 + x.cov_diag[8]**2), - 'ekf_cov_abias_x': lambda x: x.cov_diag[9], - 'ekf_cov_abias_y': lambda x: x.cov_diag[10], - 'ekf_cov_abias_z': lambda x: x.cov_diag[11], - 'ekf_cov_abias_m': lambda x: sqrt(x.cov_diag[9]**2 + x.cov_diag[10]**2 + x.cov_diag[11]**2), - 'ekf_cov_pos_x': lambda x: x.cov_diag[12], - 'ekf_cov_pos_y': lambda x: x.cov_diag[13], - 'ekf_cov_pos_z': lambda x: x.cov_diag[14], - 'ekf_cov_pos_m': lambda x: sqrt(x.cov_diag[12]**2 + x.cov_diag[13]**2 + x.cov_diag[14]**2)} -ekf_callbacks = VisualizerCallback('ekf_time', ekf_data) + +ekf_data = { + "ekf_time": lambda x: time.time() - start_time, + "ekf_position_x": lambda x: x.pose.position.x, + "ekf_position_y": lambda x: x.pose.position.y, + 
"ekf_position_z": lambda x: x.pose.position.z, + "ekf_vel_x": lambda x: x.velocity.x, + "ekf_vel_y": lambda x: x.velocity.y, + "ekf_vel_z": lambda x: x.velocity.z, + "ekf_accel_x": lambda x: x.accel.x, + "ekf_accel_y": lambda x: x.accel.y, + "ekf_accel_z": lambda x: x.accel.z, + "ekf_accel_bias_x": lambda x: x.accel_bias.x, + "ekf_accel_bias_y": lambda x: x.accel_bias.y, + "ekf_accel_bias_z": lambda x: x.accel_bias.z, + "ekf_rot_x": lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, + "ekf_rot_y": lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, + "ekf_rot_z": lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi, + "ekf_omega_x": lambda x: x.omega.x, + "ekf_omega_y": lambda x: x.omega.y, + "ekf_omega_z": lambda x: x.omega.z, + "ekf_gyro_bias_x": lambda x: x.gyro_bias.x, + "ekf_gyro_bias_y": lambda x: x.gyro_bias.y, + "ekf_gyro_bias_z": lambda x: x.gyro_bias.z, + "ekf_confidence": lambda x: x.confidence, + "ekf_cov_rot_x": lambda x: x.cov_diag[0], + "ekf_cov_rot_y": lambda x: x.cov_diag[1], + "ekf_cov_rot_z": lambda x: x.cov_diag[2], + "ekf_cov_rot_m": lambda x: sqrt( + x.cov_diag[0] ** 2 + x.cov_diag[1] ** 2 + x.cov_diag[2] ** 2 + ), + "ekf_cov_gbias_x": lambda x: x.cov_diag[3], + "ekf_cov_gbias_y": lambda x: x.cov_diag[4], + "ekf_cov_gbias_z": lambda x: x.cov_diag[5], + "ekf_cov_gbias_m": lambda x: sqrt( + x.cov_diag[3] ** 2 + x.cov_diag[4] ** 2 + x.cov_diag[5] ** 2 + ), + "ekf_cov_vel_x": lambda x: x.cov_diag[6], + "ekf_cov_vel_y": lambda x: x.cov_diag[7], + "ekf_cov_vel_z": lambda x: x.cov_diag[8], + "ekf_cov_vel_m": lambda x: sqrt( + x.cov_diag[6] ** 2 + x.cov_diag[7] ** 2 + x.cov_diag[8] ** 2 + ), + "ekf_cov_abias_x": lambda x: x.cov_diag[9], + "ekf_cov_abias_y": lambda x: x.cov_diag[10], + "ekf_cov_abias_z": lambda x: x.cov_diag[11], + "ekf_cov_abias_m": lambda x: sqrt( + x.cov_diag[9] ** 2 + x.cov_diag[10] ** 2 + x.cov_diag[11] ** 2 + ), + "ekf_cov_pos_x": lambda x: x.cov_diag[12], + "ekf_cov_pos_y": lambda x: x.cov_diag[13], + 
"ekf_cov_pos_z": lambda x: x.cov_diag[14], + "ekf_cov_pos_m": lambda x: sqrt( + x.cov_diag[12] ** 2 + x.cov_diag[13] ** 2 + x.cov_diag[14] ** 2 + ), +} +ekf_callbacks = VisualizerCallback("ekf_time", ekf_data) + def mahal_filter(dists): f = filter(lambda t: not isnan(t), dists) @@ -113,95 +137,122 @@ def mahal_filter(dists): return [-1] return f -ml_data = {'ml_time': lambda x: time.time() - start_time, - 'ml_landmarks': lambda x: x.ml_count, - 'ml_mahal_min': lambda x: min(mahal_filter(x.ml_mahal_dists)), - 'ml_mahal_mean': lambda x: mean(mahal_filter(x.ml_mahal_dists)), - 'ml_mahal_max': lambda x: max(mahal_filter(x.ml_mahal_dists)) - } -ml_callbacks = VisualizerCallback('ml_time', ml_data) - -of_data = {'of_time': lambda x: time.time() - start_time, - 'of_landmarks': lambda x: x.of_count} -of_callbacks = VisualizerCallback('of_time', of_data) - -truth_data = {'truth_time': lambda x: time.time() - start_time, - 'truth_position_x': lambda x: x.pose.position.x, - 'truth_position_y': lambda x: x.pose.position.y, - 'truth_position_z': lambda x: x.pose.position.z, - 'truth_rot_x': lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, - 'truth_rot_y': lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, - 'truth_rot_z': lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi} -truth_callbacks = VisualizerCallback('truth_time', truth_data) - -ml_pose_data = {'ml_pose_time': lambda x: time.time() - start_time, - 'ml_pose_position_x': lambda x: x.pose.position.x, - 'ml_pose_position_y': lambda x: x.pose.position.y, - 'ml_pose_position_z': lambda x: x.pose.position.z, - 'ml_pose_rot_x': lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, - 'ml_pose_rot_y': lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, - 'ml_pose_rot_z': lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi} -ml_pose_callbacks = VisualizerCallback('ml_pose_time', ml_pose_data) - -command_data = {'command_time': lambda x: time.time() - start_time, - 
'command_status': lambda x: x.status, - 'command_control_mode': lambda x: x.control_mode, - 'command_force_x': lambda x: x.wrench.force.x, - 'command_force_y': lambda x: x.wrench.force.y, - 'command_force_z': lambda x: x.wrench.force.z, - 'command_torque_x': lambda x: x.wrench.torque.x, - 'command_torque_y': lambda x: x.wrench.torque.y, - 'command_torque_z': lambda x: x.wrench.torque.z, - 'command_pos_err_x': lambda x: x.position_error.x, - 'command_pos_err_y': lambda x: x.position_error.y, - 'command_pos_err_z': lambda x: x.position_error.z, - 'command_pos_err_int_x': lambda x: x.position_error_integrated.x, - 'command_pos_err_int_y': lambda x: x.position_error_integrated.y, - 'command_pos_err_int_z': lambda x: x.position_error_integrated.z, - 'command_att_err_x': lambda x: x.attitude_error.x, - 'command_att_err_y': lambda x: x.attitude_error.y, - 'command_att_err_z': lambda x: x.attitude_error.z, - 'command_att_err_int_x': lambda x: x.attitude_error_integrated.x, - 'command_att_err_int_y': lambda x: x.attitude_error_integrated.y, - 'command_att_err_int_z': lambda x: x.attitude_error_integrated.z} -command_callbacks = VisualizerCallback('command_time', command_data) - -traj_data = {'traj_time': lambda x: time.time() - start_time, - 'traj_position_x': lambda x: x.pose.position.x, - 'traj_position_y': lambda x: x.pose.position.y, - 'traj_position_z': lambda x: x.pose.position.z, - 'traj_rot_x': lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, - 'traj_rot_y': lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, - 'traj_rot_z': lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi} -traj_callbacks = VisualizerCallback('traj_time', traj_data) - -shaper_data = {'shaper_time': lambda x: time.time() - start_time, - 'shaper_position_x': lambda x: x.pose.position.x, - 'shaper_position_y': lambda x: x.pose.position.y, - 'shaper_position_z': lambda x: x.pose.position.z, - 'shaper_rot_x': lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, - 
'shaper_rot_y': lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, - 'shaper_rot_z': lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi} -shaper_callbacks = VisualizerCallback('shaper_time', shaper_data) - -pmc_data = {'pmc_time': lambda x: time.time() - start_time, - 'pmc_1_motor_speed': lambda x: x.goals[0].motor_speed, - 'pmc_1_nozzle_1': lambda x: ord(x.goals[0].nozzle_positions[0]), - 'pmc_1_nozzle_2': lambda x: ord(x.goals[0].nozzle_positions[1]), - 'pmc_1_nozzle_3': lambda x: ord(x.goals[0].nozzle_positions[2]), - 'pmc_1_nozzle_4': lambda x: ord(x.goals[0].nozzle_positions[3]), - 'pmc_1_nozzle_5': lambda x: ord(x.goals[0].nozzle_positions[4]), - 'pmc_1_nozzle_6': lambda x: ord(x.goals[0].nozzle_positions[5]), - 'pmc_2_motor_speed': lambda x: x.goals[1].motor_speed, - 'pmc_2_nozzle_1': lambda x: ord(x.goals[1].nozzle_positions[0]), - 'pmc_2_nozzle_2': lambda x: ord(x.goals[1].nozzle_positions[1]), - 'pmc_2_nozzle_3': lambda x: ord(x.goals[1].nozzle_positions[2]), - 'pmc_2_nozzle_4': lambda x: ord(x.goals[1].nozzle_positions[3]), - 'pmc_2_nozzle_5': lambda x: ord(x.goals[1].nozzle_positions[4]), - 'pmc_2_nozzle_6': lambda x: ord(x.goals[1].nozzle_positions[5]) } -pmc_callbacks = VisualizerCallback('pmc_time', pmc_data) - -callbacks_list = [ekf_callbacks, ml_callbacks, of_callbacks, truth_callbacks, ml_pose_callbacks, command_callbacks, traj_callbacks, shaper_callbacks, pmc_callbacks] + +ml_data = { + "ml_time": lambda x: time.time() - start_time, + "ml_landmarks": lambda x: x.ml_count, + "ml_mahal_min": lambda x: min(mahal_filter(x.ml_mahal_dists)), + "ml_mahal_mean": lambda x: mean(mahal_filter(x.ml_mahal_dists)), + "ml_mahal_max": lambda x: max(mahal_filter(x.ml_mahal_dists)), +} +ml_callbacks = VisualizerCallback("ml_time", ml_data) + +of_data = { + "of_time": lambda x: time.time() - start_time, + "of_landmarks": lambda x: x.of_count, +} +of_callbacks = VisualizerCallback("of_time", of_data) + +truth_data = { + "truth_time": lambda x: 
time.time() - start_time, + "truth_position_x": lambda x: x.pose.position.x, + "truth_position_y": lambda x: x.pose.position.y, + "truth_position_z": lambda x: x.pose.position.z, + "truth_rot_x": lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, + "truth_rot_y": lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, + "truth_rot_z": lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi, +} +truth_callbacks = VisualizerCallback("truth_time", truth_data) + +ml_pose_data = { + "ml_pose_time": lambda x: time.time() - start_time, + "ml_pose_position_x": lambda x: x.pose.position.x, + "ml_pose_position_y": lambda x: x.pose.position.y, + "ml_pose_position_z": lambda x: x.pose.position.z, + "ml_pose_rot_x": lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, + "ml_pose_rot_y": lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, + "ml_pose_rot_z": lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi, +} +ml_pose_callbacks = VisualizerCallback("ml_pose_time", ml_pose_data) + +command_data = { + "command_time": lambda x: time.time() - start_time, + "command_status": lambda x: x.status, + "command_control_mode": lambda x: x.control_mode, + "command_force_x": lambda x: x.wrench.force.x, + "command_force_y": lambda x: x.wrench.force.y, + "command_force_z": lambda x: x.wrench.force.z, + "command_torque_x": lambda x: x.wrench.torque.x, + "command_torque_y": lambda x: x.wrench.torque.y, + "command_torque_z": lambda x: x.wrench.torque.z, + "command_pos_err_x": lambda x: x.position_error.x, + "command_pos_err_y": lambda x: x.position_error.y, + "command_pos_err_z": lambda x: x.position_error.z, + "command_pos_err_int_x": lambda x: x.position_error_integrated.x, + "command_pos_err_int_y": lambda x: x.position_error_integrated.y, + "command_pos_err_int_z": lambda x: x.position_error_integrated.z, + "command_att_err_x": lambda x: x.attitude_error.x, + "command_att_err_y": lambda x: x.attitude_error.y, + "command_att_err_z": lambda x: 
x.attitude_error.z, + "command_att_err_int_x": lambda x: x.attitude_error_integrated.x, + "command_att_err_int_y": lambda x: x.attitude_error_integrated.y, + "command_att_err_int_z": lambda x: x.attitude_error_integrated.z, +} +command_callbacks = VisualizerCallback("command_time", command_data) + +traj_data = { + "traj_time": lambda x: time.time() - start_time, + "traj_position_x": lambda x: x.pose.position.x, + "traj_position_y": lambda x: x.pose.position.y, + "traj_position_z": lambda x: x.pose.position.z, + "traj_rot_x": lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, + "traj_rot_y": lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, + "traj_rot_z": lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi, +} +traj_callbacks = VisualizerCallback("traj_time", traj_data) + +shaper_data = { + "shaper_time": lambda x: time.time() - start_time, + "shaper_position_x": lambda x: x.pose.position.x, + "shaper_position_y": lambda x: x.pose.position.y, + "shaper_position_z": lambda x: x.pose.position.z, + "shaper_rot_x": lambda x: quat_to_eulers(x.pose.orientation)[0] * 180 / pi, + "shaper_rot_y": lambda x: quat_to_eulers(x.pose.orientation)[1] * 180 / pi, + "shaper_rot_z": lambda x: quat_to_eulers(x.pose.orientation)[2] * 180 / pi, +} +shaper_callbacks = VisualizerCallback("shaper_time", shaper_data) + +pmc_data = { + "pmc_time": lambda x: time.time() - start_time, + "pmc_1_motor_speed": lambda x: x.goals[0].motor_speed, + "pmc_1_nozzle_1": lambda x: ord(x.goals[0].nozzle_positions[0]), + "pmc_1_nozzle_2": lambda x: ord(x.goals[0].nozzle_positions[1]), + "pmc_1_nozzle_3": lambda x: ord(x.goals[0].nozzle_positions[2]), + "pmc_1_nozzle_4": lambda x: ord(x.goals[0].nozzle_positions[3]), + "pmc_1_nozzle_5": lambda x: ord(x.goals[0].nozzle_positions[4]), + "pmc_1_nozzle_6": lambda x: ord(x.goals[0].nozzle_positions[5]), + "pmc_2_motor_speed": lambda x: x.goals[1].motor_speed, + "pmc_2_nozzle_1": lambda x: ord(x.goals[1].nozzle_positions[0]), + 
"pmc_2_nozzle_2": lambda x: ord(x.goals[1].nozzle_positions[1]), + "pmc_2_nozzle_3": lambda x: ord(x.goals[1].nozzle_positions[2]), + "pmc_2_nozzle_4": lambda x: ord(x.goals[1].nozzle_positions[3]), + "pmc_2_nozzle_5": lambda x: ord(x.goals[1].nozzle_positions[4]), + "pmc_2_nozzle_6": lambda x: ord(x.goals[1].nozzle_positions[5]), +} +pmc_callbacks = VisualizerCallback("pmc_time", pmc_data) + +callbacks_list = [ + ekf_callbacks, + ml_callbacks, + of_callbacks, + truth_callbacks, + ml_pose_callbacks, + command_callbacks, + traj_callbacks, + shaper_callbacks, + pmc_callbacks, +] + class TerminalView(QtGui.QGraphicsTextItem): def __init__(self, graphics_view): @@ -210,16 +261,19 @@ def __init__(self, graphics_view): self.setZValue(1) self.setPos(10, 10) self.setDefaultTextColor(QtGui.QColor(255, 255, 255)) + def boundingRect(self): r = super(TerminalView, self).boundingRect() r.setWidth(self.graphics_view.width() - 50) r.setHeight(max(self.graphics_view.height() - 40, r.height())) return r + def paint(self, painter, o, w): painter.setBrush(QtGui.QColor(0, 0, 0, 150)) painter.drawRect(self.boundingRect()) super(TerminalView, self).paint(painter, o, w) + class TerminalGraphicsView(QtGui.QGraphicsView): def __init__(self, parent): super(TerminalGraphicsView, self).__init__() @@ -227,25 +281,29 @@ def __init__(self, parent): self.setScene(QtGui.QGraphicsScene()) self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) - self.setAttribute(QtCore.Qt.WA_TranslucentBackground); - self.viewport().setAutoFillBackground(False); - self.setStyleSheet("border: none"); + self.setAttribute(QtCore.Qt.WA_TranslucentBackground) + self.viewport().setAutoFillBackground(False) + self.setStyleSheet("border: none") self.setInteractive(False) + def resize(self, event): self.move(20, 20) self.setFixedSize(event.size().width() - 40, event.size().height() - 40) + class ParentGraphicsView(pg.GraphicsView): def __init__(self): 
super(ParentGraphicsView, self).__init__() self.setAntialiasing(False) + def resizeEvent(self, event): super(ParentGraphicsView, self).resizeEvent(event) if event != None: self.emit(QtCore.SIGNAL("resize"), event) + class Visualizer(QtGui.QMainWindow): - def __init__(self, launch_command = None, plan = None, com_method = com.DDS_COM): + def __init__(self, launch_command=None, plan=None, com_method=com.DDS_COM): super(Visualizer, self).__init__() self.com_method = com_method self.launch_command = launch_command @@ -262,14 +320,22 @@ def __init__(self, launch_command = None, plan = None, com_method = com.DDS_COM) for k in d.callbacks.keys(): self.data[k] = np.full(ARRAY_SIZE, 1e-10) - self.columns = [[plot_types.CtlPosPlot, plot_types.PositionPlot, plot_types.FeatureCountPlot, plot_types.ConfidencePlot, \ - plot_types.CommandStatusPlot], [plot_types.CtlRotPlot, plot_types.OrientationPlot, plot_types.CovPlot]] + self.columns = [ + [ + plot_types.CtlPosPlot, + plot_types.PositionPlot, + plot_types.FeatureCountPlot, + plot_types.ConfidencePlot, + plot_types.CommandStatusPlot, + ], + [plot_types.CtlRotPlot, plot_types.OrientationPlot, plot_types.CovPlot], + ] self.graphics_view = ParentGraphicsView() self.terminal_graphics_view = TerminalGraphicsView(self.graphics_view) self.terminal_view = TerminalView(self.terminal_graphics_view) - self.layout = pg.GraphicsLayout(border=(100,100,100)) + self.layout = pg.GraphicsLayout(border=(100, 100, 100)) self.layout.setBorder(None) self.layout.setZValue(-1) self.graphics_view.setCentralItem(self.layout) @@ -285,7 +351,7 @@ def __init__(self, launch_command = None, plan = None, com_method = com.DDS_COM) l.setBorder(pg.mkColor(150, 150, 150)) for r in range(len(self.columns[c])): t = self.columns[c][r] - new_item = t() # it gets added to l in constructor + new_item = t() # it gets added to l in constructor l.addItem(new_item) if r != len(self.columns[c]) - 1: l.nextRow() @@ -293,7 +359,7 @@ def __init__(self, launch_command = None, 
plan = None, com_method = com.DDS_COM) new_item.show_x_axis(True) self.layout.nextColumn() - self.setWindowTitle('GNC Visualizer') + self.setWindowTitle("GNC Visualizer") self.settings = QtCore.QSettings("NASA", "gnc_visualizer") self.restoreGeometry(self.settings.value("geometry", "").toByteArray()) @@ -317,16 +383,21 @@ def closeEvent(self, event): def startProcess(self): if self.launch_command != None: - self.print_to_log('Starting process: %s' % (self.launch_command), '#FFB266') - self.proc = subprocess.Popen(self.launch_command, preexec_fn=os.setsid, shell=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + self.print_to_log("Starting process: %s" % (self.launch_command), "#FFB266") + self.proc = subprocess.Popen( + self.launch_command, + preexec_fn=os.setsid, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) self.pmc_enabled = True def stopProcess(self): if self.proc != None: - self.print_to_log('Stopping process.', '#FFB266') + self.print_to_log("Stopping process.", "#FFB266") os.killpg(os.getpgid(self.proc.pid), signal.SIGINT) - #(output, error) = v.proc.communicate() + # (output, error) = v.proc.communicate() self.proc.wait() self.proc = None @@ -343,7 +414,8 @@ def eventFilter(self, source, event): else: rect = QtCore.QRect( self.menuBar().mapToGlobal(QtCore.QPoint(0, 0)), - self.menuBar().size()) + self.menuBar().size(), + ) if not rect.contains(event.globalPos()): self.menuBar().hide() elif event.type() == QtCore.QEvent.Leave and source is self: @@ -394,10 +466,10 @@ def add_plot(self, plot, col): self.columns[col].append(plot) column = self.layout.getItem(0, col) if len(self.columns[col]) > 1: - column.getItem(len(self.columns[col])-2, 0).show_x_axis(False) - column.nextRow() + column.getItem(len(self.columns[col]) - 2, 0).show_x_axis(False) + column.nextRow() column.addItem(plot()) - column.getItem(len(self.columns[col])-1, 0).show_x_axis(True) + column.getItem(len(self.columns[col]) - 1, 0).show_x_axis(True) 
self.create_menu() def generate_add_plot_menu(self, column, base, plot_types): @@ -410,36 +482,41 @@ def generate_add_plot_menu(self, column, base, plot_types): def create_menu(self): self.menuBar().clear() - viewmenu = self.menuBar().addMenu('Visualizer') - viewmenu.addAction('Pause / Resume (Space)', self.toggle_paused) - viewmenu.addAction('Start / Stop Process (s)', self.start_stop_process) - viewmenu.addAction('Show / Hide Log (l)', self.toggle_log_shown) - viewmenu.addAction('Toggle Full Screen (f)', self.toggle_full_screen) - viewmenu.addAction('Quit (q)', self.quit) - commandmenu = self.menuBar().addMenu('Commands') - commandmenu.addAction('Reset EKF (r)', self.reset_ekf) - commandmenu.addAction('Initialize Bias (b)', self.initialize_bias) - commandmenu.addAction('Choose Plan (c)', self.choose_plan) - commandmenu.addAction('Run Plan (p)', self.run_plan) - commandmenu.addAction('Undock (u)', self.undock) - commandmenu.addAction('Enable / Disable PMC (m)', self.toggle_pmc) + viewmenu = self.menuBar().addMenu("Visualizer") + viewmenu.addAction("Pause / Resume (Space)", self.toggle_paused) + viewmenu.addAction("Start / Stop Process (s)", self.start_stop_process) + viewmenu.addAction("Show / Hide Log (l)", self.toggle_log_shown) + viewmenu.addAction("Toggle Full Screen (f)", self.toggle_full_screen) + viewmenu.addAction("Quit (q)", self.quit) + commandmenu = self.menuBar().addMenu("Commands") + commandmenu.addAction("Reset EKF (r)", self.reset_ekf) + commandmenu.addAction("Initialize Bias (b)", self.initialize_bias) + commandmenu.addAction("Choose Plan (c)", self.choose_plan) + commandmenu.addAction("Run Plan (p)", self.run_plan) + commandmenu.addAction("Undock (u)", self.undock) + commandmenu.addAction("Enable / Disable PMC (m)", self.toggle_pmc) for i in range(len(self.columns) + 1): colmenu = self.menuBar().addMenu("Column " + str(i + 1)) if i < len(self.columns): col = self.columns[i] for j in range(len(col)): - 
colmenu.addAction(plot_types.plot_display_names[col[j]], functools.partial(self.delete_plot, i, j)) + colmenu.addAction( + plot_types.plot_display_names[col[j]], + functools.partial(self.delete_plot, i, j), + ) colmenu.addSeparator() add_menu = colmenu.addMenu("Add Plot") self.generate_add_plot_menu(i, add_menu, plot_types.plot_types) colmenu.addSeparator() if i < len(self.columns): - colmenu.addAction("Delete Column", functools.partial(self.delete_column, i)) + colmenu.addAction( + "Delete Column", functools.partial(self.delete_column, i) + ) if self.com_method == com.DDS_COM: # Disable menu actions for commands for action in self.menuBar().actions(): - if (action.menu() and action.text() == 'Commands'): + if action.menu() and action.text() == "Commands": for act in action.menu().actions(): act.setEnabled(False) @@ -474,18 +551,30 @@ def start_stop_process(self): def run_plan(self): if self.plan != None: - ret = os.system(ASTROBEE_ROOT + '/scripts/teleop/run_plan.sh ' + self.plan + ' > /dev/null &') + ret = os.system( + ASTROBEE_ROOT + + "/scripts/teleop/run_plan.sh " + + self.plan + + " > /dev/null &" + ) else: - self.print_to_log('No plan file specified.', '#FF0000') + self.print_to_log("No plan file specified.", "#FF0000") def undock(self): - ret = os.system('bash ' + ASTROBEE_ROOT + '/scripts/teleop/run_undock.sh > /dev/null &') + ret = os.system( + "bash " + ASTROBEE_ROOT + "/scripts/teleop/run_undock.sh > /dev/null &" + ) def toggle_pmc(self): self.pmc_enabled = com_manager.toggle_pmc(self.pmc_enabled) def choose_plan(self): - result = QtGui.QFileDialog.getOpenFileName(self, 'Open Plan', ASTROBEE_ROOT + '/gnc/matlab/scenarios', 'Plans (*.fplan)') + result = QtGui.QFileDialog.getOpenFileName( + self, + "Open Plan", + ASTROBEE_ROOT + "/gnc/matlab/scenarios", + "Plans (*.fplan)", + ) if result != None: self.plan = str(result) @@ -517,8 +606,11 @@ def keyPressEvent(self, event): def remove_old_data(self): t = time.time() - start_time for c in callbacks_list: - 
while self.data_sizes[c.time_key] > 0 and \ - t - self.data[c.time_key][self.data_sizes[c.time_key] - 1] > plot_types.DISPLAY_TIME + 2: + while ( + self.data_sizes[c.time_key] > 0 + and t - self.data[c.time_key][self.data_sizes[c.time_key] - 1] + > plot_types.DISPLAY_TIME + 2 + ): self.data_sizes[c.time_key] -= 1 def tick(self): @@ -545,10 +637,10 @@ def tick(self): if ret != None: (out, err) = self.proc.communicate() self.proc = None - ansi_escape = re.compile(r'\x1b[^m]*m') + ansi_escape = re.compile(r"\x1b[^m]*m") for line in err.splitlines(): - self.print_to_log(ansi_escape.sub('', line), '#FF0000') - self.print_to_log('Process terminated. Press \'s\' to re-run.', '#FFFFFF') + self.print_to_log(ansi_escape.sub("", line), "#FF0000") + self.print_to_log("Process terminated. Press 's' to re-run.", "#FFFFFF") if self.log_updated: self.terminal_view.setHtml(self.log_text) self.log_updated = False @@ -560,7 +652,9 @@ def tick(self): self.terminal_graphics_view.ensureVisible(r) def add_data(self, callbacks, data): - self.data_sizes[callbacks.time_key] = min(self.data_sizes[callbacks.time_key] + 1, ARRAY_SIZE) + self.data_sizes[callbacks.time_key] = min( + self.data_sizes[callbacks.time_key] + 1, ARRAY_SIZE + ) for key in callbacks.callbacks: self.data[key] = np.roll(self.data[key], 1) self.data[key][0] = callbacks.callbacks[key](data) @@ -596,52 +690,104 @@ def print_to_log(self, text, color): self.log_lines += 1 self.log_updated = True if self.log_lines > self.max_log_length: - self.log_text = self.log_text[self.log_text.find('
') + 1:] + self.log_text = self.log_text[self.log_text.find("
") + 1 :] def log_callback(self, data): - if data.name in ['/recorder'] or data.msg.startswith('waitForService') or \ - data.msg.startswith('Initializing nodelet with') or data.msg.startswith('Loading nodelet'): - return - log_colors = {1: '#C0C0C0', 2: '#FFFFFF', 4: '#FFB266', 8: '#FF6666', 16: '#FF0000'} + if ( + data.name in ["/recorder"] + or data.msg.startswith("waitForService") + or data.msg.startswith("Initializing nodelet with") + or data.msg.startswith("Loading nodelet") + ): + return + log_colors = { + 1: "#C0C0C0", + 2: "#FFFFFF", + 4: "#FFB266", + 8: "#FF6666", + 16: "#FF0000", + } self.print_to_log(data.msg, log_colors[data.level]) + def sigint_handler(*args): QtGui.QApplication.quit() + def main(): com_method = com.DDS_COM - parser = argparse.ArgumentParser(description='Gnc visualization.') + parser = argparse.ArgumentParser(description="Gnc visualization.") # ROS arguments only - parser.add_argument('--gantry', dest='launch_command', action='append_const', - const='roslaunch astrobee proto4c.launch disable_fans:=true', - help='Launch proto4.') - parser.add_argument('--granite', dest='launch_command', action='append_const', - const='roslaunch astrobee granite.launch', - help='Launch proto4.') - parser.add_argument('--bag', dest='launch_command', action='append_const', - const='roslaunch astrobee play_bag.launch', - help='Launch play_bag.launch.') - parser.add_argument('--sim', dest='launch_command', action='append_const', - const='roslaunch astrobee sim.launch', - help='Launch simulator.launch.') - parser.add_argument('--plan', dest='plan', action='store', help='The plan to execute.') - parser.add_argument('--disable_pmcs', dest='disable_pmcs', action='store_true', help='Disable the pmcs.') + parser.add_argument( + "--gantry", + dest="launch_command", + action="append_const", + const="roslaunch astrobee proto4c.launch disable_fans:=true", + help="Launch proto4.", + ) + parser.add_argument( + "--granite", + dest="launch_command", + 
action="append_const", + const="roslaunch astrobee granite.launch", + help="Launch proto4.", + ) + parser.add_argument( + "--bag", + dest="launch_command", + action="append_const", + const="roslaunch astrobee play_bag.launch", + help="Launch play_bag.launch.", + ) + parser.add_argument( + "--sim", + dest="launch_command", + action="append_const", + const="roslaunch astrobee sim.launch", + help="Launch simulator.launch.", + ) + parser.add_argument( + "--plan", dest="plan", action="store", help="The plan to execute." + ) + parser.add_argument( + "--disable_pmcs", + dest="disable_pmcs", + action="store_true", + help="Disable the pmcs.", + ) # General argument - parser.add_argument('--comm', dest='com_method', action='store', - help='Communication method to the robot: dds or ros') + parser.add_argument( + "--comm", + dest="com_method", + action="store", + help="Communication method to the robot: dds or ros", + ) # DDS arguments only - parser.add_argument('--use_ip', dest='use_ip', action='store', - help='Type an IP to be treated as initial peer for DDS.') - parser.add_argument('--robot_name', dest='robot_name', action='store', - help='Type the name of the robot you want to hear telemetry of over DDS.') - parser.add_argument('--domain', dest='domain', action='store', - help='DDS Domain ID. Default 37 ') - parser.add_argument('--public_ip', dest='public_ip', action='store', - help='TReK IP. Only use if needed') + parser.add_argument( + "--use_ip", + dest="use_ip", + action="store", + help="Type an IP to be treated as initial peer for DDS.", + ) + parser.add_argument( + "--robot_name", + dest="robot_name", + action="store", + help="Type the name of the robot you want to hear telemetry of over DDS.", + ) + parser.add_argument( + "--domain", dest="domain", action="store", help="DDS Domain ID. Default 37 " + ) + parser.add_argument( + "--public_ip", + dest="public_ip", + action="store", + help="TReK IP. 
Only use if needed", + ) args, unknown = parser.parse_known_args() @@ -651,44 +797,55 @@ def main(): if args.com_method in (com.DDS_COM, com.ROS_COM): com_method = args.com_method else: - print >> sys.stderr, 'Invalid communication method. Must be dds or ros' + print >>sys.stderr, "Invalid communication method. Must be dds or ros" return launch_command = None # Exclude ROS arguments when using DDS communication - if com_method == com.DDS_COM and ( args.launch_command != None - or args.disable_pmcs or args.plan != None): - print >> sys.stderr, ( '\n###\n' + - '\nAdditional arguments (--gantry --granite --bag --sim --plan --disable_pmcs) ' + - 'will not be processed when using DDS mode. You may use "--comm ros" or do not include this ' + - 'argument at all in order to use additional arguments.\n\n###\n') + if com_method == com.DDS_COM and ( + args.launch_command != None or args.disable_pmcs or args.plan != None + ): + print >>sys.stderr, ( + "\n###\n" + + "\nAdditional arguments (--gantry --granite --bag --sim --plan --disable_pmcs) " + + 'will not be processed when using DDS mode. You may use "--comm ros" or do not include this ' + + "argument at all in order to use additional arguments.\n\n###\n" + ) return # Exclude DDS commands when using ROS communication - elif com_method == com.ROS_COM and ( args.use_ip != None - or args.robot_name != None or args.public_ip != None - or args.domain != None): - print >> sys.stderr, ( '\n###\n' + - '\nAdditional arguments (--use_ip --robot_name --public_ip) ' + - 'will not be processed when using ROS mode. You may include "--comm dds" ' + - 'argument in order to use these additional arguments.\n\n###\n') + elif com_method == com.ROS_COM and ( + args.use_ip != None + or args.robot_name != None + or args.public_ip != None + or args.domain != None + ): + print >>sys.stderr, ( + "\n###\n" + + "\nAdditional arguments (--use_ip --robot_name --public_ip) " + + 'will not be processed when using ROS mode. 
You may include "--comm dds" ' + + "argument in order to use these additional arguments.\n\n###\n" + ) return else: if args.launch_command == None: args.launch_command = [] if len(args.launch_command) > 1: - print >> sys.stderr, 'Can only specify one launch command.' + print >>sys.stderr, "Can only specify one launch command." return if len(args.launch_command) == 1: launch_command = args.launch_command[0] if args.disable_pmcs: if launch_command != None: - launch_command += ' disable_fans:=true' - + launch_command += " disable_fans:=true" # Setting communication method - dds_args = dict(partition_name=args.robot_name, given_peer=args.use_ip, - domain=args.domain, public_ip=args.public_ip) + dds_args = dict( + partition_name=args.robot_name, + given_peer=args.use_ip, + domain=args.domain, + public_ip=args.public_ip, + ) if not com_manager.set_com_method(com_method, dds_args): return @@ -714,5 +871,6 @@ def main(): if com_manager.current_com_method == com.DDS_COM: com_manager.config.destroy_dom() -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/tools/graph_bag/scripts/average_results.py b/tools/graph_bag/scripts/average_results.py index 772b789252..644cfcc6aa 100755 --- a/tools/graph_bag/scripts/average_results.py +++ b/tools/graph_bag/scripts/average_results.py @@ -18,42 +18,48 @@ # under the License. 
import argparse -import pandas as pd import os +import pandas as pd import utilities def combined_results(csv_files): - dataframes = [pd.read_csv(file) for file in csv_files] - if not dataframes: - print('Failed to create dataframes') - exit() - names = dataframes[0].iloc[:, 0] - combined_dataframes = pd.DataFrame(None, None, names) - for dataframe in dataframes: - trimmed_dataframe = pd.DataFrame(dataframe.transpose().values[1:2], columns=names) - combined_dataframes = combined_dataframes.append(trimmed_dataframe, ignore_index=True) - return combined_dataframes + dataframes = [pd.read_csv(file) for file in csv_files] + if not dataframes: + print("Failed to create dataframes") + exit() + names = dataframes[0].iloc[:, 0] + combined_dataframes = pd.DataFrame(None, None, names) + for dataframe in dataframes: + trimmed_dataframe = pd.DataFrame( + dataframe.transpose().values[1:2], columns=names + ) + combined_dataframes = combined_dataframes.append( + trimmed_dataframe, ignore_index=True + ) + return combined_dataframes def average_results(directory, csv_files): - combined_dataframes = combined_results(csv_files) - names = combined_dataframes.columns - mean_dataframe = pd.DataFrame() - for name in names: - mean_dataframe[name] = [combined_dataframes[name].mean()] - averaged_results_file = os.path.join(directory, 'averaged_results.csv') - mean_dataframe.to_csv(averaged_results_file, index=False) + combined_dataframes = combined_results(csv_files) + names = combined_dataframes.columns + mean_dataframe = pd.DataFrame() + for name in names: + mean_dataframe[name] = [combined_dataframes[name].mean()] + averaged_results_file = os.path.join(directory, "averaged_results.csv") + mean_dataframe.to_csv(averaged_results_file, index=False) # Averages results from all *stats.csv files in a directory (including subdirectories). 
-if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('directory', help='Full path to directory where results files are.') - args = parser.parse_args() - results_csv_files = utilities.get_files(args.directory, '*stats.csv') - if not results_csv_files: - print('Failed to find stats.csv files') - exit() - average_results(args.directory, results_csv_files) +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "directory", help="Full path to directory where results files are." + ) + args = parser.parse_args() + results_csv_files = utilities.get_files(args.directory, "*stats.csv") + if not results_csv_files: + print("Failed to find stats.csv files") + exit() + average_results(args.directory, results_csv_files) diff --git a/tools/graph_bag/scripts/bag_and_parameter_sweep.py b/tools/graph_bag/scripts/bag_and_parameter_sweep.py index d38b64bcab..b165b6d47f 100755 --- a/tools/graph_bag/scripts/bag_and_parameter_sweep.py +++ b/tools/graph_bag/scripts/bag_and_parameter_sweep.py @@ -17,89 +17,110 @@ # License for the specific language governing permissions and limitations # under the License. 
-import bag_sweep -import parameter_sweep -import plot_parameter_sweep_results -import utilities - import argparse -import pandas as pd - import os import shutil import sys +import bag_sweep +import pandas as pd +import parameter_sweep +import plot_parameter_sweep_results +import utilities + def save_ranges(param_range_directory, output_directory): - individual_ranges_file = os.path.join(param_range_directory, 'individual_value_ranges.csv') - copied_individual_ranges_file = os.path.join(output_directory, 'individual_value_ranges.csv') - shutil.copy(individual_ranges_file, copied_individual_ranges_file) - all_values_file = os.path.join(param_range_directory, 'all_value_combos.csv') - copied_all_values_file = os.path.join(output_directory, 'all_value_combos.csv') - shutil.copy(all_values_file, copied_all_values_file) + individual_ranges_file = os.path.join( + param_range_directory, "individual_value_ranges.csv" + ) + copied_individual_ranges_file = os.path.join( + output_directory, "individual_value_ranges.csv" + ) + shutil.copy(individual_ranges_file, copied_individual_ranges_file) + all_values_file = os.path.join(param_range_directory, "all_value_combos.csv") + copied_all_values_file = os.path.join(output_directory, "all_value_combos.csv") + shutil.copy(all_values_file, copied_all_values_file) def add_job_id_mean_row(job_id, dataframes, mean_dataframe, names): - combined_dataframe = pd.DataFrame(None, None, names) - for dataframe in dataframes: - trimmed_dataframe = pd.DataFrame(dataframe.values[job_id:job_id + 1], columns=names) - combined_dataframe = combined_dataframe.append(trimmed_dataframe, ignore_index=True) - job_id_mean_dataframe = pd.DataFrame(None, None, names) - for name in names: - job_id_mean_dataframe[name] = [combined_dataframe[name].mean()] - return mean_dataframe.append(job_id_mean_dataframe, ignore_index=True) + combined_dataframe = pd.DataFrame(None, None, names) + for dataframe in dataframes: + trimmed_dataframe = pd.DataFrame( + 
dataframe.values[job_id : job_id + 1], columns=names + ) + combined_dataframe = combined_dataframe.append( + trimmed_dataframe, ignore_index=True + ) + job_id_mean_dataframe = pd.DataFrame(None, None, names) + for name in names: + job_id_mean_dataframe[name] = [combined_dataframe[name].mean()] + return mean_dataframe.append(job_id_mean_dataframe, ignore_index=True) def average_parameter_sweep_results(combined_results_csv_files, directory): - dataframes = [pd.read_csv(file) for file in combined_results_csv_files] - if not dataframes: - print('Failed to create dataframes') - exit() - names = dataframes[0].columns - mean_dataframe = pd.DataFrame(None, None, names) - jobs = dataframes[0].shape[0] - # Save job id mean in order, so nth row coressponds to nth job id in final mean dataframe - for job_id in range(jobs): - mean_dataframe = add_job_id_mean_row(job_id, dataframes, mean_dataframe, names) - mean_dataframe_file = os.path.join(directory, 'bag_and_param_sweep_stats.csv') - mean_dataframe.to_csv(mean_dataframe_file, index=False) + dataframes = [pd.read_csv(file) for file in combined_results_csv_files] + if not dataframes: + print("Failed to create dataframes") + exit() + names = dataframes[0].columns + mean_dataframe = pd.DataFrame(None, None, names) + jobs = dataframes[0].shape[0] + # Save job id mean in order, so nth row coressponds to nth job id in final mean dataframe + for job_id in range(jobs): + mean_dataframe = add_job_id_mean_row(job_id, dataframes, mean_dataframe, names) + mean_dataframe_file = os.path.join(directory, "bag_and_param_sweep_stats.csv") + mean_dataframe.to_csv(mean_dataframe_file, index=False) def bag_and_parameter_sweep(graph_bag_params_list, output_dir): - combined_results_csv_files = [] - param_range_directory = None - for graph_bag_params in graph_bag_params_list: - # Save parameter sweep output in different directory for each bagfile, name directory using bagfile - bag_name_prefix = 
os.path.splitext(os.path.basename(graph_bag_params.bagfile))[0] - bag_output_dir = os.path.join(output_dir, bag_name_prefix) - param_range_directory_for_bag = parameter_sweep.make_values_and_parameter_sweep( - bag_output_dir, graph_bag_params.bagfile, graph_bag_params.map_file, graph_bag_params.image_topic, - graph_bag_params.config_path, graph_bag_params.robot_config_file, graph_bag_params.world, - graph_bag_params.use_image_features, graph_bag_params.groundtruth_bagfile, graph_bag_params.rmse_rel_start_time, - graph_bag_params.rmse_rel_end_time) - if not param_range_directory: - param_range_directory = param_range_directory_for_bag - combined_results_csv_files.append(os.path.join(bag_output_dir, 'param_sweep_combined_results.csv')) - average_parameter_sweep_results(combined_results_csv_files, output_dir) - save_ranges(param_range_directory, output_dir) + combined_results_csv_files = [] + param_range_directory = None + for graph_bag_params in graph_bag_params_list: + # Save parameter sweep output in different directory for each bagfile, name directory using bagfile + bag_name_prefix = os.path.splitext(os.path.basename(graph_bag_params.bagfile))[ + 0 + ] + bag_output_dir = os.path.join(output_dir, bag_name_prefix) + param_range_directory_for_bag = parameter_sweep.make_values_and_parameter_sweep( + bag_output_dir, + graph_bag_params.bagfile, + graph_bag_params.map_file, + graph_bag_params.image_topic, + graph_bag_params.config_path, + graph_bag_params.robot_config_file, + graph_bag_params.world, + graph_bag_params.use_image_features, + graph_bag_params.groundtruth_bagfile, + graph_bag_params.rmse_rel_start_time, + graph_bag_params.rmse_rel_end_time, + ) + if not param_range_directory: + param_range_directory = param_range_directory_for_bag + combined_results_csv_files.append( + os.path.join(bag_output_dir, "param_sweep_combined_results.csv") + ) + average_parameter_sweep_results(combined_results_csv_files, output_dir) + save_ranges(param_range_directory, output_dir) 
-if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('config_file') - parser.add_argument('output_dir') - args = parser.parse_args() - if not os.path.isfile(args.config_file): - print('Config file ' + args.config_file + ' does not exist.') - sys.exit() - if os.path.isdir(args.output_dir): - print('Output directory ' + args.output_dir + ' already exists.') - sys.exit() - output_dir = utilities.create_directory(args.output_dir) +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("config_file") + parser.add_argument("output_dir") + args = parser.parse_args() + if not os.path.isfile(args.config_file): + print(("Config file " + args.config_file + " does not exist.")) + sys.exit() + if os.path.isdir(args.output_dir): + print(("Output directory " + args.output_dir + " already exists.")) + sys.exit() + output_dir = utilities.create_directory(args.output_dir) - graph_bag_params_list = bag_sweep.load_params(args.config_file) - bag_and_parameter_sweep(graph_bag_params_list, output_dir) - combined_results_file = os.path.join(output_dir, 'bag_and_param_sweep_stats.csv') - value_combos_file = os.path.join(output_dir, 'all_value_combos.csv') - results_pdf_file = os.path.join(output_dir, 'bag_and_param_sweep_results.pdf') - plot_parameter_sweep_results.create_plots(results_pdf_file, combined_results_file, value_combos_file) + graph_bag_params_list = bag_sweep.load_params(args.config_file) + bag_and_parameter_sweep(graph_bag_params_list, output_dir) + combined_results_file = os.path.join(output_dir, "bag_and_param_sweep_stats.csv") + value_combos_file = os.path.join(output_dir, "all_value_combos.csv") + results_pdf_file = os.path.join(output_dir, "bag_and_param_sweep_results.pdf") + plot_parameter_sweep_results.create_plots( + results_pdf_file, combined_results_file, value_combos_file + ) diff --git a/tools/graph_bag/scripts/bag_sweep.py b/tools/graph_bag/scripts/bag_sweep.py index 1350c1d392..3563f558da 100644 
--- a/tools/graph_bag/scripts/bag_sweep.py +++ b/tools/graph_bag/scripts/bag_sweep.py @@ -17,70 +17,92 @@ # License for the specific language governing permissions and limitations # under the License. -import average_results -import multiprocessing_helpers - import csv import itertools import multiprocessing import os -import plot_bag_sweep_results import sys +import average_results +import multiprocessing_helpers +import plot_bag_sweep_results + class GraphBagParams(object): - - def __init__(self, bagfile, map_file, image_topic, config_path, robot_config_file, world, use_image_features, - groundtruth_bagfile, rmse_rel_start_time, rmse_rel_end_time): - self.bagfile = bagfile - self.map_file = map_file - self.image_topic = image_topic - self.config_path = config_path - self.robot_config_file = robot_config_file - self.world = world - self.use_image_features = use_image_features - self.groundtruth_bagfile = groundtruth_bagfile - self.rmse_rel_start_time = rmse_rel_start_time - self.rmse_rel_end_time = rmse_rel_end_time + def __init__( + self, + bagfile, + map_file, + image_topic, + config_path, + robot_config_file, + world, + use_image_features, + groundtruth_bagfile, + rmse_rel_start_time, + rmse_rel_end_time, + ): + self.bagfile = bagfile + self.map_file = map_file + self.image_topic = image_topic + self.config_path = config_path + self.robot_config_file = robot_config_file + self.world = world + self.use_image_features = use_image_features + self.groundtruth_bagfile = groundtruth_bagfile + self.rmse_rel_start_time = rmse_rel_start_time + self.rmse_rel_end_time = rmse_rel_end_time def load_params(param_file): - graph_bag_params_list = [] - with open(param_file) as param_csvfile: - reader = csv.reader(param_csvfile, delimiter=' ') - for row in reader: - graph_bag_params_list.append( - GraphBagParams(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9])) - - return graph_bag_params_list + graph_bag_params_list = [] + with open(param_file) as 
param_csvfile: + reader = csv.reader(param_csvfile, delimiter=" ") + for row in reader: + graph_bag_params_list.append( + GraphBagParams( + row[0], + row[1], + row[2], + row[3], + row[4], + row[5], + row[6], + row[7], + row[8], + row[9], + ) + ) + + return graph_bag_params_list def combine_results_in_csv_file(graph_bag_params, output_dir): - # Don't save this as *stats.csv otherwise it will be including when averaging bag results in average_results.py - combined_results_csv_file = os.path.join(output_dir, 'bag_sweep_stats_combined.csv') - output_csv_files = [] - bag_names = [] - for params in graph_bag_params: - bag_name = os.path.splitext(os.path.basename(params.bagfile))[0] - bag_names.append(bag_name) - output_csv_files.append(os.path.join(output_dir, bag_name + '_stats.csv')) - combined_dataframe = average_results.combined_results(output_csv_files) - combined_dataframe.insert(0, 'Bag', bag_names) - combined_dataframe.to_csv(combined_results_csv_file, index=False) - return combined_results_csv_file + # Don't save this as *stats.csv otherwise it will be including when averaging bag results in average_results.py + combined_results_csv_file = os.path.join(output_dir, "bag_sweep_stats_combined.csv") + output_csv_files = [] + bag_names = [] + for params in graph_bag_params: + bag_name = os.path.splitext(os.path.basename(params.bagfile))[0] + bag_names.append(bag_name) + output_csv_files.append(os.path.join(output_dir, bag_name + "_stats.csv")) + combined_dataframe = average_results.combined_results(output_csv_files) + combined_dataframe.insert(0, "Bag", bag_names) + combined_dataframe.to_csv(combined_results_csv_file, index=False) + return combined_results_csv_file def check_params(graph_bag_params_list): - for params in graph_bag_params_list: - if not os.path.isfile(params.bagfile): - print('Bagfile ' + params.bagfile + ' does not exist.') - sys.exit() - if not os.path.isfile(params.map_file): - print('Map file ' + params.map_file + ' does not exist.') - sys.exit() 
- if not os.path.isfile(params.groundtruth_bagfile): - print('Bagfile ' + params.groundtruth_bagfile + ' does not exist.') - sys.exit() + for params in graph_bag_params_list: + if not os.path.isfile(params.bagfile): + print(("Bagfile " + params.bagfile + " does not exist.")) + sys.exit() + if not os.path.isfile(params.map_file): + print(("Map file " + params.map_file + " does not exist.")) + sys.exit() + if not os.path.isfile(params.groundtruth_bagfile): + print(("Bagfile " + params.groundtruth_bagfile + " does not exist.")) + sys.exit() # Add traceback so errors are forwarded, otherwise @@ -88,30 +110,67 @@ def check_params(graph_bag_params_list): # library call @multiprocessing_helpers.full_traceback def run_graph_bag(params, output_dir): - bag_name = os.path.splitext(os.path.basename(params.bagfile))[0] - output_bag_path = os.path.join(output_dir, bag_name + '_results.bag') - output_csv_file = os.path.join(output_dir, bag_name + '_stats.csv') - run_command = 'rosrun graph_bag run_graph_bag ' + params.bagfile + ' ' + params.map_file + ' ' + params.config_path + ' -i ' + params.image_topic + ' -o ' + output_bag_path + ' -r ' + params.robot_config_file + ' -w ' + params.world + ' -s ' + output_csv_file + ' -f ' + params.use_image_features - os.system(run_command) - output_pdf_file = os.path.join(output_dir, bag_name + '_output.pdf') - plot_command = 'rosrun graph_bag plot_results_main.py ' + output_bag_path + ' --output-file ' + output_pdf_file + ' --output-csv-file ' + output_csv_file + ' -g ' + params.groundtruth_bagfile + ' --rmse-rel-start-time ' + params.rmse_rel_start_time + ' --rmse-rel-end-time ' + params.rmse_rel_end_time - os.system(plot_command) + bag_name = os.path.splitext(os.path.basename(params.bagfile))[0] + output_bag_path = os.path.join(output_dir, bag_name + "_results.bag") + output_csv_file = os.path.join(output_dir, bag_name + "_stats.csv") + run_command = ( + "rosrun graph_bag run_graph_bag " + + params.bagfile + + " " + + params.map_file + + 
" " + + params.config_path + + " -i " + + params.image_topic + + " -o " + + output_bag_path + + " -r " + + params.robot_config_file + + " -w " + + params.world + + " -s " + + output_csv_file + + " -f " + + params.use_image_features + ) + os.system(run_command) + output_pdf_file = os.path.join(output_dir, bag_name + "_output.pdf") + plot_command = ( + "rosrun graph_bag plot_results_main.py " + + output_bag_path + + " --output-file " + + output_pdf_file + + " --output-csv-file " + + output_csv_file + + " -g " + + params.groundtruth_bagfile + + " --rmse-rel-start-time " + + params.rmse_rel_start_time + + " --rmse-rel-end-time " + + params.rmse_rel_end_time + ) + os.system(plot_command) # Helper that unpacks arguments and calls original function # Aides running jobs in parallel as pool only supports # passing a single argument to workers def run_graph_bag_helper(zipped_vals): - return run_graph_bag(*zipped_vals) + return run_graph_bag(*zipped_vals) def bag_sweep(config_file, output_dir): - graph_bag_params_list = load_params(config_file) - check_params(graph_bag_params_list) - num_processes = 15 - pool = multiprocessing.Pool(num_processes) - # izip arguments so we can pass as one argument to pool worker - pool.map(run_graph_bag_helper, itertools.izip(graph_bag_params_list, itertools.repeat(output_dir))) - combined_results_csv_file = combine_results_in_csv_file(graph_bag_params_list, output_dir) - output_file = os.path.join(output_dir, 'bag_sweep_results.pdf') - plot_bag_sweep_results.create_plot(output_file, combined_results_csv_file) + graph_bag_params_list = load_params(config_file) + check_params(graph_bag_params_list) + num_processes = 15 + pool = multiprocessing.Pool(num_processes) + # izip arguments so we can pass as one argument to pool worker + pool.map( + run_graph_bag_helper, + list(zip(graph_bag_params_list, itertools.repeat(output_dir))), + ) + combined_results_csv_file = combine_results_in_csv_file( + graph_bag_params_list, output_dir + ) + output_file = 
os.path.join(output_dir, "bag_sweep_results.pdf") + plot_bag_sweep_results.create_plot(output_file, combined_results_csv_file) diff --git a/tools/graph_bag/scripts/bag_sweep_main.py b/tools/graph_bag/scripts/bag_sweep_main.py index cb017e2c1c..f7b2bfac33 100755 --- a/tools/graph_bag/scripts/bag_sweep_main.py +++ b/tools/graph_bag/scripts/bag_sweep_main.py @@ -17,24 +17,23 @@ # License for the specific language governing permissions and limitations # under the License. -import bag_sweep - import argparse - import os import sys -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('config_file') - parser.add_argument('output_dir') - args = parser.parse_args() - if not os.path.isfile(args.config_file): - print('Config file ' + args.config_file + ' does not exist.') - sys.exit() - if os.path.isdir(args.output_dir): - print('Output directory ' + args.output_dir + ' already exists.') - sys.exit() - os.makedirs(args.output_dir) +import bag_sweep + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("config_file") + parser.add_argument("output_dir") + args = parser.parse_args() + if not os.path.isfile(args.config_file): + print(("Config file " + args.config_file + " does not exist.")) + sys.exit() + if os.path.isdir(args.output_dir): + print(("Output directory " + args.output_dir + " already exists.")) + sys.exit() + os.makedirs(args.output_dir) - bag_sweep.bag_sweep(args.config_file, args.output_dir) + bag_sweep.bag_sweep(args.config_file, args.output_dir) diff --git a/tools/graph_bag/scripts/check_bags_for_gaps.py b/tools/graph_bag/scripts/check_bags_for_gaps.py index fd09c41b74..4c4f835ccc 100755 --- a/tools/graph_bag/scripts/check_bags_for_gaps.py +++ b/tools/graph_bag/scripts/check_bags_for_gaps.py @@ -15,24 +15,25 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-import utilities - import argparse import os import sys import rosbag +import utilities -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('bagfile') - parser.add_argument('topic') - parser.add_argument('-m', '--max-time-diff', type=float, default=0.5) - # Use header or received time - parser.add_argument('-r', '--use-receive-time', action='store_true') - args = parser.parse_args() - if not os.path.isfile(args.bagfile): - print('Bag file ' + args.bagfile + ' does not exist.') - sys.exit() +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("bagfile") + parser.add_argument("topic") + parser.add_argument("-m", "--max-time-diff", type=float, default=0.5) + # Use header or received time + parser.add_argument("-r", "--use-receive-time", action="store_true") + args = parser.parse_args() + if not os.path.isfile(args.bagfile): + print(("Bag file " + args.bagfile + " does not exist.")) + sys.exit() - utilities.get_topic_rates(args.bagfile, args.topic, args.max_time_diff, not args.use_receive_time, True) + utilities.get_topic_rates( + args.bagfile, args.topic, args.max_time_diff, not args.use_receive_time, True + ) diff --git a/tools/graph_bag/scripts/config_creator.py b/tools/graph_bag/scripts/config_creator.py index 3d94412a63..6fcd757dec 100644 --- a/tools/graph_bag/scripts/config_creator.py +++ b/tools/graph_bag/scripts/config_creator.py @@ -19,32 +19,32 @@ def check_and_fill_line(value_map, config_file_line): - line_strings = config_file_line.split() - # Overwrite val if config variable is in value map - if len(line_strings) > 0 and line_strings[0] in value_map: - return line_strings[0] + ' = ' + str(value_map[line_strings[0]]) + '\n' - return config_file_line + line_strings = config_file_line.split() + # Overwrite val if config variable is in value map + if len(line_strings) > 0 and line_strings[0] in value_map: + return line_strings[0] + " = " + str(value_map[line_strings[0]]) + "\n" + return 
config_file_line def fill_in_values(original_config, value_map, new_config): - original_config_file = open(original_config, 'r') - new_config_file = open(new_config, 'w') - for config_file_line in original_config_file: - new_config_file.write(check_and_fill_line(value_map, config_file_line)) + original_config_file = open(original_config, "r") + new_config_file = open(new_config, "w") + for config_file_line in original_config_file: + new_config_file.write(check_and_fill_line(value_map, config_file_line)) def make_value_map(values, value_names): - value_map = {} - if len(values) != len(value_names): - print("values and value_names not same length!") - exit() + value_map = {} + if len(values) != len(value_names): + print("values and value_names not same length!") + exit() - for index, value_name in enumerate(value_names): - value_map[value_name] = values[index] + for index, value_name in enumerate(value_names): + value_map[value_name] = values[index] - return value_map + return value_map def make_config(values, value_names, original_config, new_config): - value_map = make_value_map(values, value_names) - fill_in_values(original_config, value_map, new_config) + value_map = make_value_map(values, value_names) + fill_in_values(original_config, value_map, new_config) diff --git a/tools/graph_bag/scripts/get_average_opt_and_update_times.py b/tools/graph_bag/scripts/get_average_opt_and_update_times.py index 78dc200a33..91fb74cfd8 100755 --- a/tools/graph_bag/scripts/get_average_opt_and_update_times.py +++ b/tools/graph_bag/scripts/get_average_opt_and_update_times.py @@ -20,43 +20,42 @@ import sys import numpy as np - import rosbag def get_average_opt_and_update_times(bagfile): - with rosbag.Bag(bagfile, 'r') as bag: - optimization_times = [] - update_times = [] - for _, msg, _ in bag.read_messages(['/graph_loc/state']): - optimization_times.append(msg.optimization_time) - update_times.append(msg.update_time) - - mean_optimization_time = np.mean(optimization_times) - 
min_optimization_time = np.min(optimization_times) - max_optimization_time = np.max(optimization_times) - stddev_optimization_time = np.std(optimization_times) - print('Mean optimization time: ' + str(mean_optimization_time)) - print('Min optimization time: ' + str(min_optimization_time)) - print('Max optimization time: ' + str(max_optimization_time)) - print('Stddev optimization time: ' + str(stddev_optimization_time)) - - mean_update_time = np.mean(update_times) - min_update_time = np.min(update_times) - max_update_time = np.max(update_times) - stddev_update_time = np.std(update_times) - print('Mean update time: ' + str(mean_update_time)) - print('Min update time: ' + str(min_update_time)) - print('Max update time: ' + str(max_update_time)) - print('Stddev update time: ' + str(stddev_update_time)) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('bagfile') - args = parser.parse_args() - if not os.path.isfile(args.bagfile): - print('Bag file ' + args.bagfile + ' does not exist.') - sys.exit() - - get_average_opt_and_update_times(args.bagfile) + with rosbag.Bag(bagfile, "r") as bag: + optimization_times = [] + update_times = [] + for _, msg, _ in bag.read_messages(["/graph_loc/state"]): + optimization_times.append(msg.optimization_time) + update_times.append(msg.update_time) + + mean_optimization_time = np.mean(optimization_times) + min_optimization_time = np.min(optimization_times) + max_optimization_time = np.max(optimization_times) + stddev_optimization_time = np.std(optimization_times) + print(("Mean optimization time: " + str(mean_optimization_time))) + print(("Min optimization time: " + str(min_optimization_time))) + print(("Max optimization time: " + str(max_optimization_time))) + print(("Stddev optimization time: " + str(stddev_optimization_time))) + + mean_update_time = np.mean(update_times) + min_update_time = np.min(update_times) + max_update_time = np.max(update_times) + stddev_update_time = np.std(update_times) 
+ print(("Mean update time: " + str(mean_update_time))) + print(("Min update time: " + str(min_update_time))) + print(("Max update time: " + str(max_update_time))) + print(("Stddev update time: " + str(stddev_update_time))) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("bagfile") + args = parser.parse_args() + if not os.path.isfile(args.bagfile): + print(("Bag file " + args.bagfile + " does not exist.")) + sys.exit() + + get_average_opt_and_update_times(args.bagfile) diff --git a/tools/graph_bag/scripts/imu_analyzer.py b/tools/graph_bag/scripts/imu_analyzer.py index 81cfc8f541..25f86e0504 100644 --- a/tools/graph_bag/scripts/imu_analyzer.py +++ b/tools/graph_bag/scripts/imu_analyzer.py @@ -18,182 +18,329 @@ # under the License. import imu_measurements +import matplotlib import plot_helpers -import matplotlib -matplotlib.use('pdf') +matplotlib.use("pdf") +import geometry_msgs import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages import numpy as np -import scipy.signal - import rosbag -import geometry_msgs +import scipy.signal +from matplotlib.backends.backend_pdf import PdfPages def get_fft(data, times, sample_spacing): - magnitudes = np.fft.rfft(data) - frequencies = np.fft.rfftfreq(len(times), sample_spacing) - return magnitudes, frequencies + magnitudes = np.fft.rfft(data) + frequencies = np.fft.rfftfreq(len(times), sample_spacing) + return magnitudes, frequencies def butter_lowpass_sos_representation(cutoff_frequency, sample_rate, order=5): - nyquist_rate = 0.5 * sample_rate - # From Python Cookbook - # TODO(rsoussan): Why is this necessary? - critical_frequency = cutoff_frequency / nyquist_rate - sos = scipy.signal.butter(order, critical_frequency, btype='low', output='sos', analog=False) - return sos + nyquist_rate = 0.5 * sample_rate + # From Python Cookbook + # TODO(rsoussan): Why is this necessary? 
+ critical_frequency = cutoff_frequency / nyquist_rate + sos = scipy.signal.butter( + order, critical_frequency, btype="low", output="sos", analog=False + ) + return sos def lowpass_filter(data, cutoff_frequency, sample_rate, order=5): - sos = butter_lowpass_sos_representation(cutoff_frequency, sample_rate, order=order) - filtered_data = scipy.signal.sosfilt(sos, data) - return filtered_data - - -def filter_imu_measurements(imu_measurements, filtered_imu_measurements, cutoff_frequency, sample_rate): - filtered_imu_measurements.accelerations.xs = lowpass_filter(imu_measurements.accelerations.xs, cutoff_frequency, - sample_rate) - filtered_imu_measurements.accelerations.ys = lowpass_filter(imu_measurements.accelerations.ys, cutoff_frequency, - sample_rate) - filtered_imu_measurements.accelerations.zs = lowpass_filter(imu_measurements.accelerations.zs, cutoff_frequency, - sample_rate) - filtered_imu_measurements.angular_velocities.xs = lowpass_filter(imu_measurements.angular_velocities.xs, - cutoff_frequency, sample_rate) - filtered_imu_measurements.angular_velocities.ys = lowpass_filter(imu_measurements.angular_velocities.ys, - cutoff_frequency, sample_rate) - filtered_imu_measurements.angular_velocities.zs = lowpass_filter(imu_measurements.angular_velocities.zs, - cutoff_frequency, sample_rate) - filtered_imu_measurements.times = imu_measurements.times - - -def plot_imu_measurements(pdf, imu_measurements, prefix=''): - # Acceleration - plt.figure() - plot_helpers.plot_vector3ds(imu_measurements.accelerations, imu_measurements.times, 'Acc.') - plt.xlabel('Time (s)') - plt.ylabel('Acceleration (m/s^2)') - plt.title(prefix + 'Acceleration') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Angular Velocity - plt.figure() - plot_helpers.plot_vector3ds(imu_measurements.angular_velocities, imu_measurements.times, 'Ang. 
Vel.') - plt.xlabel('Time (s)') - plt.ylabel('Angular Velocities') - plt.title(prefix + 'Angular Velocities') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - -def plot_fft(pdf, magnitude, frequency, prefix=''): - plt.figure() - plt.ylim(top=100) - plt.plot(frequency, np.absolute(magnitude), lw=1) - plt.xlabel('Frequency') - plt.ylabel('Magnitude') - plt.title(prefix + ' FFT') - pdf.savefig() - plt.close() + sos = butter_lowpass_sos_representation(cutoff_frequency, sample_rate, order=order) + filtered_data = scipy.signal.sosfilt(sos, data) + return filtered_data + + +def filter_imu_measurements( + imu_measurements, filtered_imu_measurements, cutoff_frequency, sample_rate +): + filtered_imu_measurements.accelerations.xs = lowpass_filter( + imu_measurements.accelerations.xs, cutoff_frequency, sample_rate + ) + filtered_imu_measurements.accelerations.ys = lowpass_filter( + imu_measurements.accelerations.ys, cutoff_frequency, sample_rate + ) + filtered_imu_measurements.accelerations.zs = lowpass_filter( + imu_measurements.accelerations.zs, cutoff_frequency, sample_rate + ) + filtered_imu_measurements.angular_velocities.xs = lowpass_filter( + imu_measurements.angular_velocities.xs, cutoff_frequency, sample_rate + ) + filtered_imu_measurements.angular_velocities.ys = lowpass_filter( + imu_measurements.angular_velocities.ys, cutoff_frequency, sample_rate + ) + filtered_imu_measurements.angular_velocities.zs = lowpass_filter( + imu_measurements.angular_velocities.zs, cutoff_frequency, sample_rate + ) + filtered_imu_measurements.times = imu_measurements.times + + +def plot_imu_measurements(pdf, imu_measurements, prefix=""): + # Acceleration + plt.figure() + plot_helpers.plot_vector3ds( + imu_measurements.accelerations, imu_measurements.times, "Acc." 
+ ) + plt.xlabel("Time (s)") + plt.ylabel("Acceleration (m/s^2)") + plt.title(prefix + "Acceleration") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Angular Velocity + plt.figure() + plot_helpers.plot_vector3ds( + imu_measurements.angular_velocities, imu_measurements.times, "Ang. Vel." + ) + plt.xlabel("Time (s)") + plt.ylabel("Angular Velocities") + plt.title(prefix + "Angular Velocities") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + +def plot_fft(pdf, magnitude, frequency, prefix=""): + plt.figure() + plt.ylim(top=100) + plt.plot(frequency, np.absolute(magnitude), lw=1) + plt.xlabel("Frequency") + plt.ylabel("Magnitude") + plt.title(prefix + " FFT") + pdf.savefig() + plt.close() def plot_filtered_data(pdf, filtered_data, data, filtered_times, times, title): - plt.figure() - plt.plot(filtered_times, filtered_data, 'r', alpha=0.5, lw=1, label='Filtered') - plt.plot(times, data, 'g', alpha=0.5, lw=1, label='Raw') - plt.xlabel('Time (s)') - plt.ylabel('Acceleration (m/s^2)') - plt.title(title) - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() + plt.figure() + plt.plot(filtered_times, filtered_data, "r", alpha=0.5, lw=1, label="Filtered") + plt.plot(times, data, "g", alpha=0.5, lw=1, label="Raw") + plt.xlabel("Time (s)") + plt.ylabel("Acceleration (m/s^2)") + plt.title(title) + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() def load_imu_msgs(imu_measurements, imu_topic, bag): - for topic, msg, t in bag.read_messages(imu_topic): - imu_measurements.add_imu_measurement(msg) + for topic, msg, t in bag.read_messages(imu_topic): + imu_measurements.add_imu_measurement(msg) def create_plots(bagfile, filtered_bagfile, output_file, cutoff_frequency, sample_rate): - bag = rosbag.Bag(bagfile) - measurements = imu_measurements.ImuMeasurements() - load_imu_msgs(measurements, '/hw/imu', bag) - bag.close() - - sample_spacing = 1.0/sample_rate - - # Use second bagfile as filtered measurements, otherwise filter with python 
filter - filtered_measurements = imu_measurements.ImuMeasurements() - if (filtered_bagfile): - filtered_bag = rosbag.Bag(filtered_bagfile) - load_imu_msgs(filtered_measurements, '/hw/imu', filtered_bag) - filtered_bag.close() - else: - filter_imu_measurements(measurements, filtered_measurements, cutoff_frequency, sample_rate) - - # FFTs - acceleration_x_fft_magnitudes, acceleration_x_fft_frequencies = get_fft(measurements.accelerations.xs, - measurements.times, sample_spacing) - acceleration_y_fft_magnitudes, acceleration_y_fft_frequencies = get_fft(measurements.accelerations.ys, - measurements.times, sample_spacing) - acceleration_z_fft_magnitudes, acceleration_z_fft_frequencies = get_fft(measurements.accelerations.zs, - measurements.times, sample_spacing) - angular_velocity_x_fft_magnitudes, angular_velocity_x_fft_frequencies = get_fft(measurements.angular_velocities.xs, - measurements.times, sample_spacing) - angular_velocity_y_fft_magnitudes, angular_velocity_y_fft_frequencies = get_fft(measurements.angular_velocities.ys, - measurements.times, sample_spacing) - angular_velocity_z_fft_magnitudes, angular_velocity_z_fft_frequencies = get_fft(measurements.angular_velocities.zs, - measurements.times, sample_spacing) - - filtered_acceleration_x_fft_magnitudes, filtered_acceleration_x_fft_frequencies = get_fft( - filtered_measurements.accelerations.xs, filtered_measurements.times, sample_spacing) - filtered_acceleration_y_fft_magnitudes, filtered_acceleration_y_fft_frequencies = get_fft( - filtered_measurements.accelerations.ys, filtered_measurements.times, sample_spacing) - filtered_acceleration_z_fft_magnitudes, filtered_acceleration_z_fft_frequencies = get_fft( - filtered_measurements.accelerations.zs, filtered_measurements.times, sample_spacing) - filtered_angular_velocity_x_fft_magnitudes, filtered_angular_velocity_x_fft_frequencies = get_fft( - filtered_measurements.angular_velocities.xs, filtered_measurements.times, sample_spacing) - 
filtered_angular_velocity_y_fft_magnitudes, filtered_angular_velocity_y_fft_frequencies = get_fft( - filtered_measurements.angular_velocities.ys, filtered_measurements.times, sample_spacing) - filtered_angular_velocity_z_fft_magnitudes, filtered_angular_velocity_z_fft_frequencies = get_fft( - filtered_measurements.angular_velocities.zs, filtered_measurements.times, sample_spacing) - - with PdfPages(output_file) as pdf: - plot_imu_measurements(pdf, measurements, 'Raw Imu ') - - plot_fft(pdf, acceleration_x_fft_magnitudes, acceleration_x_fft_frequencies, 'Raw Imu FFT Accel x ') - plot_fft(pdf, acceleration_y_fft_magnitudes, acceleration_y_fft_frequencies, 'Raw Imu FFT Accel y ') - plot_fft(pdf, acceleration_z_fft_magnitudes, acceleration_z_fft_frequencies, 'Raw Imu FFT Accel z ') - plot_fft(pdf, angular_velocity_x_fft_magnitudes, angular_velocity_x_fft_frequencies, 'Raw Imu FFT Ang Vel x ') - plot_fft(pdf, angular_velocity_y_fft_magnitudes, angular_velocity_y_fft_frequencies, 'Raw Imu FFT Ang Vel y ') - plot_fft(pdf, angular_velocity_z_fft_magnitudes, angular_velocity_z_fft_frequencies, 'Raw Imu FFT Ang Vel z ') - - plot_filtered_data(pdf, filtered_measurements.accelerations.xs, measurements.accelerations.xs, - filtered_measurements.times, measurements.times, 'Filtered Accel x') - plot_filtered_data(pdf, filtered_measurements.accelerations.ys, measurements.accelerations.ys, - filtered_measurements.times, measurements.times, 'Filtered Accel y') - plot_filtered_data(pdf, filtered_measurements.accelerations.zs, measurements.accelerations.zs, - filtered_measurements.times, measurements.times, 'Filtered Accel z') - plot_filtered_data(pdf, filtered_measurements.angular_velocities.xs, measurements.angular_velocities.xs, - filtered_measurements.times, measurements.times, 'Filtered Ang Vel x') - plot_filtered_data(pdf, filtered_measurements.angular_velocities.ys, measurements.angular_velocities.ys, - filtered_measurements.times, measurements.times, 'Filtered Ang Vel y') - 
plot_filtered_data(pdf, filtered_measurements.angular_velocities.zs, measurements.angular_velocities.zs, - filtered_measurements.times, measurements.times, 'Filtered Ang Vel z') - - plot_fft(pdf, filtered_acceleration_x_fft_magnitudes, filtered_acceleration_x_fft_frequencies, - 'Filtered Imu FFT Accel x ') - plot_fft(pdf, filtered_acceleration_y_fft_magnitudes, filtered_acceleration_y_fft_frequencies, - 'Filtered Imu FFT Accel y ') - plot_fft(pdf, filtered_acceleration_z_fft_magnitudes, filtered_acceleration_z_fft_frequencies, - 'Filtered Imu FFT Accel z ') - plot_fft(pdf, filtered_angular_velocity_x_fft_magnitudes, filtered_angular_velocity_x_fft_frequencies, - 'Filtered Imu FFT Ang Vel x ') - plot_fft(pdf, filtered_angular_velocity_y_fft_magnitudes, filtered_angular_velocity_y_fft_frequencies, - 'Filtered Imu FFT Ang Vel y ') - plot_fft(pdf, filtered_angular_velocity_z_fft_magnitudes, filtered_angular_velocity_z_fft_frequencies, - 'Filtered Imu FFT Ang Vel z ') + bag = rosbag.Bag(bagfile) + measurements = imu_measurements.ImuMeasurements() + load_imu_msgs(measurements, "/hw/imu", bag) + bag.close() + + sample_spacing = 1.0 / sample_rate + + # Use second bagfile as filtered measurements, otherwise filter with python filter + filtered_measurements = imu_measurements.ImuMeasurements() + if filtered_bagfile: + filtered_bag = rosbag.Bag(filtered_bagfile) + load_imu_msgs(filtered_measurements, "/hw/imu", filtered_bag) + filtered_bag.close() + else: + filter_imu_measurements( + measurements, filtered_measurements, cutoff_frequency, sample_rate + ) + + # FFTs + acceleration_x_fft_magnitudes, acceleration_x_fft_frequencies = get_fft( + measurements.accelerations.xs, measurements.times, sample_spacing + ) + acceleration_y_fft_magnitudes, acceleration_y_fft_frequencies = get_fft( + measurements.accelerations.ys, measurements.times, sample_spacing + ) + acceleration_z_fft_magnitudes, acceleration_z_fft_frequencies = get_fft( + measurements.accelerations.zs, 
measurements.times, sample_spacing + ) + angular_velocity_x_fft_magnitudes, angular_velocity_x_fft_frequencies = get_fft( + measurements.angular_velocities.xs, measurements.times, sample_spacing + ) + angular_velocity_y_fft_magnitudes, angular_velocity_y_fft_frequencies = get_fft( + measurements.angular_velocities.ys, measurements.times, sample_spacing + ) + angular_velocity_z_fft_magnitudes, angular_velocity_z_fft_frequencies = get_fft( + measurements.angular_velocities.zs, measurements.times, sample_spacing + ) + + ( + filtered_acceleration_x_fft_magnitudes, + filtered_acceleration_x_fft_frequencies, + ) = get_fft( + filtered_measurements.accelerations.xs, + filtered_measurements.times, + sample_spacing, + ) + ( + filtered_acceleration_y_fft_magnitudes, + filtered_acceleration_y_fft_frequencies, + ) = get_fft( + filtered_measurements.accelerations.ys, + filtered_measurements.times, + sample_spacing, + ) + ( + filtered_acceleration_z_fft_magnitudes, + filtered_acceleration_z_fft_frequencies, + ) = get_fft( + filtered_measurements.accelerations.zs, + filtered_measurements.times, + sample_spacing, + ) + ( + filtered_angular_velocity_x_fft_magnitudes, + filtered_angular_velocity_x_fft_frequencies, + ) = get_fft( + filtered_measurements.angular_velocities.xs, + filtered_measurements.times, + sample_spacing, + ) + ( + filtered_angular_velocity_y_fft_magnitudes, + filtered_angular_velocity_y_fft_frequencies, + ) = get_fft( + filtered_measurements.angular_velocities.ys, + filtered_measurements.times, + sample_spacing, + ) + ( + filtered_angular_velocity_z_fft_magnitudes, + filtered_angular_velocity_z_fft_frequencies, + ) = get_fft( + filtered_measurements.angular_velocities.zs, + filtered_measurements.times, + sample_spacing, + ) + + with PdfPages(output_file) as pdf: + plot_imu_measurements(pdf, measurements, "Raw Imu ") + + plot_fft( + pdf, + acceleration_x_fft_magnitudes, + acceleration_x_fft_frequencies, + "Raw Imu FFT Accel x ", + ) + plot_fft( + pdf, + 
acceleration_y_fft_magnitudes, + acceleration_y_fft_frequencies, + "Raw Imu FFT Accel y ", + ) + plot_fft( + pdf, + acceleration_z_fft_magnitudes, + acceleration_z_fft_frequencies, + "Raw Imu FFT Accel z ", + ) + plot_fft( + pdf, + angular_velocity_x_fft_magnitudes, + angular_velocity_x_fft_frequencies, + "Raw Imu FFT Ang Vel x ", + ) + plot_fft( + pdf, + angular_velocity_y_fft_magnitudes, + angular_velocity_y_fft_frequencies, + "Raw Imu FFT Ang Vel y ", + ) + plot_fft( + pdf, + angular_velocity_z_fft_magnitudes, + angular_velocity_z_fft_frequencies, + "Raw Imu FFT Ang Vel z ", + ) + + plot_filtered_data( + pdf, + filtered_measurements.accelerations.xs, + measurements.accelerations.xs, + filtered_measurements.times, + measurements.times, + "Filtered Accel x", + ) + plot_filtered_data( + pdf, + filtered_measurements.accelerations.ys, + measurements.accelerations.ys, + filtered_measurements.times, + measurements.times, + "Filtered Accel y", + ) + plot_filtered_data( + pdf, + filtered_measurements.accelerations.zs, + measurements.accelerations.zs, + filtered_measurements.times, + measurements.times, + "Filtered Accel z", + ) + plot_filtered_data( + pdf, + filtered_measurements.angular_velocities.xs, + measurements.angular_velocities.xs, + filtered_measurements.times, + measurements.times, + "Filtered Ang Vel x", + ) + plot_filtered_data( + pdf, + filtered_measurements.angular_velocities.ys, + measurements.angular_velocities.ys, + filtered_measurements.times, + measurements.times, + "Filtered Ang Vel y", + ) + plot_filtered_data( + pdf, + filtered_measurements.angular_velocities.zs, + measurements.angular_velocities.zs, + filtered_measurements.times, + measurements.times, + "Filtered Ang Vel z", + ) + + plot_fft( + pdf, + filtered_acceleration_x_fft_magnitudes, + filtered_acceleration_x_fft_frequencies, + "Filtered Imu FFT Accel x ", + ) + plot_fft( + pdf, + filtered_acceleration_y_fft_magnitudes, + filtered_acceleration_y_fft_frequencies, + "Filtered Imu FFT Accel y 
", + ) + plot_fft( + pdf, + filtered_acceleration_z_fft_magnitudes, + filtered_acceleration_z_fft_frequencies, + "Filtered Imu FFT Accel z ", + ) + plot_fft( + pdf, + filtered_angular_velocity_x_fft_magnitudes, + filtered_angular_velocity_x_fft_frequencies, + "Filtered Imu FFT Ang Vel x ", + ) + plot_fft( + pdf, + filtered_angular_velocity_y_fft_magnitudes, + filtered_angular_velocity_y_fft_frequencies, + "Filtered Imu FFT Ang Vel y ", + ) + plot_fft( + pdf, + filtered_angular_velocity_z_fft_magnitudes, + filtered_angular_velocity_z_fft_frequencies, + "Filtered Imu FFT Ang Vel z ", + ) diff --git a/tools/graph_bag/scripts/imu_analyzer_main.py b/tools/graph_bag/scripts/imu_analyzer_main.py index 204be30d1e..da6c73507b 100755 --- a/tools/graph_bag/scripts/imu_analyzer_main.py +++ b/tools/graph_bag/scripts/imu_analyzer_main.py @@ -17,26 +17,31 @@ # License for the specific language governing permissions and limitations # under the License. -import imu_analyzer - import argparse - import os import sys -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('bagfile') - parser.add_argument('--output-file', default='imu_analyzer_output.pdf') - parser.add_argument('-f', '--filtered-bagfile', default='') - parser.add_argument('-s', '--sample-rate', type=float, default=62.5) - # Only applicable if filtered_bagfile not provided, uses python filters - parser.add_argument('-c', '--cutoff-frequency', type=float, default=5.0) - args = parser.parse_args() - if not os.path.isfile(args.bagfile): - print('Bag file ' + args.bagfile + ' does not exist.') - sys.exit() - if args.filtered_bagfile and not os.path.isfile(args.filtered_bagfile): - print('Bag file ' + args.filtered_bagfile + ' does not exist.') - sys.exit() - imu_analyzer.create_plots(args.bagfile, args.filtered_bagfile, args.output_file, args.cutoff_frequency, args.sample_rate) +import imu_analyzer + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + 
parser.add_argument("bagfile") + parser.add_argument("--output-file", default="imu_analyzer_output.pdf") + parser.add_argument("-f", "--filtered-bagfile", default="") + parser.add_argument("-s", "--sample-rate", type=float, default=62.5) + # Only applicable if filtered_bagfile not provided, uses python filters + parser.add_argument("-c", "--cutoff-frequency", type=float, default=5.0) + args = parser.parse_args() + if not os.path.isfile(args.bagfile): + print(("Bag file " + args.bagfile + " does not exist.")) + sys.exit() + if args.filtered_bagfile and not os.path.isfile(args.filtered_bagfile): + print(("Bag file " + args.filtered_bagfile + " does not exist.")) + sys.exit() + imu_analyzer.create_plots( + args.bagfile, + args.filtered_bagfile, + args.output_file, + args.cutoff_frequency, + args.sample_rate, + ) diff --git a/tools/graph_bag/scripts/imu_measurements.py b/tools/graph_bag/scripts/imu_measurements.py index 1d5dc04bf0..dbd3b2bb58 100644 --- a/tools/graph_bag/scripts/imu_measurements.py +++ b/tools/graph_bag/scripts/imu_measurements.py @@ -20,14 +20,13 @@ import vector3ds -class ImuMeasurements(): +class ImuMeasurements: + def __init__(self): + self.accelerations = vector3ds.Vector3ds() + self.angular_velocities = vector3ds.Vector3ds() + self.times = [] - def __init__(self): - self.accelerations = vector3ds.Vector3ds() - self.angular_velocities = vector3ds.Vector3ds() - self.times = [] - - def add_imu_measurement(self, msg): - self.accelerations.add_vector3d(msg.linear_acceleration) - self.angular_velocities.add_vector3d(msg.angular_velocity) - self.times.append(msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs) + def add_imu_measurement(self, msg): + self.accelerations.add_vector3d(msg.linear_acceleration) + self.angular_velocities.add_vector3d(msg.angular_velocity) + self.times.append(msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs) diff --git a/tools/graph_bag/scripts/loc_states.py b/tools/graph_bag/scripts/loc_states.py index 
c6fc6cb780..694cb60866 100644 --- a/tools/graph_bag/scripts/loc_states.py +++ b/tools/graph_bag/scripts/loc_states.py @@ -22,44 +22,51 @@ class LocStates(poses.Poses): + def __init__(self, loc_type, topic): + super(LocStates, self).__init__(loc_type, topic) + self.num_detected_of_features = [] + self.num_detected_ml_features = [] + self.num_of_factors = [] + self.num_ml_projection_factors = [] + self.accelerations = vector3ds.Vector3ds() + self.velocities = vector3ds.Vector3ds() + self.angular_velocities = vector3ds.Vector3ds() + self.accelerometer_biases = vector3ds.Vector3ds() + self.gyro_biases = vector3ds.Vector3ds() + self.position_covariances = vector3ds.Vector3ds() + self.orientation_covariances = vector3ds.Vector3ds() + self.velocity_covariances = vector3ds.Vector3ds() + self.accelerometer_bias_covariances = vector3ds.Vector3ds() + self.gyro_bias_covariances = vector3ds.Vector3ds() - def __init__(self, loc_type, topic): - super(LocStates, self).__init__(loc_type, topic) - self.num_detected_of_features = [] - self.num_detected_ml_features = [] - self.num_of_factors = [] - self.num_ml_projection_factors = [] - self.accelerations = vector3ds.Vector3ds() - self.velocities = vector3ds.Vector3ds() - self.angular_velocities = vector3ds.Vector3ds() - self.accelerometer_biases = vector3ds.Vector3ds() - self.gyro_biases = vector3ds.Vector3ds() - self.position_covariances = vector3ds.Vector3ds() - self.orientation_covariances = vector3ds.Vector3ds() - self.velocity_covariances = vector3ds.Vector3ds() - self.accelerometer_bias_covariances = vector3ds.Vector3ds() - self.gyro_bias_covariances = vector3ds.Vector3ds() - - def add_loc_state(self, msg, bag_start_time=0): - self.add_pose(msg.pose, msg.header.stamp, bag_start_time) - if hasattr(msg, 'num_detected_of_features'): - self.num_detected_of_features.append(msg.num_detected_of_features) - if hasattr(msg, 'num_detected_ml_features'): - self.num_detected_ml_features.append(msg.num_detected_ml_features) - if hasattr(msg, 
'num_of_factors'): - self.num_of_factors.append(msg.num_of_factors) - if hasattr(msg, 'num_ml_projection_factors'): - self.num_ml_projection_factors.append(msg.num_ml_projection_factors) - if hasattr(msg, 'accel'): - self.accelerations.add_vector3d(msg.accel) - self.velocities.add_vector3d(msg.velocity) - if hasattr(msg, 'omega'): - self.angular_velocities.add_vector3d(msg.omega) - self.accelerometer_biases.add_vector3d(msg.accel_bias) - self.gyro_biases.add_vector3d(msg.gyro_bias) - # See GraphState.msg or EkfState.msg for cov_diag offsets - self.position_covariances.add(msg.cov_diag[12], msg.cov_diag[13], msg.cov_diag[14]) - self.orientation_covariances.add(msg.cov_diag[0], msg.cov_diag[1], msg.cov_diag[2]) - self.velocity_covariances.add(msg.cov_diag[6], msg.cov_diag[7], msg.cov_diag[8]) - self.accelerometer_bias_covariances.add(msg.cov_diag[9], msg.cov_diag[10], msg.cov_diag[11]) - self.gyro_bias_covariances.add(msg.cov_diag[3], msg.cov_diag[4], msg.cov_diag[5]) + def add_loc_state(self, msg, bag_start_time=0): + self.add_pose(msg.pose, msg.header.stamp, bag_start_time) + if hasattr(msg, "num_detected_of_features"): + self.num_detected_of_features.append(msg.num_detected_of_features) + if hasattr(msg, "num_detected_ml_features"): + self.num_detected_ml_features.append(msg.num_detected_ml_features) + if hasattr(msg, "num_of_factors"): + self.num_of_factors.append(msg.num_of_factors) + if hasattr(msg, "num_ml_projection_factors"): + self.num_ml_projection_factors.append(msg.num_ml_projection_factors) + if hasattr(msg, "accel"): + self.accelerations.add_vector3d(msg.accel) + self.velocities.add_vector3d(msg.velocity) + if hasattr(msg, "omega"): + self.angular_velocities.add_vector3d(msg.omega) + self.accelerometer_biases.add_vector3d(msg.accel_bias) + self.gyro_biases.add_vector3d(msg.gyro_bias) + # See GraphState.msg or EkfState.msg for cov_diag offsets + self.position_covariances.add( + msg.cov_diag[12], msg.cov_diag[13], msg.cov_diag[14] + ) + 
self.orientation_covariances.add( + msg.cov_diag[0], msg.cov_diag[1], msg.cov_diag[2] + ) + self.velocity_covariances.add(msg.cov_diag[6], msg.cov_diag[7], msg.cov_diag[8]) + self.accelerometer_bias_covariances.add( + msg.cov_diag[9], msg.cov_diag[10], msg.cov_diag[11] + ) + self.gyro_bias_covariances.add( + msg.cov_diag[3], msg.cov_diag[4], msg.cov_diag[5] + ) diff --git a/tools/graph_bag/scripts/merge_all_bags.py b/tools/graph_bag/scripts/merge_all_bags.py index 12072090e9..5b57d05c74 100755 --- a/tools/graph_bag/scripts/merge_all_bags.py +++ b/tools/graph_bag/scripts/merge_all_bags.py @@ -16,33 +16,36 @@ # License for the specific language governing permissions and limitations # under the License. -import merge_bags - import argparse import os import re import string import sys +import merge_bags import rosbag -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--merged-bag', default='') - parser.add_argument('--only-loc-topics', dest='only_loc_topics', action='store_true') - args = parser.parse_args() +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--merged-bag", default="") + parser.add_argument( + "--only-loc-topics", dest="only_loc_topics", action="store_true" + ) + args = parser.parse_args() - # Find bagfiles with bag prefix in current directory, fail if none found - bag_names = [(os.path.splitext(bag)[0]).rstrip(string.digits) - for bag in os.listdir('.') - if os.path.isfile(bag) and bag.endswith('.bag')] - # Remove duplicates - bag_names = sorted(set(bag_names)) - if (len(bag_names) == 0): - print('No bag files found') - sys.exit() - else: - print('Found ' + str(len(bag_names)) + ' bag file prefixes.') + # Find bagfiles with bag prefix in current directory, fail if none found + bag_names = [ + (os.path.splitext(bag)[0]).rstrip(string.digits) + for bag in os.listdir(".") + if os.path.isfile(bag) and bag.endswith(".bag") + ] + # Remove duplicates + bag_names = sorted(set(bag_names)) 
+ if len(bag_names) == 0: + print("No bag files found") + sys.exit() + else: + print(("Found " + str(len(bag_names)) + " bag file prefixes.")) - for bag_name in bag_names: - merge_bags.merge_bag(bag_name, args.merged_bag, args.only_loc_topics) + for bag_name in bag_names: + merge_bags.merge_bag(bag_name, args.merged_bag, args.only_loc_topics) diff --git a/tools/graph_bag/scripts/merge_bags.py b/tools/graph_bag/scripts/merge_bags.py index 0abe793a28..eb59d170c1 100755 --- a/tools/graph_bag/scripts/merge_bags.py +++ b/tools/graph_bag/scripts/merge_bags.py @@ -26,48 +26,63 @@ # https://stackoverflow.com/a/4836734 def natural_sort(l): - convert = lambda text: int(text) if text.isdigit() else text.lower() - alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] - return sorted(l, key=alphanum_key) + convert = lambda text: int(text) if text.isdigit() else text.lower() + alphanum_key = lambda key: [convert(c) for c in re.split("([0-9]+)", key)] + return sorted(l, key=alphanum_key) def merge_bag(input_bag_prefix, merged_bag, only_loc_topics=False): - # Find bagfiles with bag prefix in current directory, fail if none found - bag_names = [ - bag for bag in os.listdir('.') if os.path.isfile(bag) and bag.startswith(input_bag_prefix) and bag.endswith('.bag') - ] - if (len(bag_names) == 0): - print('No bag files found') - sys.exit() - else: - print('Found ' + str(len(bag_names)) + ' bag files.') + # Find bagfiles with bag prefix in current directory, fail if none found + bag_names = [ + bag + for bag in os.listdir(".") + if os.path.isfile(bag) + and bag.startswith(input_bag_prefix) + and bag.endswith(".bag") + ] + if len(bag_names) == 0: + print("No bag files found") + sys.exit() + else: + print(("Found " + str(len(bag_names)) + " bag files.")) - merged_bag_name = '' - if not merged_bag: - merged_bag_name = 'merged_' + input_bag_prefix + '.bag' + merged_bag_name = "" + if not merged_bag: + merged_bag_name = "merged_" + input_bag_prefix + ".bag" - 
sorted_bag_names = natural_sort(bag_names) + sorted_bag_names = natural_sort(bag_names) - topics = None - if only_loc_topics: - topics = [ - '/hw/imu', '/loc/of/features', '/loc/ml/features', '/loc/ar/features', '/mgt/img_sampler/nav_cam/image_record', - '/graph_loc/state', '/gnc/ekf', '/sparse_mapping/pose', '/mob/flight_mode', '/beh/inspection/feedback', - '/beh/inspection/goal', '/beh/inspection/result' - ] + topics = None + if only_loc_topics: + topics = [ + "/hw/imu", + "/loc/of/features", + "/loc/ml/features", + "/loc/ar/features", + "/mgt/img_sampler/nav_cam/image_record", + "/graph_loc/state", + "/gnc/ekf", + "/sparse_mapping/pose", + "/mob/flight_mode", + "/beh/inspection/feedback", + "/beh/inspection/goal", + "/beh/inspection/result", + ] - with rosbag.Bag(merged_bag_name, 'w') as merged_bag: - for sorted_bag_name in sorted_bag_names: - with rosbag.Bag(sorted_bag_name, 'r') as sorted_bag: - for topic, msg, t in sorted_bag.read_messages(topics): - merged_bag.write(topic, msg, t) + with rosbag.Bag(merged_bag_name, "w") as merged_bag: + for sorted_bag_name in sorted_bag_names: + with rosbag.Bag(sorted_bag_name, "r") as sorted_bag: + for topic, msg, t in sorted_bag.read_messages(topics): + merged_bag.write(topic, msg, t) -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('input_bag_prefix') - parser.add_argument('--merged-bag', default='') - parser.add_argument('--only-loc-topics', dest='only_loc_topics', action='store_true') - parser.add_argument('--merged-bag', default='') - args = parser.parse_args() - merge_bag(args.input_bag_prefix, args.merged_bag, args.only_loc_topics) +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("input_bag_prefix") + parser.add_argument("--merged-bag", default="") + parser.add_argument( + "--only-loc-topics", dest="only_loc_topics", action="store_true" + ) + args = parser.parse_args() + 
merge_bag(args.input_bag_prefix, args.merged_bag, args.only_loc_topics) diff --git a/tools/graph_bag/scripts/multiprocessing_helpers.py b/tools/graph_bag/scripts/multiprocessing_helpers.py index 14d2486479..e46960992b 100644 --- a/tools/graph_bag/scripts/multiprocessing_helpers.py +++ b/tools/graph_bag/scripts/multiprocessing_helpers.py @@ -19,14 +19,15 @@ # even when running commands through multiprocessing # pooling def full_traceback(func): - import traceback, functools + import functools + import traceback - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - msg = "{}\n\nOriginal {}".format(e, traceback.format_exc()) - raise type(e)(msg) + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + msg = "{}\n\nOriginal {}".format(e, traceback.format_exc()) + raise type(e)(msg) - return wrapper + return wrapper diff --git a/tools/graph_bag/scripts/orientations.py b/tools/graph_bag/scripts/orientations.py index 24ad46359e..2e851702a7 100644 --- a/tools/graph_bag/scripts/orientations.py +++ b/tools/graph_bag/scripts/orientations.py @@ -20,22 +20,23 @@ class Orientations: + def __init__(self): + self.yaws = [] + self.pitches = [] + self.rolls = [] - def __init__(self): - self.yaws = [] - self.pitches = [] - self.rolls = [] + def get_euler(self, index): + return [self.yaws[index], self.pitches[index], self.rolls[index]] - def get_euler(self, index): - return [self.yaws[index], self.pitches[index], self.rolls[index]] + def get_rotation(self, index): + return scipy.spatial.transform.Rotation.from_euler( + "ZYX", self.get_euler(index), degrees=True + ) - def get_rotation(self, index): - return scipy.spatial.transform.Rotation.from_euler('ZYX', self.get_euler(index), degrees=True) + def add_euler(self, euler_angles): + self.add(euler_angles[0], euler_angles[1], euler_angles[2]) - def add_euler(self, euler_angles): - self.add(euler_angles[0], 
euler_angles[1], euler_angles[2]) - - def add(self, yaw, pitch, roll): - self.yaws.append(yaw) - self.pitches.append(pitch) - self.rolls.append(roll) + def add(self, yaw, pitch, roll): + self.yaws.append(yaw) + self.pitches.append(pitch) + self.rolls.append(roll) diff --git a/tools/graph_bag/scripts/parameter_sweep.py b/tools/graph_bag/scripts/parameter_sweep.py index 54b6620b56..c811e5d148 100755 --- a/tools/graph_bag/scripts/parameter_sweep.py +++ b/tools/graph_bag/scripts/parameter_sweep.py @@ -22,11 +22,11 @@ import itertools import math import multiprocessing -import numpy as np import os import average_results import config_creator +import numpy as np import plot_parameter_sweep_results import utilities @@ -36,157 +36,250 @@ # some errors are suppressed due to the multiprocessing # library call @utilities.full_traceback -def test_values(values, job_id, value_names, output_dir, bag_file, map_file, image_topic, config_path, robot_config, - world, use_image_features, groundtruth_bagfile, rmse_rel_start_time, rmse_rel_end_time): - new_output_dir = os.path.join(output_dir, str(job_id)) - os.mkdir(new_output_dir) - graph_config_filepath = os.path.join(config_path, "config", "graph_localizer.config") - new_graph_config_filepath = os.path.join(new_output_dir, "graph_localizer.config") - config_creator.make_config(values, value_names, graph_config_filepath, new_graph_config_filepath) - output_bag = os.path.join(new_output_dir, "results.bag") - output_stats_file = os.path.join(new_output_dir, "graph_stats.csv") - graph_config_prefix = new_output_dir + '/' - run_command = 'rosrun graph_bag run_graph_bag ' + bag_file + ' ' + map_file + ' ' + config_path + ' -o ' + output_bag + ' -s ' + output_stats_file + ' -r ' + robot_config + ' -w ' + world + ' -g ' + graph_config_prefix + ' -f ' + str( - use_image_features) - if image_topic is not None: - run_command += ' -i ' + image_topic - os.system(run_command) - output_pdf_file = os.path.join(new_output_dir, str(job_id) + 
'_output.pdf') - output_csv_file = os.path.join(new_output_dir, 'graph_stats.csv') - plot_command = 'rosrun graph_bag plot_results_main.py ' + output_bag + ' --output-file ' + output_pdf_file + ' --output-csv-file ' + output_csv_file + ' -g ' + groundtruth_bagfile + ' --rmse-rel-start-time ' + str( - rmse_rel_start_time) + ' --rmse-rel-end-time ' + str(rmse_rel_end_time) - os.system(plot_command) +def test_values( + values, + job_id, + value_names, + output_dir, + bag_file, + map_file, + image_topic, + config_path, + robot_config, + world, + use_image_features, + groundtruth_bagfile, + rmse_rel_start_time, + rmse_rel_end_time, +): + new_output_dir = os.path.join(output_dir, str(job_id)) + os.mkdir(new_output_dir) + graph_config_filepath = os.path.join( + config_path, "config", "graph_localizer.config" + ) + new_graph_config_filepath = os.path.join(new_output_dir, "graph_localizer.config") + config_creator.make_config( + values, value_names, graph_config_filepath, new_graph_config_filepath + ) + output_bag = os.path.join(new_output_dir, "results.bag") + output_stats_file = os.path.join(new_output_dir, "graph_stats.csv") + graph_config_prefix = new_output_dir + "/" + run_command = ( + "rosrun graph_bag run_graph_bag " + + bag_file + + " " + + map_file + + " " + + config_path + + " -o " + + output_bag + + " -s " + + output_stats_file + + " -r " + + robot_config + + " -w " + + world + + " -g " + + graph_config_prefix + + " -f " + + str(use_image_features) + ) + if image_topic is not None: + run_command += " -i " + image_topic + os.system(run_command) + output_pdf_file = os.path.join(new_output_dir, str(job_id) + "_output.pdf") + output_csv_file = os.path.join(new_output_dir, "graph_stats.csv") + plot_command = ( + "rosrun graph_bag plot_results_main.py " + + output_bag + + " --output-file " + + output_pdf_file + + " --output-csv-file " + + output_csv_file + + " -g " + + groundtruth_bagfile + + " --rmse-rel-start-time " + + str(rmse_rel_start_time) + + " 
--rmse-rel-end-time " + + str(rmse_rel_end_time) + ) + os.system(plot_command) # Helper that unpacks arguments and calls original function # Aides running jobs in parallel as pool only supports # passing a single argument to workers def test_values_helper(zipped_vals): - return test_values(*zipped_vals) + return test_values(*zipped_vals) def concat_results(job_ids, directory): - results_csv_files = [] - for job_id in job_ids: - results_csv_files.append(os.path.join(directory, str(job_id), 'graph_stats.csv')) - # Results are written in job id order - combined_results = average_results.combined_results(results_csv_files) - combined_results_file = os.path.join(directory, 'param_sweep_combined_results.csv') - combined_results.to_csv(combined_results_file, index=False) - - -def parameter_sweep(all_value_combos, - value_names, - output_dir, - bag_file, - map_file, - image_topic, - config_path, - robot_config, - world, - use_image_features, - groundtruth_bagfile, - rmse_rel_start_time=0, - rmse_rel_end_time=-1): - job_ids = list(range(len(all_value_combos))) - num_processes = 15 - pool = multiprocessing.Pool(num_processes) - # izip arguments so we can pass as one argument to pool worker - pool.map( - test_values_helper, - itertools.izip(all_value_combos, job_ids, itertools.repeat(value_names), itertools.repeat(output_dir), - itertools.repeat(bag_file), itertools.repeat(map_file), itertools.repeat(image_topic), - itertools.repeat(config_path), itertools.repeat(robot_config), itertools.repeat(world), - itertools.repeat(use_image_features), itertools.repeat(groundtruth_bagfile), - itertools.repeat(rmse_rel_start_time), itertools.repeat(rmse_rel_end_time))) - concat_results(job_ids, output_dir) + results_csv_files = [] + for job_id in job_ids: + results_csv_files.append( + os.path.join(directory, str(job_id), "graph_stats.csv") + ) + # Results are written in job id order + combined_results = average_results.combined_results(results_csv_files) + combined_results_file = 
os.path.join(directory, "param_sweep_combined_results.csv") + combined_results.to_csv(combined_results_file, index=False) + + +def parameter_sweep( + all_value_combos, + value_names, + output_dir, + bag_file, + map_file, + image_topic, + config_path, + robot_config, + world, + use_image_features, + groundtruth_bagfile, + rmse_rel_start_time=0, + rmse_rel_end_time=-1, +): + job_ids = list(range(len(all_value_combos))) + num_processes = 15 + pool = multiprocessing.Pool(num_processes) + # izip arguments so we can pass as one argument to pool worker + pool.map( + test_values_helper, + list( + zip( + all_value_combos, + job_ids, + itertools.repeat(value_names), + itertools.repeat(output_dir), + itertools.repeat(bag_file), + itertools.repeat(map_file), + itertools.repeat(image_topic), + itertools.repeat(config_path), + itertools.repeat(robot_config), + itertools.repeat(world), + itertools.repeat(use_image_features), + itertools.repeat(groundtruth_bagfile), + itertools.repeat(rmse_rel_start_time), + itertools.repeat(rmse_rel_end_time), + ) + ), + ) + concat_results(job_ids, output_dir) def make_all_value_combinations(value_ranges): - return list(itertools.product(*value_ranges)) + return list(itertools.product(*value_ranges)) def make_value_ranges(): - value_ranges = [] - value_names = [] - steps = 10 + value_ranges = [] + value_names = [] + steps = 10 - # tune num smart factors - #value_ranges.append(np.logspace(-1, -6, steps, endpoint=True)) - #value_names.append('accel_bias_sigma') - value_ranges.append(np.linspace(0, 500, steps, endpoint=True)) - value_names.append('smart_projection_adder_feature_track_min_separation') + # tune num smart factors + # value_ranges.append(np.logspace(-1, -6, steps, endpoint=True)) + # value_names.append('accel_bias_sigma') + value_ranges.append(np.linspace(0, 500, steps, endpoint=True)) + value_names.append("smart_projection_adder_feature_track_min_separation") - #q_gyro - # .001 -> 2 deg - #q_gyro_degrees_range = np.logspace(-3, .3, 
steps, endpoint=True) - #q_gyro_squared_rads_range = [math.radians(deg)**2 for deg in q_gyro_degrees_range] - #value_ranges.append(q_gyro_squared_rads_range) - #value_names.append('q_gyro') + # q_gyro + # .001 -> 2 deg + # q_gyro_degrees_range = np.logspace(-3, .3, steps, endpoint=True) + # q_gyro_squared_rads_range = [math.radians(deg)**2 for deg in q_gyro_degrees_range] + # value_ranges.append(q_gyro_squared_rads_range) + # value_names.append('q_gyro') - return value_ranges, value_names + return value_ranges, value_names def save_values(value_names, values, filename, output_dir): - with open(os.path.join(output_dir, filename), 'w') as values_file: - writer = csv.writer(values_file) - writer.writerow(value_names) - writer.writerows(values) - - -def make_values_and_parameter_sweep(output_dir, - bag_file, - map_file, - image_topic, - config_path, - robot_config, - world, - use_image_features, - groundtruth_bagfile, - rmse_rel_start_time=0, - rmse_rel_end_time=-1): - output_dir = utilities.create_directory(output_dir) - print('Output directory for results is {}'.format(output_dir)) - - value_ranges, value_names = make_value_ranges() - save_values(value_names, value_ranges, 'individual_value_ranges.csv', output_dir) - - all_value_combos = make_all_value_combinations(value_ranges) - save_values(value_names, all_value_combos, 'all_value_combos.csv', output_dir) - - parameter_sweep(all_value_combos, value_names, output_dir, bag_file, map_file, image_topic, config_path, robot_config, - world, use_image_features, groundtruth_bagfile, rmse_rel_start_time, rmse_rel_end_time) - combined_results_file = os.path.join(output_dir, 'param_sweep_combined_results.csv') - value_combos_file = os.path.join(output_dir, 'all_value_combos.csv') - results_pdf_file = os.path.join(output_dir, 'param_sweep_results.pdf') - plot_parameter_sweep_results.create_plots(results_pdf_file, combined_results_file, value_combos_file) - return output_dir - - -if __name__ == '__main__': - parser = 
argparse.ArgumentParser() - parser.add_argument('bag_file', help='Full path to bagfile.') - parser.add_argument('map_file', help='Full path to map file.') - parser.add_argument('image_topic', help='Image topic.') - parser.add_argument('config_path', help='Full path to config path.') - parser.add_argument('robot_config', help='Relative path to robot config.') - parser.add_argument('world', help='World being used.') - parser.add_argument('--generate-image-features', - dest='use_image_features', - action='store_false', - help='Use image features msgs from bagfile or generate features from images.') - - parser.add_argument('-g', '--groundtruth-bagfile', default=None) - parser.add_argument( - '--directory', - default=None, - help= - 'Full path to output directory where files will be saved. If not specified, timestamped directory will be created in current path.' - ) - args = parser.parse_args() - # Default set groundtruth bagfile to normal bagfile - if not args.groundtruth_bagfile: - args.groundtruth_bagfile = args.bag_file - - make_values_and_parameter_sweep(args.directory, args.bag_file, args.map_file, args.image_topic, args.config_path, - args.robot_config, args.world, args.use_image_features, args.groundtruth_bagfile) + with open(os.path.join(output_dir, filename), "w") as values_file: + writer = csv.writer(values_file) + writer.writerow(value_names) + writer.writerows(values) + + +def make_values_and_parameter_sweep( + output_dir, + bag_file, + map_file, + image_topic, + config_path, + robot_config, + world, + use_image_features, + groundtruth_bagfile, + rmse_rel_start_time=0, + rmse_rel_end_time=-1, +): + output_dir = utilities.create_directory(output_dir) + print(("Output directory for results is {}".format(output_dir))) + + value_ranges, value_names = make_value_ranges() + save_values(value_names, value_ranges, "individual_value_ranges.csv", output_dir) + + all_value_combos = make_all_value_combinations(value_ranges) + save_values(value_names, all_value_combos, 
"all_value_combos.csv", output_dir) + + parameter_sweep( + all_value_combos, + value_names, + output_dir, + bag_file, + map_file, + image_topic, + config_path, + robot_config, + world, + use_image_features, + groundtruth_bagfile, + rmse_rel_start_time, + rmse_rel_end_time, + ) + combined_results_file = os.path.join(output_dir, "param_sweep_combined_results.csv") + value_combos_file = os.path.join(output_dir, "all_value_combos.csv") + results_pdf_file = os.path.join(output_dir, "param_sweep_results.pdf") + plot_parameter_sweep_results.create_plots( + results_pdf_file, combined_results_file, value_combos_file + ) + return output_dir + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("bag_file", help="Full path to bagfile.") + parser.add_argument("map_file", help="Full path to map file.") + parser.add_argument("image_topic", help="Image topic.") + parser.add_argument("config_path", help="Full path to config path.") + parser.add_argument("robot_config", help="Relative path to robot config.") + parser.add_argument("world", help="World being used.") + parser.add_argument( + "--generate-image-features", + dest="use_image_features", + action="store_false", + help="Use image features msgs from bagfile or generate features from images.", + ) + + parser.add_argument("-g", "--groundtruth-bagfile", default=None) + parser.add_argument( + "--directory", + default=None, + help="Full path to output directory where files will be saved. 
If not specified, timestamped directory will be created in current path.", + ) + args = parser.parse_args() + # Default set groundtruth bagfile to normal bagfile + if not args.groundtruth_bagfile: + args.groundtruth_bagfile = args.bag_file + + make_values_and_parameter_sweep( + args.directory, + args.bag_file, + args.map_file, + args.image_topic, + args.config_path, + args.robot_config, + args.world, + args.use_image_features, + args.groundtruth_bagfile, + ) diff --git a/tools/graph_bag/scripts/plot_all_results.py b/tools/graph_bag/scripts/plot_all_results.py index 5b3d3015b5..ce6c9b1918 100755 --- a/tools/graph_bag/scripts/plot_all_results.py +++ b/tools/graph_bag/scripts/plot_all_results.py @@ -19,23 +19,32 @@ import argparse import os -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--output-dir', default='') - args = parser.parse_args() +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--output-dir", default="") + args = parser.parse_args() - # Find bagfiles with bag prefix in current directory, fail if none found - bag_names = [bag for bag in os.listdir('.') if os.path.isfile(bag) and bag.endswith('.bag')] - bag_names = sorted(set(bag_names)) - if (len(bag_names) == 0): - print('No bag files found') - sys.exit() - else: - print('Found ' + str(len(bag_names)) + ' bags.') + # Find bagfiles with bag prefix in current directory, fail if none found + bag_names = [ + bag for bag in os.listdir(".") if os.path.isfile(bag) and bag.endswith(".bag") + ] + bag_names = sorted(set(bag_names)) + if len(bag_names) == 0: + print("No bag files found") + sys.exit() + else: + print(("Found " + str(len(bag_names)) + " bags.")) - for bag_name in bag_names: - output_file_name = os.path.splitext(os.path.basename(bag_name))[0] + '_output.pdf' - if args.output_dir: - output_file_name = args.output_dir + '/' + output_file_name - plot_command = 'rosrun graph_bag plot_results_main.py ' + bag_name + ' 
--output-file ' + output_file_name - os.system(plot_command) + for bag_name in bag_names: + output_file_name = ( + os.path.splitext(os.path.basename(bag_name))[0] + "_output.pdf" + ) + if args.output_dir: + output_file_name = args.output_dir + "/" + output_file_name + plot_command = ( + "rosrun graph_bag plot_results_main.py " + + bag_name + + " --output-file " + + output_file_name + ) + os.system(plot_command) diff --git a/tools/graph_bag/scripts/plot_bag_sweep_results.py b/tools/graph_bag/scripts/plot_bag_sweep_results.py index 32f6e14e10..208ee7b768 100755 --- a/tools/graph_bag/scripts/plot_bag_sweep_results.py +++ b/tools/graph_bag/scripts/plot_bag_sweep_results.py @@ -18,231 +18,452 @@ # under the License. import argparse + import matplotlib -matplotlib.use('pdf') -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages -import pandas as pd +matplotlib.use("pdf") import os import sys +import matplotlib.pyplot as plt +import pandas as pd +from matplotlib.backends.backend_pdf import PdfPages -def save_rmse_results_to_csv(rmses, prefix='', rmses_2=None, label_1=None, label_2=None): - mean_rmses_dataframe = pd.DataFrame() - labels = [] - if label_1 and label_2 and rmses_2 is not None: - labels.append(label_1) - labels.append(label_2) - if labels: - mean_rmses_dataframe['Label'] = labels - mean_rmses_list = [] - mean_rmses_list.append(rmses.mean()) - relative_rmses = [] - relative_change_in_rmses = [] - if rmses_2 is not None: - mean_rmses_list.append(rmses_2.mean()) - relative_rmses.append(mean_rmses_list[0] / mean_rmses_list[1]) - relative_rmses.append(mean_rmses_list[1] / mean_rmses_list[0]) - relative_change_in_rmses.append(mean_rmses_list[0] / mean_rmses_list[1] - 1.0) - relative_change_in_rmses.append(mean_rmses_list[1] / mean_rmses_list[0] - 1.0) - mean_rmses_dataframe['rel_' + prefix + 'rmse_%'] = relative_rmses - mean_rmses_dataframe['rel_' + prefix + 'rmse_delta_%'] = relative_change_in_rmses - mean_rmses_dataframe['mean_' + 
prefix + 'rmse'] = mean_rmses_list - mean_rmses_csv_file = 'mean_rmses.csv' - mean_rmses_dataframe.to_csv(mean_rmses_csv_file, index=False, mode='a') - return mean_rmses_list, labels, relative_rmses, relative_change_in_rmses - - -def rmse_plot(pdf, x_axis_vals, shortened_bag_names, rmses, prefix='', label_1='', rmses_2=None, label_2=''): - plt.figure() - plt.plot(x_axis_vals, rmses, 'b', label=label_1, linestyle='None', marker='o', markeredgewidth=0.1, markersize=10.5) - if rmses_2 is not None: - plt.plot(x_axis_vals, - rmses_2, - 'r', - label=label_2, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=10.5) - plt.legend(prop={'size': 8}, bbox_to_anchor=(1.05, 1)) - plt.xticks(x_axis_vals, shortened_bag_names, fontsize=7, rotation=20) - plt.ylabel(prefix + ' RMSE') - plt.title(prefix + ' RMSE vs. Bag') - x_range = x_axis_vals[len(x_axis_vals) - 1] - x_axis_vals[0] - x_buffer = x_range * 0.1 - # Extend x axis on either side to make data more visible - plt.xlim([x_axis_vals[0] - x_buffer, x_axis_vals[len(x_axis_vals) - 1] + x_buffer]) - plt.tight_layout() - pdf.savefig() - plt.close() - - -def save_rmse_stats_to_plot(pdf, rmses, prefix='', label_1='', rmses_2=None, label_2=''): - # Plot mean rmses - mean_rmses, labels, relative_rmses, relative_change_in_rmses = save_rmse_results_to_csv( - rmses, prefix, rmses_2, label_1, label_2) - - mean_rmses_1_string = prefix + 'rmse: ' + str(mean_rmses[0]) - if labels: - mean_rmses_1_string += ', label: ' + labels[0] - plt.figure() - plt.axis('off') - plt.text(0.0, 1.0, mean_rmses_1_string) - if len(mean_rmses) > 1: - mean_rmses_2_string = prefix + 'rmse: ' + str(mean_rmses[1]) + +def save_rmse_results_to_csv( + rmses, prefix="", rmses_2=None, label_1=None, label_2=None +): + mean_rmses_dataframe = pd.DataFrame() + labels = [] + if label_1 and label_2 and rmses_2 is not None: + labels.append(label_1) + labels.append(label_2) if labels: - mean_rmses_2_string += ', label: ' + labels[1] - plt.text(0.0, 0.85, 
mean_rmses_2_string) - relative_rmses_string = prefix + 'rel rmse %: ' + str(100 * relative_rmses[0]) - plt.text(0.0, 0.7, relative_rmses_string) - relative_rmses_change_string = prefix + 'rel change in rmse %: ' + str(100 * relative_change_in_rmses[0]) - plt.text(0.0, 0.55, relative_rmses_change_string) - pdf.savefig() - - -def rmse_plots(pdf, - x_axis_vals, - shortened_bag_names, - rmses, - integrated_rmses, - orientation_rmses, - prefix='', - label_1='', - rmses_2=None, - integrated_rmses_2=None, - orientation_rmses_2=None, - label_2=''): - rmse_plot(pdf, x_axis_vals, shortened_bag_names, rmses, prefix + ' ', label_1, rmses_2, label_2) - if integrated_rmses is not None: - rmse_plot(pdf, x_axis_vals, shortened_bag_names, integrated_rmses, prefix + ' Integrated ', label_1, - integrated_rmses_2, label_2) - rmse_plot(pdf, x_axis_vals, shortened_bag_names, orientation_rmses, prefix + ' Orientation ', label_1, - orientation_rmses_2, label_2) - - save_rmse_stats_to_plot(pdf, rmses, prefix + ' ', label_1, rmses_2, label_2) - if integrated_rmses is not None: - save_rmse_stats_to_plot(pdf, integrated_rmses, prefix + ' Integrated ', label_1, integrated_rmses_2, label_2) - save_rmse_stats_to_plot(pdf, orientation_rmses, prefix + ' Orientation ', label_1, orientation_rmses_2, label_2) - - -def create_plot(output_file, csv_file, label_1='', csv_file_2=None, label_2='', imu_augmented_2=True): - dataframe = pd.read_csv(csv_file) - dataframe.sort_values(by=['Bag'], inplace=True) - # Graph rmses - rmses = dataframe['rmse'] - rel_rmses = dataframe['rel_rmse'] - integrated_rmses = dataframe['integrated_rmse'] - rel_integrated_rmses = dataframe['rel_integrated_rmse'] - orientation_rmses = dataframe['orientation_rmse'] - rel_orientation_rmses = dataframe['rel_orientation_rmse'] - # IMU augmented rmses - imu_augmented_rmses = dataframe['imu_augmented_rmse'] - rel_imu_augmented_rmses = dataframe['rel_imu_augmented_rmse'] - imu_augmented_integrated_rmses = 
dataframe['imu_augmented_integrated_rmse'] - rel_imu_augmented_integrated_rmses = dataframe['rel_imu_augmented_integrated_rmse'] - imu_augmented_orientation_rmses = dataframe['imu_augmented_orientation_rmse'] - rel_imu_augmented_orientation_rmses = dataframe['rel_imu_augmented_orientation_rmse'] - # IMU bias tester rmses - imu_bias_tester_rmses = dataframe['imu_bias_tester_rmse'] - rel_imu_bias_tester_rmses = dataframe['rel_imu_bias_tester_rmse'] - imu_bias_tester_orientation_rmses = dataframe['imu_bias_tester_orientation_rmse'] - rel_imu_bias_tester_orientation_rmses = dataframe['rel_imu_bias_tester_orientation_rmse'] - - bag_names = dataframe['Bag'].tolist() - max_name_length = 45 - shortened_bag_names = [ - bag_name[-1 * max_name_length:] if len(bag_name) > max_name_length else bag_name for bag_name in bag_names - ] - x_axis_vals = range(len(shortened_bag_names)) - rmses_2 = None - rel_rmses_2 = None - integrated_rmses_2 = None - rel_integrated_rmses_2 = None - orientation_rmses_2 = None - rel_orientation_rmses_2 = None - imu_augmented_rmses_2 = None - rel_imu_augmented_rmses_2 = None - imu_augmented_integrated_rmses_2 = None - rel_imu_augmented_integrated_rmses_2 = None - imu_augmented_orientation_rmses_2 = None - rel_imu_augmented_orientation_rmses_2 = None - imu_bias_tester_rmses_2 = None - rel_imu_bias_tester_rmses_2 = None - imu_bias_tester_orientation_rmses_2 = None - rel_imu_bias_tester_orientation_rmses_2 = None - - if (csv_file_2): - dataframe_2 = pd.read_csv(csv_file_2) - dataframe_2.sort_values(by=['Bag'], inplace=True) + mean_rmses_dataframe["Label"] = labels + mean_rmses_list = [] + mean_rmses_list.append(rmses.mean()) + relative_rmses = [] + relative_change_in_rmses = [] + if rmses_2 is not None: + mean_rmses_list.append(rmses_2.mean()) + relative_rmses.append(mean_rmses_list[0] / mean_rmses_list[1]) + relative_rmses.append(mean_rmses_list[1] / mean_rmses_list[0]) + relative_change_in_rmses.append(mean_rmses_list[0] / mean_rmses_list[1] - 1.0) + 
relative_change_in_rmses.append(mean_rmses_list[1] / mean_rmses_list[0] - 1.0) + mean_rmses_dataframe["rel_" + prefix + "rmse_%"] = relative_rmses + mean_rmses_dataframe[ + "rel_" + prefix + "rmse_delta_%" + ] = relative_change_in_rmses + mean_rmses_dataframe["mean_" + prefix + "rmse"] = mean_rmses_list + mean_rmses_csv_file = "mean_rmses.csv" + mean_rmses_dataframe.to_csv(mean_rmses_csv_file, index=False, mode="a") + return mean_rmses_list, labels, relative_rmses, relative_change_in_rmses + + +def rmse_plot( + pdf, + x_axis_vals, + shortened_bag_names, + rmses, + prefix="", + label_1="", + rmses_2=None, + label_2="", +): + plt.figure() + plt.plot( + x_axis_vals, + rmses, + "b", + label=label_1, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=10.5, + ) + if rmses_2 is not None: + plt.plot( + x_axis_vals, + rmses_2, + "r", + label=label_2, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=10.5, + ) + plt.legend(prop={"size": 8}, bbox_to_anchor=(1.05, 1)) + plt.xticks(x_axis_vals, shortened_bag_names, fontsize=7, rotation=20) + plt.ylabel(prefix + " RMSE") + plt.title(prefix + " RMSE vs. 
Bag") + x_range = x_axis_vals[len(x_axis_vals) - 1] - x_axis_vals[0] + x_buffer = x_range * 0.1 + # Extend x axis on either side to make data more visible + plt.xlim([x_axis_vals[0] - x_buffer, x_axis_vals[len(x_axis_vals) - 1] + x_buffer]) + plt.tight_layout() + pdf.savefig() + plt.close() + + +def save_rmse_stats_to_plot( + pdf, rmses, prefix="", label_1="", rmses_2=None, label_2="" +): + # Plot mean rmses + ( + mean_rmses, + labels, + relative_rmses, + relative_change_in_rmses, + ) = save_rmse_results_to_csv(rmses, prefix, rmses_2, label_1, label_2) + + mean_rmses_1_string = prefix + "rmse: " + str(mean_rmses[0]) + if labels: + mean_rmses_1_string += ", label: " + labels[0] + plt.figure() + plt.axis("off") + plt.text(0.0, 1.0, mean_rmses_1_string) + if len(mean_rmses) > 1: + mean_rmses_2_string = prefix + "rmse: " + str(mean_rmses[1]) + if labels: + mean_rmses_2_string += ", label: " + labels[1] + plt.text(0.0, 0.85, mean_rmses_2_string) + relative_rmses_string = prefix + "rel rmse %: " + str(100 * relative_rmses[0]) + plt.text(0.0, 0.7, relative_rmses_string) + relative_rmses_change_string = ( + prefix + "rel change in rmse %: " + str(100 * relative_change_in_rmses[0]) + ) + plt.text(0.0, 0.55, relative_rmses_change_string) + pdf.savefig() + + +def rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + rmses, + integrated_rmses, + orientation_rmses, + prefix="", + label_1="", + rmses_2=None, + integrated_rmses_2=None, + orientation_rmses_2=None, + label_2="", +): + rmse_plot( + pdf, + x_axis_vals, + shortened_bag_names, + rmses, + prefix + " ", + label_1, + rmses_2, + label_2, + ) + if integrated_rmses is not None: + rmse_plot( + pdf, + x_axis_vals, + shortened_bag_names, + integrated_rmses, + prefix + " Integrated ", + label_1, + integrated_rmses_2, + label_2, + ) + rmse_plot( + pdf, + x_axis_vals, + shortened_bag_names, + orientation_rmses, + prefix + " Orientation ", + label_1, + orientation_rmses_2, + label_2, + ) + + save_rmse_stats_to_plot(pdf, rmses, 
prefix + " ", label_1, rmses_2, label_2) + if integrated_rmses is not None: + save_rmse_stats_to_plot( + pdf, + integrated_rmses, + prefix + " Integrated ", + label_1, + integrated_rmses_2, + label_2, + ) + save_rmse_stats_to_plot( + pdf, + orientation_rmses, + prefix + " Orientation ", + label_1, + orientation_rmses_2, + label_2, + ) + + +def create_plot( + output_file, csv_file, label_1="", csv_file_2=None, label_2="", imu_augmented_2=True +): + dataframe = pd.read_csv(csv_file) + dataframe.sort_values(by=["Bag"], inplace=True) # Graph rmses - rmses_2 = dataframe_2['rmse'] - rel_rmses_2 = dataframe_2['rel_rmse'] - integrated_rmses_2 = dataframe_2['integrated_rmse'] - rel_integrated_rmses_2 = dataframe_2['rel_integrated_rmse'] - orientation_rmses_2 = dataframe_2['orientation_rmse'] - rel_orientation_rmses_2 = dataframe_2['rel_orientation_rmse'] - if (imu_augmented_2): - # IMU augmented rmses - imu_augmented_rmses_2 = dataframe_2['imu_augmented_rmse'] - rel_imu_augmented_rmses_2 = dataframe_2['rel_imu_augmented_rmse'] - imu_augmented_integrated_rmses_2 = dataframe_2['imu_augmented_integrated_rmse'] - rel_imu_augmented_integrated_rmses_2 = dataframe_2['rel_imu_augmented_integrated_rmse'] - imu_augmented_orientation_rmses_2 = dataframe_2['imu_augmented_orientation_rmse'] - rel_imu_augmented_orientation_rmses_2 = dataframe_2['rel_imu_augmented_orientation_rmse'] - # IMU bias tester rmses - imu_bias_tester_rmses_2 = dataframe_2['imu_bias_tester_rmse'] - rel_imu_bias_tester_rmses_2 = dataframe_2['rel_imu_bias_tester_rmse'] - imu_bias_tester_orientation_rmses_2 = dataframe_2['imu_bias_tester_orientation_rmse'] - rel_imu_bias_tester_orientation_rmses_2 = dataframe_2['rel_imu_bias_tester_orientation_rmse'] - - bag_names_2 = dataframe_2['Bag'].tolist() - if bag_names != bag_names_2: - print('Bag names for first and second csv file are not the same') - exit() - with PdfPages(output_file) as pdf: - rmse_plots(pdf, x_axis_vals, shortened_bag_names, rmses, integrated_rmses, 
orientation_rmses, '', label_1, rmses_2, - integrated_rmses_2, orientation_rmses_2, label_2) - rmse_plots(pdf, x_axis_vals, shortened_bag_names, rel_rmses, rel_integrated_rmses, rel_orientation_rmses, 'rel', - label_1, rel_rmses_2, rel_integrated_rmses_2, rel_orientation_rmses_2, label_2) - if imu_augmented_2: - rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_augmented_rmses, imu_augmented_integrated_rmses, - imu_augmented_orientation_rmses, 'imu_augmented', label_1, imu_augmented_rmses_2, - imu_augmented_integrated_rmses_2, imu_augmented_orientation_rmses_2, label_2) - rmse_plots(pdf, x_axis_vals, shortened_bag_names, rel_imu_augmented_rmses, rel_imu_augmented_integrated_rmses, - rel_imu_augmented_orientation_rmses, 'rel_imu_augmented', label_1, rel_imu_augmented_rmses_2, - rel_imu_augmented_integrated_rmses_2, rel_imu_augmented_orientation_rmses_2, label_2) - rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_bias_tester_rmses, None, imu_bias_tester_orientation_rmses, - 'imu_bias_tester', label_1, imu_bias_tester_rmses_2, None, imu_bias_tester_orientation_rmses_2, - label_2) - rmse_plots(pdf, x_axis_vals, shortened_bag_names, rel_imu_bias_tester_rmses, None, - rel_imu_bias_tester_orientation_rmses, 'rel_imu_bias_tester', label_1, rel_imu_bias_tester_rmses_2, - None, rel_imu_bias_tester_orientation_rmses_2, label_2) - - else: - rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_augmented_rmses, imu_augmented_integrated_rmses, - imu_augmented_orientation_rmses, 'imu_augmented', label_1, rmses_2, integrated_rmses_2, - orientation_rmses_2, label_2 + ' no imu aug') - rmse_plots(pdf, x_axis_vals, shortened_bag_names, rel_imu_augmented_rmses, rel_imu_augmented_integrated_rmses, - rel_imu_augmented_orientation_rmses, 'rel_imu_augmented', label_1, rel_rmses_2, rel_integrated_rmses_2, - rel_orientation_rmses_2, label_2 + ' no imu aug') - rmse_plots(pdf, x_axis_vals, shortened_bag_names, imu_bias_tester_rmses, None, imu_bias_tester_orientation_rmses, - 
'imu_bias_tester', label_1, imu_bias_tester_rmses_2, None, imu_bias_tester_orientation_rmses_2, - label_2) - rmse_plots(pdf, x_axis_vals, shortened_bag_names, rel_imu_bias_tester_rmses, None, - rel_imu_bias_tester_orientation_rmses, 'rel_imu_bias_tester', label_1, rel_imu_bias_tester_rmses_2, - None, rel_imu_bias_tester_orientation_rmses_2, label_2) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - # Combined csv results, where each row is the result from a bag file - parser.add_argument('csv_file') - parser.add_argument('--output-file', default='bag_sweep_results.pdf') - parser.add_argument('--csv-file2', help='Optional second csv file to plot') - parser.add_argument('--label1', default='', help='Optional label for first csv file') - parser.add_argument('--label2', default='', help='Optional label for second csv file') - parser.add_argument('--no-imu-augmented2', dest='imu_augmented2', action='store_false') - args = parser.parse_args() - create_plot(args.output_file, args.csv_file, args.label1, args.csv_file2, args.label2, args.imu_augmented2) + rmses = dataframe["rmse"] + rel_rmses = dataframe["rel_rmse"] + integrated_rmses = dataframe["integrated_rmse"] + rel_integrated_rmses = dataframe["rel_integrated_rmse"] + orientation_rmses = dataframe["orientation_rmse"] + rel_orientation_rmses = dataframe["rel_orientation_rmse"] + # IMU augmented rmses + imu_augmented_rmses = dataframe["imu_augmented_rmse"] + rel_imu_augmented_rmses = dataframe["rel_imu_augmented_rmse"] + imu_augmented_integrated_rmses = dataframe["imu_augmented_integrated_rmse"] + rel_imu_augmented_integrated_rmses = dataframe["rel_imu_augmented_integrated_rmse"] + imu_augmented_orientation_rmses = dataframe["imu_augmented_orientation_rmse"] + rel_imu_augmented_orientation_rmses = dataframe[ + "rel_imu_augmented_orientation_rmse" + ] + # IMU bias tester rmses + imu_bias_tester_rmses = dataframe["imu_bias_tester_rmse"] + rel_imu_bias_tester_rmses = 
dataframe["rel_imu_bias_tester_rmse"] + imu_bias_tester_orientation_rmses = dataframe["imu_bias_tester_orientation_rmse"] + rel_imu_bias_tester_orientation_rmses = dataframe[ + "rel_imu_bias_tester_orientation_rmse" + ] + + bag_names = dataframe["Bag"].tolist() + max_name_length = 45 + shortened_bag_names = [ + bag_name[-1 * max_name_length :] + if len(bag_name) > max_name_length + else bag_name + for bag_name in bag_names + ] + x_axis_vals = list(range(len(shortened_bag_names))) + rmses_2 = None + rel_rmses_2 = None + integrated_rmses_2 = None + rel_integrated_rmses_2 = None + orientation_rmses_2 = None + rel_orientation_rmses_2 = None + imu_augmented_rmses_2 = None + rel_imu_augmented_rmses_2 = None + imu_augmented_integrated_rmses_2 = None + rel_imu_augmented_integrated_rmses_2 = None + imu_augmented_orientation_rmses_2 = None + rel_imu_augmented_orientation_rmses_2 = None + imu_bias_tester_rmses_2 = None + rel_imu_bias_tester_rmses_2 = None + imu_bias_tester_orientation_rmses_2 = None + rel_imu_bias_tester_orientation_rmses_2 = None + + if csv_file_2: + dataframe_2 = pd.read_csv(csv_file_2) + dataframe_2.sort_values(by=["Bag"], inplace=True) + # Graph rmses + rmses_2 = dataframe_2["rmse"] + rel_rmses_2 = dataframe_2["rel_rmse"] + integrated_rmses_2 = dataframe_2["integrated_rmse"] + rel_integrated_rmses_2 = dataframe_2["rel_integrated_rmse"] + orientation_rmses_2 = dataframe_2["orientation_rmse"] + rel_orientation_rmses_2 = dataframe_2["rel_orientation_rmse"] + if imu_augmented_2: + # IMU augmented rmses + imu_augmented_rmses_2 = dataframe_2["imu_augmented_rmse"] + rel_imu_augmented_rmses_2 = dataframe_2["rel_imu_augmented_rmse"] + imu_augmented_integrated_rmses_2 = dataframe_2[ + "imu_augmented_integrated_rmse" + ] + rel_imu_augmented_integrated_rmses_2 = dataframe_2[ + "rel_imu_augmented_integrated_rmse" + ] + imu_augmented_orientation_rmses_2 = dataframe_2[ + "imu_augmented_orientation_rmse" + ] + rel_imu_augmented_orientation_rmses_2 = dataframe_2[ + 
"rel_imu_augmented_orientation_rmse" + ] + # IMU bias tester rmses + imu_bias_tester_rmses_2 = dataframe_2["imu_bias_tester_rmse"] + rel_imu_bias_tester_rmses_2 = dataframe_2["rel_imu_bias_tester_rmse"] + imu_bias_tester_orientation_rmses_2 = dataframe_2[ + "imu_bias_tester_orientation_rmse" + ] + rel_imu_bias_tester_orientation_rmses_2 = dataframe_2[ + "rel_imu_bias_tester_orientation_rmse" + ] + + bag_names_2 = dataframe_2["Bag"].tolist() + if bag_names != bag_names_2: + print("Bag names for first and second csv file are not the same") + exit() + with PdfPages(output_file) as pdf: + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + rmses, + integrated_rmses, + orientation_rmses, + "", + label_1, + rmses_2, + integrated_rmses_2, + orientation_rmses_2, + label_2, + ) + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + rel_rmses, + rel_integrated_rmses, + rel_orientation_rmses, + "rel", + label_1, + rel_rmses_2, + rel_integrated_rmses_2, + rel_orientation_rmses_2, + label_2, + ) + if imu_augmented_2: + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + imu_augmented_rmses, + imu_augmented_integrated_rmses, + imu_augmented_orientation_rmses, + "imu_augmented", + label_1, + imu_augmented_rmses_2, + imu_augmented_integrated_rmses_2, + imu_augmented_orientation_rmses_2, + label_2, + ) + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + rel_imu_augmented_rmses, + rel_imu_augmented_integrated_rmses, + rel_imu_augmented_orientation_rmses, + "rel_imu_augmented", + label_1, + rel_imu_augmented_rmses_2, + rel_imu_augmented_integrated_rmses_2, + rel_imu_augmented_orientation_rmses_2, + label_2, + ) + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + imu_bias_tester_rmses, + None, + imu_bias_tester_orientation_rmses, + "imu_bias_tester", + label_1, + imu_bias_tester_rmses_2, + None, + imu_bias_tester_orientation_rmses_2, + label_2, + ) + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + rel_imu_bias_tester_rmses, + None, + 
rel_imu_bias_tester_orientation_rmses, + "rel_imu_bias_tester", + label_1, + rel_imu_bias_tester_rmses_2, + None, + rel_imu_bias_tester_orientation_rmses_2, + label_2, + ) + + else: + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + imu_augmented_rmses, + imu_augmented_integrated_rmses, + imu_augmented_orientation_rmses, + "imu_augmented", + label_1, + rmses_2, + integrated_rmses_2, + orientation_rmses_2, + label_2 + " no imu aug", + ) + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + rel_imu_augmented_rmses, + rel_imu_augmented_integrated_rmses, + rel_imu_augmented_orientation_rmses, + "rel_imu_augmented", + label_1, + rel_rmses_2, + rel_integrated_rmses_2, + rel_orientation_rmses_2, + label_2 + " no imu aug", + ) + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + imu_bias_tester_rmses, + None, + imu_bias_tester_orientation_rmses, + "imu_bias_tester", + label_1, + imu_bias_tester_rmses_2, + None, + imu_bias_tester_orientation_rmses_2, + label_2, + ) + rmse_plots( + pdf, + x_axis_vals, + shortened_bag_names, + rel_imu_bias_tester_rmses, + None, + rel_imu_bias_tester_orientation_rmses, + "rel_imu_bias_tester", + label_1, + rel_imu_bias_tester_rmses_2, + None, + rel_imu_bias_tester_orientation_rmses_2, + label_2, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Combined csv results, where each row is the result from a bag file + parser.add_argument("csv_file") + parser.add_argument("--output-file", default="bag_sweep_results.pdf") + parser.add_argument("--csv-file2", help="Optional second csv file to plot") + parser.add_argument( + "--label1", default="", help="Optional label for first csv file" + ) + parser.add_argument( + "--label2", default="", help="Optional label for second csv file" + ) + parser.add_argument( + "--no-imu-augmented2", dest="imu_augmented2", action="store_false" + ) + args = parser.parse_args() + create_plot( + args.output_file, + args.csv_file, + args.label1, + args.csv_file2, + 
args.label2, + args.imu_augmented2, + ) diff --git a/tools/graph_bag/scripts/plot_helpers.py b/tools/graph_bag/scripts/plot_helpers.py index d9cebed518..075432745f 100644 --- a/tools/graph_bag/scripts/plot_helpers.py +++ b/tools/graph_bag/scripts/plot_helpers.py @@ -17,64 +17,107 @@ # License for the specific language governing permissions and limitations # under the License. +import matplotlib import poses import vector3ds -import matplotlib -matplotlib.use('pdf') +matplotlib.use("pdf") import matplotlib.pyplot as plt -def plot_vals(x_axis_vals, - vec_of_y_axis_vals, - labels, - colors, - linewidth=1, - linestyle='-', - marker=None, - markeredgewidth=None, - markersize=1): - for index, _ in enumerate(vec_of_y_axis_vals): - plt.plot(x_axis_vals, - vec_of_y_axis_vals[index], - colors[index], - linestyle=linestyle, - linewidth=linewidth, - marker=marker, - markeredgewidth=markeredgewidth, - markersize=markersize, - label=labels[index]) +def plot_vals( + x_axis_vals, + vec_of_y_axis_vals, + labels, + colors, + linewidth=1, + linestyle="-", + marker=None, + markeredgewidth=None, + markersize=1, +): + for index, _ in enumerate(vec_of_y_axis_vals): + plt.plot( + x_axis_vals, + vec_of_y_axis_vals[index], + colors[index], + linestyle=linestyle, + linewidth=linewidth, + marker=marker, + markeredgewidth=markeredgewidth, + markersize=markersize, + label=labels[index], + ) -def plot_vector3ds(vector3ds, - times, - label, - colors=['r', 'g', 'b'], - linewidth=1, - linestyle='-', - marker=None, - markeredgewidth=None, - markersize=1): - labels = [label + ' (X)', label + ' (Y)', label + ' (Z)'] - plot_vals(times, [vector3ds.xs, vector3ds.ys, vector3ds.zs], labels, colors, linewidth, linestyle, marker, - markeredgewidth, markersize) +def plot_vector3ds( + vector3ds, + times, + label, + colors=["r", "g", "b"], + linewidth=1, + linestyle="-", + marker=None, + markeredgewidth=None, + markersize=1, +): + labels = [label + " (X)", label + " (Y)", label + " (Z)"] + plot_vals( + times, + 
[vector3ds.xs, vector3ds.ys, vector3ds.zs], + labels, + colors, + linewidth, + linestyle, + marker, + markeredgewidth, + markersize, + ) -def plot_positions(poses, colors, linewidth=1, linestyle='-', marker=None, markeredgewidth=None, markersize=1): - plot_vector3ds(poses.positions, - poses.times, - poses.pose_type + ' Pos.', - linewidth=linewidth, - linestyle=linestyle, - marker=marker, - markeredgewidth=markeredgewidth, - markersize=markersize) +def plot_positions( + poses, + colors, + linewidth=1, + linestyle="-", + marker=None, + markeredgewidth=None, + markersize=1, +): + plot_vector3ds( + poses.positions, + poses.times, + poses.pose_type + " Pos.", + linewidth=linewidth, + linestyle=linestyle, + marker=marker, + markeredgewidth=markeredgewidth, + markersize=markersize, + ) -def plot_orientations(poses, colors, linewidth=1, linestyle='-', marker=None, markeredgewidth=None, markersize=1): - labels = [ - poses.pose_type + ' Orientation (Yaw)', poses.pose_type + ' Orientation (Roll)', - poses.pose_type + ' Orienation (Pitch)' - ] - plot_vals(poses.times, [poses.orientations.yaws, poses.orientations.rolls, poses.orientations.pitches], labels, - colors, linewidth, linestyle, marker, markeredgewidth, markersize) +def plot_orientations( + poses, + colors, + linewidth=1, + linestyle="-", + marker=None, + markeredgewidth=None, + markersize=1, +): + labels = [ + poses.pose_type + " Orientation (Yaw)", + poses.pose_type + " Orientation (Roll)", + poses.pose_type + " Orienation (Pitch)", + ] + plot_vals( + poses.times, + [poses.orientations.yaws, poses.orientations.rolls, poses.orientations.pitches], + labels, + colors, + linewidth, + linestyle, + marker, + markeredgewidth, + markersize, + ) diff --git a/tools/graph_bag/scripts/plot_parameter_sweep_results.py b/tools/graph_bag/scripts/plot_parameter_sweep_results.py index 72ae0c7cab..a57f425134 100755 --- a/tools/graph_bag/scripts/plot_parameter_sweep_results.py +++ 
b/tools/graph_bag/scripts/plot_parameter_sweep_results.py @@ -18,97 +18,116 @@ # under the License. import argparse + import matplotlib -matplotlib.use('pdf') -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages -import pandas as pd +matplotlib.use("pdf") import math import os import sys +import matplotlib.pyplot as plt +import pandas as pd +from matplotlib.backends.backend_pdf import PdfPages + + +def create_plot(pdf, csv_file, value_combos_file, prefix=""): + dataframe = pd.read_csv(csv_file) + rmses = dataframe[prefix + "rmse"] + x_axis_vals = [] + x_axis_label = "" + if value_combos_file: + value_combos_dataframe = pd.read_csv(value_combos_file) + if len(value_combos_dataframe.columns) > 1: + print( + "Value combos include more than one parameter, cannot use for x axis of plot" + ) + job_count = dataframe.shape[0] + x_axis_vals = list(range(job_count)) + x_axis_label = "Job Id" + else: + x_axis_label = value_combos_dataframe.columns[0] + x_axis_vals = value_combos_dataframe[x_axis_label] + if isinstance(x_axis_vals[0], str): + job_count = dataframe.shape[0] + x_axis_vals = list(range(job_count)) + x_axis_label = "Job Id" -def create_plot(pdf, csv_file, value_combos_file, prefix=''): - dataframe = pd.read_csv(csv_file) - rmses = dataframe[prefix + 'rmse'] - x_axis_vals = [] - x_axis_label = '' - if value_combos_file: - value_combos_dataframe = pd.read_csv(value_combos_file) - if (len(value_combos_dataframe.columns) > 1): - print('Value combos include more than one parameter, cannot use for x axis of plot') - job_count = dataframe.shape[0] - x_axis_vals = range(job_count) - x_axis_label = 'Job Id' else: - x_axis_label = value_combos_dataframe.columns[0] - x_axis_vals = value_combos_dataframe[x_axis_label] - if isinstance(x_axis_vals[0], basestring): job_count = dataframe.shape[0] - x_axis_vals = range(job_count) - x_axis_label = 'Job Id' - - else: - job_count = dataframe.shape[0] - x_axis_vals = range(job_count) - x_axis_label = 
'Job Id' + x_axis_vals = list(range(job_count)) + x_axis_label = "Job Id" - plt.figure() - plt.plot(x_axis_vals, rmses, linestyle='None', marker='o', markeredgewidth=0.1, markersize=10.5) - plt.ylabel(prefix + ' RMSE') - plt.title(prefix + ' RMSE vs. ' + x_axis_label) - x_range = x_axis_vals[len(x_axis_vals) - 1] - x_axis_vals[0] - first_x_val = x_axis_vals[0] - last_x_val = x_axis_vals[len(x_axis_vals) - 1] - # Use log scale if min and max x vals are more than 3 orders of magnitude apart - if (first_x_val != 0 and last_x_val != 0 and abs(math.log10(last_x_val) - math.log10(first_x_val)) > 3): - plt.xscale('log', basex=10) - # Extend x axis on either side using a log scale to make data more visible - if (first_x_val < last_x_val): - plt.xlim([first_x_val * 0.1, last_x_val * 10.0]) + plt.figure() + plt.plot( + x_axis_vals, + rmses, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=10.5, + ) + plt.ylabel(prefix + " RMSE") + plt.title(prefix + " RMSE vs. " + x_axis_label) + x_range = x_axis_vals[len(x_axis_vals) - 1] - x_axis_vals[0] + first_x_val = x_axis_vals[0] + last_x_val = x_axis_vals[len(x_axis_vals) - 1] + # Use log scale if min and max x vals are more than 3 orders of magnitude apart + if ( + first_x_val != 0 + and last_x_val != 0 + and abs(math.log10(last_x_val) - math.log10(first_x_val)) > 3 + ): + plt.xscale("log", basex=10) + # Extend x axis on either side using a log scale to make data more visible + if first_x_val < last_x_val: + plt.xlim([first_x_val * 0.1, last_x_val * 10.0]) + else: + plt.xlim([last_x_val * 0.1, first_x_val * 10.0]) else: - plt.xlim([last_x_val * 0.1, first_x_val * 10.0]) - else: - # Extend x axis on either side to make data more visible - x_buffer = x_range * 0.1 - plt.xlim([x_axis_vals[0] - x_buffer, x_axis_vals[len(x_axis_vals) - 1] + x_buffer]) - plt.ticklabel_format(useOffset=False) - plt.tight_layout() - plt.xlabel(x_axis_label) - pdf.savefig() - plt.close() + # Extend x axis on either side to make data more 
visible + x_buffer = x_range * 0.1 + plt.xlim( + [x_axis_vals[0] - x_buffer, x_axis_vals[len(x_axis_vals) - 1] + x_buffer] + ) + plt.ticklabel_format(useOffset=False) + plt.tight_layout() + plt.xlabel(x_axis_label) + pdf.savefig() + plt.close() def create_plots(output_file, csv_file, value_combos_file): - with PdfPages(output_file) as pdf: - # Graph RMSEs - create_plot(pdf, csv_file, value_combos_file) - create_plot(pdf, csv_file, value_combos_file, 'orientation_') - create_plot(pdf, csv_file, value_combos_file, 'integrated_') - create_plot(pdf, csv_file, value_combos_file, 'rel_') - create_plot(pdf, csv_file, value_combos_file, 'rel_orientation_') - create_plot(pdf, csv_file, value_combos_file, 'rel_integrated_') - # IMU Augmented RMSEs - create_plot(pdf, csv_file, value_combos_file, 'imu_augmented_') - create_plot(pdf, csv_file, value_combos_file, 'imu_augmented_orientation_') - create_plot(pdf, csv_file, value_combos_file, 'imu_augmented_integrated_') - create_plot(pdf, csv_file, value_combos_file, 'rel_imu_augmented_') - create_plot(pdf, csv_file, value_combos_file, 'rel_imu_augmented_orientation_') - create_plot(pdf, csv_file, value_combos_file, 'rel_imu_augmented_integrated_') + with PdfPages(output_file) as pdf: + # Graph RMSEs + create_plot(pdf, csv_file, value_combos_file) + create_plot(pdf, csv_file, value_combos_file, "orientation_") + create_plot(pdf, csv_file, value_combos_file, "integrated_") + create_plot(pdf, csv_file, value_combos_file, "rel_") + create_plot(pdf, csv_file, value_combos_file, "rel_orientation_") + create_plot(pdf, csv_file, value_combos_file, "rel_integrated_") + # IMU Augmented RMSEs + create_plot(pdf, csv_file, value_combos_file, "imu_augmented_") + create_plot(pdf, csv_file, value_combos_file, "imu_augmented_orientation_") + create_plot(pdf, csv_file, value_combos_file, "imu_augmented_integrated_") + create_plot(pdf, csv_file, value_combos_file, "rel_imu_augmented_") + create_plot(pdf, csv_file, value_combos_file, 
"rel_imu_augmented_orientation_") + create_plot(pdf, csv_file, value_combos_file, "rel_imu_augmented_integrated_") - # IMU Bias Tester RMSEs - create_plot(pdf, csv_file, value_combos_file, 'imu_bias_tester_') - create_plot(pdf, csv_file, value_combos_file, 'imu_bias_tester_orientation_') - create_plot(pdf, csv_file, value_combos_file, 'rel_imu_bias_tester_') - create_plot(pdf, csv_file, value_combos_file, 'rel_imu_bias_tester_orientation_') + # IMU Bias Tester RMSEs + create_plot(pdf, csv_file, value_combos_file, "imu_bias_tester_") + create_plot(pdf, csv_file, value_combos_file, "imu_bias_tester_orientation_") + create_plot(pdf, csv_file, value_combos_file, "rel_imu_bias_tester_") + create_plot( + pdf, csv_file, value_combos_file, "rel_imu_bias_tester_orientation_" + ) -if __name__ == '__main__': - parser = argparse.ArgumentParser() - # Csv file with combined results (or mean results if using bag_and_param_sweep) for each job - parser.add_argument('csv_file') - parser.add_argument('--output-file', default='param_sweep_results.pdf') - parser.add_argument('--value-combos-file') - args = parser.parse_args() - create_plots(args.output_file, args.csv_file, args.value_combos_file) +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Csv file with combined results (or mean results if using bag_and_param_sweep) for each job + parser.add_argument("csv_file") + parser.add_argument("--output-file", default="param_sweep_results.pdf") + parser.add_argument("--value-combos-file") + args = parser.parse_args() + create_plots(args.output_file, args.csv_file, args.value_combos_file) diff --git a/tools/graph_bag/scripts/plot_results.py b/tools/graph_bag/scripts/plot_results.py index ff6fed1b39..4774a9d09c 100644 --- a/tools/graph_bag/scripts/plot_results.py +++ b/tools/graph_bag/scripts/plot_results.py @@ -18,575 +18,800 @@ # under the License. 
import loc_states +import matplotlib import plot_helpers import poses -import velocities import rmse_utilities import utilities import vector3d_plotter +import velocities -import matplotlib -matplotlib.use('pdf') -import matplotlib.pyplot as plt -from matplotlib.backends.backend_pdf import PdfPages +matplotlib.use("pdf") +import csv +import math import geometry_msgs -import math +import matplotlib.pyplot as plt import rosbag - -import csv +from matplotlib.backends.backend_pdf import PdfPages def l2_map(vector3ds): - return map(lambda (x, y, z): math.sqrt(x*x + y*y + z*z), zip(vector3ds.xs, vector3ds.ys, vector3ds.zs)) - - -def add_graph_plots(pdf, sparse_mapping_poses, ar_tag_poses, graph_localization_states, - imu_augmented_graph_localization_poses): - colors = ['r', 'b', 'g'] - position_plotter = vector3d_plotter.Vector3dPlotter('Time (s)', 'Position (m)', 'Graph vs. Sparse Mapping Position', - True) - position_plotter.add_pose_position(sparse_mapping_poses, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - if ar_tag_poses.times: - position_plotter.add_pose_position(ar_tag_poses, linestyle='None', marker='x', markeredgewidth=0.1, markersize=1.5) - position_plotter.add_pose_position(graph_localization_states) - position_plotter.plot(pdf) - - # orientations - orientation_plotter = vector3d_plotter.Vector3dPlotter('Time (s)', 'Orientation (deg)', - 'Graph vs. Sparse Mapping Orientation', True) - orientation_plotter.add_pose_orientation(sparse_mapping_poses, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - if ar_tag_poses.times: - orientation_plotter.add_pose_orientation(ar_tag_poses, - linestyle='None', - marker='x', - markeredgewidth=0.1, - markersize=1.5) - orientation_plotter.add_pose_orientation(graph_localization_states) - orientation_plotter.plot(pdf) - - # Imu Augmented Loc vs. Loc - position_plotter = vector3d_plotter.Vector3dPlotter('Time (s)', 'Position (m)', - 'Graph vs. 
IMU Augmented Graph Position', True) - position_plotter.add_pose_position(graph_localization_states, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - - position_plotter.add_pose_position(imu_augmented_graph_localization_poses, linewidth=0.5) - position_plotter.plot(pdf) - - # orientations - orientation_plotter = vector3d_plotter.Vector3dPlotter('Time (s)', 'Orientation (deg)', - 'Graph vs. IMU Augmented Graph Orientation', True) - orientation_plotter.add_pose_orientation(graph_localization_states, marker='o', markeredgewidth=0.1, markersize=1.5) - orientation_plotter.add_pose_orientation(imu_augmented_graph_localization_poses, linewidth=0.5) - orientation_plotter.plot(pdf) - - # Velocity - plt.figure() - plot_helpers.plot_vector3ds(graph_localization_states.velocities, graph_localization_states.times, 'Vel.') - plt.xlabel('Time (s)') - plt.ylabel('Velocities') - plt.title('Graph Velocities') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Integrated Velocities - position_plotter = vector3d_plotter.Vector3dPlotter('Time (s)', 'Position (m)', - 'Integrated Graph Velocities vs. 
Sparse Mapping Position', True) - position_plotter.add_pose_position(sparse_mapping_poses, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - if ar_tag_poses.times: - position_plotter.add_pose_position(ar_tag_poses, linestyle='None', marker='x', markeredgewidth=0.1, markersize=1.5) - - integrated_graph_localization_states = utilities.integrate_velocities(graph_localization_states) - position_plotter.add_pose_position(integrated_graph_localization_states) - position_plotter.plot(pdf) - - -def plot_features(feature_counts, - times, - label, - color, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5): - plt.plot(times, - feature_counts, - color, - linestyle=linestyle, - marker=marker, - markeredgewidth=markeredgewidth, - markersize=markersize, - label=label) + return [ + math.sqrt(x_y_z[0] * x_y_z[0] + x_y_z[1] * x_y_z[1] + x_y_z[2] * x_y_z[2]) + for x_y_z in zip(vector3ds.xs, vector3ds.ys, vector3ds.zs) + ] + + +def add_graph_plots( + pdf, + sparse_mapping_poses, + ar_tag_poses, + graph_localization_states, + imu_augmented_graph_localization_poses, +): + colors = ["r", "b", "g"] + position_plotter = vector3d_plotter.Vector3dPlotter( + "Time (s)", "Position (m)", "Graph vs. Sparse Mapping Position", True + ) + position_plotter.add_pose_position( + sparse_mapping_poses, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + if ar_tag_poses.times: + position_plotter.add_pose_position( + ar_tag_poses, + linestyle="None", + marker="x", + markeredgewidth=0.1, + markersize=1.5, + ) + position_plotter.add_pose_position(graph_localization_states) + position_plotter.plot(pdf) + + # orientations + orientation_plotter = vector3d_plotter.Vector3dPlotter( + "Time (s)", "Orientation (deg)", "Graph vs. 
Sparse Mapping Orientation", True + ) + orientation_plotter.add_pose_orientation( + sparse_mapping_poses, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + if ar_tag_poses.times: + orientation_plotter.add_pose_orientation( + ar_tag_poses, + linestyle="None", + marker="x", + markeredgewidth=0.1, + markersize=1.5, + ) + orientation_plotter.add_pose_orientation(graph_localization_states) + orientation_plotter.plot(pdf) + + # Imu Augmented Loc vs. Loc + position_plotter = vector3d_plotter.Vector3dPlotter( + "Time (s)", "Position (m)", "Graph vs. IMU Augmented Graph Position", True + ) + position_plotter.add_pose_position( + graph_localization_states, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + + position_plotter.add_pose_position( + imu_augmented_graph_localization_poses, linewidth=0.5 + ) + position_plotter.plot(pdf) + + # orientations + orientation_plotter = vector3d_plotter.Vector3dPlotter( + "Time (s)", + "Orientation (deg)", + "Graph vs. IMU Augmented Graph Orientation", + True, + ) + orientation_plotter.add_pose_orientation( + graph_localization_states, marker="o", markeredgewidth=0.1, markersize=1.5 + ) + orientation_plotter.add_pose_orientation( + imu_augmented_graph_localization_poses, linewidth=0.5 + ) + orientation_plotter.plot(pdf) + + # Velocity + plt.figure() + plot_helpers.plot_vector3ds( + graph_localization_states.velocities, graph_localization_states.times, "Vel." + ) + plt.xlabel("Time (s)") + plt.ylabel("Velocities") + plt.title("Graph Velocities") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Integrated Velocities + position_plotter = vector3d_plotter.Vector3dPlotter( + "Time (s)", + "Position (m)", + "Integrated Graph Velocities vs. 
Sparse Mapping Position", + True, + ) + position_plotter.add_pose_position( + sparse_mapping_poses, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + if ar_tag_poses.times: + position_plotter.add_pose_position( + ar_tag_poses, + linestyle="None", + marker="x", + markeredgewidth=0.1, + markersize=1.5, + ) + + integrated_graph_localization_states = utilities.integrate_velocities( + graph_localization_states + ) + position_plotter.add_pose_position(integrated_graph_localization_states) + position_plotter.plot(pdf) + + +def plot_features( + feature_counts, + times, + label, + color, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, +): + plt.plot( + times, + feature_counts, + color, + linestyle=linestyle, + marker=marker, + markeredgewidth=markeredgewidth, + markersize=markersize, + label=label, + ) def add_feature_count_plots(pdf, graph_localization_states): - plt.figure() - plot_features(graph_localization_states.num_detected_ml_features, - graph_localization_states.times, - 'Det. VL', - 'b', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - plot_features(graph_localization_states.num_ml_projection_factors, - graph_localization_states.times, - 'VL Proj Factors', - 'r', - marker='x', - markeredgewidth=0.1, - markersize=1.5) - plt.xlabel('Time (s)') - plt.ylabel('ML Feature Counts') - plt.title('ML Feature Counts') - plt.legend(prop={'size': 6}) - plt.ylim(ymin=-1) - pdf.savefig() - plt.close() - - plt.figure() - plot_features(graph_localization_states.num_detected_of_features, - graph_localization_states.times, - 'Det. 
OF', - 'b', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - plot_features(graph_localization_states.num_of_factors, - graph_localization_states.times, - 'OF Factors', - 'r', - marker='x', - markeredgewidth=0.1, - markersize=1.5) - plt.xlabel('Time (s)') - plt.ylabel('Optical Flow Feature Counts') - plt.title('Optical Flow Feature Counts') - plt.legend(prop={'size': 6}) - plt.ylim(ymin=-1) - pdf.savefig() - plt.close() - - -def add_other_vector3d_plots(pdf, imu_augmented_graph_localization_states, sparse_mapping_poses, ar_tag_poses): - colors = ['r', 'b', 'g'] - - # Acceleration - plt.figure() - plot_helpers.plot_vector3ds(imu_augmented_graph_localization_states.accelerations, - imu_augmented_graph_localization_states.times, 'Acc.') - plt.xlabel('Time (s)') - plt.ylabel('Acceleration (m/s^2)') - plt.title('Acceleration') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Biases - # Plot Accelerometer Biases on different pages since they can start with quite different - # values, plotting on the same page will lead to a large y axis scale and hide subtle changes. 
- plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - imu_augmented_graph_localization_states.accelerometer_biases.xs, 'r') - plt.xlabel('Time (s)') - plt.ylabel('Accelerometer Biases (X)') - plt.title('Accelerometer Biases (X)') - pdf.savefig() - plt.close() - - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - imu_augmented_graph_localization_states.accelerometer_biases.ys, 'r') - plt.xlabel('Time (s)') - plt.ylabel('Accelerometer Biases (Y)') - plt.title('Accelerometer Biases (Y)') - pdf.savefig() - plt.close() - - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - imu_augmented_graph_localization_states.accelerometer_biases.zs, 'r') - plt.xlabel('Time (s)') - plt.ylabel('Accelerometer Biases (Z)') - plt.title('Accelerometer Biases (Z)') - pdf.savefig() - plt.close() - - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, imu_augmented_graph_localization_states.gyro_biases.xs, 'r') - plt.xlabel('Time (s)') - plt.ylabel('Gyro Biases (X)') - plt.title('Gyro Biases (X)') - pdf.savefig() - plt.close() - - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, imu_augmented_graph_localization_states.gyro_biases.ys, 'r') - plt.xlabel('Time (s)') - plt.ylabel('Gyro Biases (Y)') - plt.title('Gyro Biases (Y)') - pdf.savefig() - plt.close() - - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, imu_augmented_graph_localization_states.gyro_biases.zs, 'r') - plt.xlabel('Time (s)') - plt.ylabel('Gyro Biases (Z)') - plt.title('Gyro Biases (Z)') - pdf.savefig() - plt.close() - - # Angular Velocity - plt.figure() - plot_helpers.plot_vector3ds(imu_augmented_graph_localization_states.angular_velocities, - imu_augmented_graph_localization_states.times, 'Ang. 
Vel.') - plt.xlabel('Time (s)') - plt.ylabel('Angular Velocities') - plt.title('Angular Velocities') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Velocity - plt.figure() - plot_helpers.plot_vector3ds(imu_augmented_graph_localization_states.velocities, - imu_augmented_graph_localization_states.times, 'Vel.') - plt.xlabel('Time (s)') - plt.ylabel('Velocities') - plt.title('IMU Augmented Graph Velocities') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Integrated Velocities - plt.figure() - plot_helpers.plot_positions(sparse_mapping_poses, - colors, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - if ar_tag_poses.times: - plot_helpers.plot_positions(ar_tag_poses, colors, linestyle='None', marker='x', markeredgewidth=0.1, markersize=1.5) - integrated_imu_augmented_graph_localization_states = utilities.integrate_velocities( - imu_augmented_graph_localization_states) - plot_helpers.plot_positions(integrated_imu_augmented_graph_localization_states, colors, linewidth=0.5) - plt.xlabel('Time (s)') - plt.ylabel('Position (m)') - plt.title('Integrated IMU Augmented Graph Velocities vs. 
Sparse Mapping Position') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Position covariance - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - l2_map(imu_augmented_graph_localization_states.position_covariances), - 'r', - linewidth=0.5, - label='Position Covariance') - plt.title('Position Covariance') - plt.xlabel('Time (s)') - plt.ylabel('Position Covariance') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Orientation covariance - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - l2_map(imu_augmented_graph_localization_states.orientation_covariances), - 'r', - linewidth=0.5, - label='Orientation Covariance') - plt.title('Orientation Covariance (Quaternion)') - plt.xlabel('Time (s)') - plt.ylabel('Orientation Covariance') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Velocity covariance - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - l2_map(imu_augmented_graph_localization_states.velocity_covariances), - 'r', - linewidth=0.5, - label='Velocity Covariance') - plt.title('Velocity Covariance') - plt.xlabel('Time (s)') - plt.ylabel('Velocity Covariance') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Accel Bias covariance - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - l2_map(imu_augmented_graph_localization_states.accelerometer_bias_covariances), - 'r', - linewidth=0.5, - label='Accelerometer Bias Covariance') - plt.title('Accelerometer Bias Covariance') - plt.xlabel('Time (s)') - plt.ylabel('Accelerometer Bias Covariance') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # Gyro Bias covariance - plt.figure() - plt.plot(imu_augmented_graph_localization_states.times, - l2_map(imu_augmented_graph_localization_states.gyro_bias_covariances), - 'r', - linewidth=0.5, - label='Gyro Bias Covariance') - plt.title('Gyro Bias Covariance') - plt.xlabel('Time (s)') - plt.ylabel('Gyro 
Bias Covariance') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - -def plot_loc_state_stats(pdf, - localization_states, - sparse_mapping_poses, - output_csv_file, - prefix='', - atol=0, - plot_integrated_velocities=True, - rmse_rel_start_time=0, - rmse_rel_end_time=-1): - plot_loc_state_stats_abs(pdf, localization_states, sparse_mapping_poses, output_csv_file, prefix, atol, - plot_integrated_velocities, rmse_rel_start_time, rmse_rel_end_time) - plot_loc_state_stats_rel(pdf, localization_states, sparse_mapping_poses, output_csv_file, prefix, atol, - plot_integrated_velocities, rmse_rel_start_time, rmse_rel_end_time) - - -def plot_loc_state_stats_abs(pdf, - localization_states, - sparse_mapping_poses, - output_csv_file, - prefix='', - atol=0, - plot_integrated_velocities=True, - rmse_rel_start_time=0, - rmse_rel_end_time=-1): - rmse = rmse_utilities.rmse_timestamped_poses(localization_states, sparse_mapping_poses, True, atol, - rmse_rel_start_time, rmse_rel_end_time) - integrated_rmse = [] - if plot_integrated_velocities: - integrated_localization_states = utilities.integrate_velocities(localization_states) - integrated_rmse = rmse_utilities.rmse_timestamped_poses(integrated_localization_states, sparse_mapping_poses, False, - atol, rmse_rel_start_time, rmse_rel_end_time) - stats = prefix + ' pos rmse: ' + str(rmse[0]) + '\n' + 'orientation rmse: ' + str(rmse[1]) - if plot_integrated_velocities: - stats += '\n' + 'integrated rmse: ' + str(integrated_rmse[0]) - with open(output_csv_file, 'a') as output_csv: - csv_writer = csv.writer(output_csv, lineterminator='\n') - csv_writer.writerow([prefix + 'rmse', str(rmse[0])]) - csv_writer.writerow([prefix + 'orientation_rmse', str(rmse[1])]) + plt.figure() + plot_features( + graph_localization_states.num_detected_ml_features, + graph_localization_states.times, + "Det. 
VL", + "b", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + plot_features( + graph_localization_states.num_ml_projection_factors, + graph_localization_states.times, + "VL Proj Factors", + "r", + marker="x", + markeredgewidth=0.1, + markersize=1.5, + ) + plt.xlabel("Time (s)") + plt.ylabel("ML Feature Counts") + plt.title("ML Feature Counts") + plt.legend(prop={"size": 6}) + plt.ylim(ymin=-1) + pdf.savefig() + plt.close() + + plt.figure() + plot_features( + graph_localization_states.num_detected_of_features, + graph_localization_states.times, + "Det. OF", + "b", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + plot_features( + graph_localization_states.num_of_factors, + graph_localization_states.times, + "OF Factors", + "r", + marker="x", + markeredgewidth=0.1, + markersize=1.5, + ) + plt.xlabel("Time (s)") + plt.ylabel("Optical Flow Feature Counts") + plt.title("Optical Flow Feature Counts") + plt.legend(prop={"size": 6}) + plt.ylim(ymin=-1) + pdf.savefig() + plt.close() + + +def add_other_vector3d_plots( + pdf, imu_augmented_graph_localization_states, sparse_mapping_poses, ar_tag_poses +): + colors = ["r", "b", "g"] + + # Acceleration + plt.figure() + plot_helpers.plot_vector3ds( + imu_augmented_graph_localization_states.accelerations, + imu_augmented_graph_localization_states.times, + "Acc.", + ) + plt.xlabel("Time (s)") + plt.ylabel("Acceleration (m/s^2)") + plt.title("Acceleration") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Biases + # Plot Accelerometer Biases on different pages since they can start with quite different + # values, plotting on the same page will lead to a large y axis scale and hide subtle changes. 
+ plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + imu_augmented_graph_localization_states.accelerometer_biases.xs, + "r", + ) + plt.xlabel("Time (s)") + plt.ylabel("Accelerometer Biases (X)") + plt.title("Accelerometer Biases (X)") + pdf.savefig() + plt.close() + + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + imu_augmented_graph_localization_states.accelerometer_biases.ys, + "r", + ) + plt.xlabel("Time (s)") + plt.ylabel("Accelerometer Biases (Y)") + plt.title("Accelerometer Biases (Y)") + pdf.savefig() + plt.close() + + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + imu_augmented_graph_localization_states.accelerometer_biases.zs, + "r", + ) + plt.xlabel("Time (s)") + plt.ylabel("Accelerometer Biases (Z)") + plt.title("Accelerometer Biases (Z)") + pdf.savefig() + plt.close() + + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + imu_augmented_graph_localization_states.gyro_biases.xs, + "r", + ) + plt.xlabel("Time (s)") + plt.ylabel("Gyro Biases (X)") + plt.title("Gyro Biases (X)") + pdf.savefig() + plt.close() + + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + imu_augmented_graph_localization_states.gyro_biases.ys, + "r", + ) + plt.xlabel("Time (s)") + plt.ylabel("Gyro Biases (Y)") + plt.title("Gyro Biases (Y)") + pdf.savefig() + plt.close() + + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + imu_augmented_graph_localization_states.gyro_biases.zs, + "r", + ) + plt.xlabel("Time (s)") + plt.ylabel("Gyro Biases (Z)") + plt.title("Gyro Biases (Z)") + pdf.savefig() + plt.close() + + # Angular Velocity + plt.figure() + plot_helpers.plot_vector3ds( + imu_augmented_graph_localization_states.angular_velocities, + imu_augmented_graph_localization_states.times, + "Ang. 
Vel.", + ) + plt.xlabel("Time (s)") + plt.ylabel("Angular Velocities") + plt.title("Angular Velocities") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Velocity + plt.figure() + plot_helpers.plot_vector3ds( + imu_augmented_graph_localization_states.velocities, + imu_augmented_graph_localization_states.times, + "Vel.", + ) + plt.xlabel("Time (s)") + plt.ylabel("Velocities") + plt.title("IMU Augmented Graph Velocities") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Integrated Velocities + plt.figure() + plot_helpers.plot_positions( + sparse_mapping_poses, + colors, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + if ar_tag_poses.times: + plot_helpers.plot_positions( + ar_tag_poses, + colors, + linestyle="None", + marker="x", + markeredgewidth=0.1, + markersize=1.5, + ) + integrated_imu_augmented_graph_localization_states = utilities.integrate_velocities( + imu_augmented_graph_localization_states + ) + plot_helpers.plot_positions( + integrated_imu_augmented_graph_localization_states, colors, linewidth=0.5 + ) + plt.xlabel("Time (s)") + plt.ylabel("Position (m)") + plt.title("Integrated IMU Augmented Graph Velocities vs. 
Sparse Mapping Position") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Position covariance + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + l2_map(imu_augmented_graph_localization_states.position_covariances), + "r", + linewidth=0.5, + label="Position Covariance", + ) + plt.title("Position Covariance") + plt.xlabel("Time (s)") + plt.ylabel("Position Covariance") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Orientation covariance + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + l2_map(imu_augmented_graph_localization_states.orientation_covariances), + "r", + linewidth=0.5, + label="Orientation Covariance", + ) + plt.title("Orientation Covariance (Quaternion)") + plt.xlabel("Time (s)") + plt.ylabel("Orientation Covariance") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Velocity covariance + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + l2_map(imu_augmented_graph_localization_states.velocity_covariances), + "r", + linewidth=0.5, + label="Velocity Covariance", + ) + plt.title("Velocity Covariance") + plt.xlabel("Time (s)") + plt.ylabel("Velocity Covariance") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Accel Bias covariance + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + l2_map(imu_augmented_graph_localization_states.accelerometer_bias_covariances), + "r", + linewidth=0.5, + label="Accelerometer Bias Covariance", + ) + plt.title("Accelerometer Bias Covariance") + plt.xlabel("Time (s)") + plt.ylabel("Accelerometer Bias Covariance") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # Gyro Bias covariance + plt.figure() + plt.plot( + imu_augmented_graph_localization_states.times, + l2_map(imu_augmented_graph_localization_states.gyro_bias_covariances), + "r", + linewidth=0.5, + label="Gyro Bias Covariance", + ) + plt.title("Gyro Bias Covariance") + 
plt.xlabel("Time (s)") + plt.ylabel("Gyro Bias Covariance") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + +def plot_loc_state_stats( + pdf, + localization_states, + sparse_mapping_poses, + output_csv_file, + prefix="", + atol=0, + plot_integrated_velocities=True, + rmse_rel_start_time=0, + rmse_rel_end_time=-1, +): + plot_loc_state_stats_abs( + pdf, + localization_states, + sparse_mapping_poses, + output_csv_file, + prefix, + atol, + plot_integrated_velocities, + rmse_rel_start_time, + rmse_rel_end_time, + ) + plot_loc_state_stats_rel( + pdf, + localization_states, + sparse_mapping_poses, + output_csv_file, + prefix, + atol, + plot_integrated_velocities, + rmse_rel_start_time, + rmse_rel_end_time, + ) + + +def plot_loc_state_stats_abs( + pdf, + localization_states, + sparse_mapping_poses, + output_csv_file, + prefix="", + atol=0, + plot_integrated_velocities=True, + rmse_rel_start_time=0, + rmse_rel_end_time=-1, +): + rmse = rmse_utilities.rmse_timestamped_poses( + localization_states, + sparse_mapping_poses, + True, + atol, + rmse_rel_start_time, + rmse_rel_end_time, + ) + integrated_rmse = [] if plot_integrated_velocities: - csv_writer.writerow([prefix + 'integrated_rmse', str(integrated_rmse[0])]) - plt.figure() - plt.axis('off') - plt.text(0.0, 0.5, stats) - pdf.savefig() - - -def plot_loc_state_stats_rel(pdf, - localization_states, - sparse_mapping_poses, - output_csv_file, - prefix='', - atol=0, - plot_integrated_velocities=True, - rmse_rel_start_time=0, - rmse_rel_end_time=-1): - rmse = rmse_utilities.rmse_timestamped_poses_relative(localization_states, sparse_mapping_poses, True, atol, - rmse_rel_start_time, rmse_rel_end_time) - integrated_rmse = [] - if plot_integrated_velocities: - integrated_localization_states = utilities.integrate_velocities(localization_states) - integrated_rmse = rmse_utilities.rmse_timestamped_poses_relative(integrated_localization_states, - sparse_mapping_poses, False, atol, - rmse_rel_start_time, 
rmse_rel_end_time) - stats = prefix + ' rel pos rmse: ' + str(rmse[0]) + '\n' + 'rel orientation rmse: ' + str(rmse[1]) - if plot_integrated_velocities: - stats += '\n' + 'rel integrated rmse: ' + str(integrated_rmse[0]) - - with open(output_csv_file, 'a') as output_csv: - csv_writer = csv.writer(output_csv, lineterminator='\n') - csv_writer.writerow(['rel_' + prefix + 'rmse', str(rmse[0])]) - csv_writer.writerow(['rel_' + prefix + 'orientation_rmse', str(rmse[1])]) + integrated_localization_states = utilities.integrate_velocities( + localization_states + ) + integrated_rmse = rmse_utilities.rmse_timestamped_poses( + integrated_localization_states, + sparse_mapping_poses, + False, + atol, + rmse_rel_start_time, + rmse_rel_end_time, + ) + stats = ( + prefix + + " pos rmse: " + + str(rmse[0]) + + "\n" + + "orientation rmse: " + + str(rmse[1]) + ) if plot_integrated_velocities: - csv_writer.writerow(['rel_' + prefix + 'integrated_rmse', str(integrated_rmse[0])]) - plt.figure() - plt.axis('off') - plt.text(0.0, 0.5, stats) - pdf.savefig() + stats += "\n" + "integrated rmse: " + str(integrated_rmse[0]) + with open(output_csv_file, "a") as output_csv: + csv_writer = csv.writer(output_csv, lineterminator="\n") + csv_writer.writerow([prefix + "rmse", str(rmse[0])]) + csv_writer.writerow([prefix + "orientation_rmse", str(rmse[1])]) + if plot_integrated_velocities: + csv_writer.writerow([prefix + "integrated_rmse", str(integrated_rmse[0])]) + plt.figure() + plt.axis("off") + plt.text(0.0, 0.5, stats) + pdf.savefig() + + +def plot_loc_state_stats_rel( + pdf, + localization_states, + sparse_mapping_poses, + output_csv_file, + prefix="", + atol=0, + plot_integrated_velocities=True, + rmse_rel_start_time=0, + rmse_rel_end_time=-1, +): + rmse = rmse_utilities.rmse_timestamped_poses_relative( + localization_states, + sparse_mapping_poses, + True, + atol, + rmse_rel_start_time, + rmse_rel_end_time, + ) + integrated_rmse = [] + if plot_integrated_velocities: + 
integrated_localization_states = utilities.integrate_velocities( + localization_states + ) + integrated_rmse = rmse_utilities.rmse_timestamped_poses_relative( + integrated_localization_states, + sparse_mapping_poses, + False, + atol, + rmse_rel_start_time, + rmse_rel_end_time, + ) + stats = ( + prefix + + " rel pos rmse: " + + str(rmse[0]) + + "\n" + + "rel orientation rmse: " + + str(rmse[1]) + ) + if plot_integrated_velocities: + stats += "\n" + "rel integrated rmse: " + str(integrated_rmse[0]) + + with open(output_csv_file, "a") as output_csv: + csv_writer = csv.writer(output_csv, lineterminator="\n") + csv_writer.writerow(["rel_" + prefix + "rmse", str(rmse[0])]) + csv_writer.writerow(["rel_" + prefix + "orientation_rmse", str(rmse[1])]) + if plot_integrated_velocities: + csv_writer.writerow( + ["rel_" + prefix + "integrated_rmse", str(integrated_rmse[0])] + ) + plt.figure() + plt.axis("off") + plt.text(0.0, 0.5, stats) + pdf.savefig() def add_imu_bias_tester_poses(pdf, imu_bias_tester_poses, sparse_mapping_poses): - colors = ['r', 'b', 'g'] - plt.figure() - plot_helpers.plot_positions(sparse_mapping_poses, - colors, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - plot_helpers.plot_positions(imu_bias_tester_poses, colors, linewidth=0.5) - plt.xlabel('Time (s)') - plt.ylabel('Position (m)') - plt.title('Imu Bias Tester vs. Sparse Mapping Position') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - # orientations - plt.figure() - plot_helpers.plot_orientations(sparse_mapping_poses, - colors, - linestyle='None', - marker='o', - markeredgewidth=0.1, - markersize=1.5) - plot_helpers.plot_orientations(imu_bias_tester_poses, colors, linewidth=0.5) - plt.xlabel('Time (s)') - plt.ylabel('Orienation (deg)') - plt.title('Imu Bias Tester vs. 
Sparse Mapping Orientation') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() + colors = ["r", "b", "g"] + plt.figure() + plot_helpers.plot_positions( + sparse_mapping_poses, + colors, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + plot_helpers.plot_positions(imu_bias_tester_poses, colors, linewidth=0.5) + plt.xlabel("Time (s)") + plt.ylabel("Position (m)") + plt.title("Imu Bias Tester vs. Sparse Mapping Position") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + # orientations + plt.figure() + plot_helpers.plot_orientations( + sparse_mapping_poses, + colors, + linestyle="None", + marker="o", + markeredgewidth=0.1, + markersize=1.5, + ) + plot_helpers.plot_orientations(imu_bias_tester_poses, colors, linewidth=0.5) + plt.xlabel("Time (s)") + plt.ylabel("Orienation (deg)") + plt.title("Imu Bias Tester vs. Sparse Mapping Orientation") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() def add_imu_bias_tester_velocities(pdf, imu_bias_tester_velocities): - plt.figure() - plot_helpers.plot_vector3ds(imu_bias_tester_velocities.velocities, imu_bias_tester_velocities.times, 'Vel.') - plt.xlabel('Time (s)') - plt.ylabel('Velocities') - plt.title('IMU Bias Tester Velocities') - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - -def add_other_loc_plots(pdf, graph_localization_states, imu_augmented_graph_localization_states, sparse_mapping_poses, - ar_tag_poses): - add_feature_count_plots(pdf, graph_localization_states) - add_other_vector3d_plots(pdf, imu_augmented_graph_localization_states, sparse_mapping_poses, ar_tag_poses) + plt.figure() + plot_helpers.plot_vector3ds( + imu_bias_tester_velocities.velocities, imu_bias_tester_velocities.times, "Vel." 
+ ) + plt.xlabel("Time (s)") + plt.ylabel("Velocities") + plt.title("IMU Bias Tester Velocities") + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + +def add_other_loc_plots( + pdf, + graph_localization_states, + imu_augmented_graph_localization_states, + sparse_mapping_poses, + ar_tag_poses, +): + add_feature_count_plots(pdf, graph_localization_states) + add_other_vector3d_plots( + pdf, imu_augmented_graph_localization_states, sparse_mapping_poses, ar_tag_poses + ) def load_pose_msgs(vec_of_poses, bag, bag_start_time): - topics = [poses.topic for poses in vec_of_poses] - for topic, msg, t in bag.read_messages(topics): - for poses in vec_of_poses: - if poses.topic == topic: - poses.add_msg(msg, msg.header.stamp, bag_start_time) - break + topics = [poses.topic for poses in vec_of_poses] + for topic, msg, t in bag.read_messages(topics): + for poses in vec_of_poses: + if poses.topic == topic: + poses.add_msg(msg, msg.header.stamp, bag_start_time) + break def load_loc_state_msgs(vec_of_loc_states, bag, bag_start_time): - topics = [loc_states.topic for loc_states in vec_of_loc_states] - for topic, msg, t in bag.read_messages(topics): - for loc_states in vec_of_loc_states: - if loc_states.topic == topic: - loc_states.add_loc_state(msg, bag_start_time) - break + topics = [loc_states.topic for loc_states in vec_of_loc_states] + for topic, msg, t in bag.read_messages(topics): + for loc_states in vec_of_loc_states: + if loc_states.topic == topic: + loc_states.add_loc_state(msg, bag_start_time) + break def load_velocity_msgs(velocities, bag, bag_start_time): - topics = [velocities.topic] - for topic, msg, t in bag.read_messages(topics): - velocities.add_msg(msg, msg.header.stamp, bag_start_time) + topics = [velocities.topic] + for topic, msg, t in bag.read_messages(topics): + velocities.add_msg(msg, msg.header.stamp, bag_start_time) def has_topic(bag, topic): - topics = bag.get_type_and_topic_info().topics - return topic in topics + topics = 
bag.get_type_and_topic_info().topics + return topic in topics # Groundtruth bag must have the same start time as other bagfile, otherwise RMSE calculations will be flawed -def create_plots(bagfile, - output_pdf_file, - output_csv_file='results.csv', - groundtruth_bagfile=None, - rmse_rel_start_time=0, - rmse_rel_end_time=-1): - bag = rosbag.Bag(bagfile) - groundtruth_bag = rosbag.Bag(groundtruth_bagfile) if groundtruth_bagfile else bag - bag_start_time = bag.get_start_time() - - has_imu_augmented_graph_localization_state = has_topic(bag, '/gnc/ekf') - has_imu_bias_tester_poses = has_topic(bag, '/imu_bias_tester/pose') - sparse_mapping_poses = poses.Poses('Sparse Mapping', '/sparse_mapping/pose') - ar_tag_poses = poses.Poses('AR Tag', '/ar_tag/pose') - imu_bias_tester_poses = poses.Poses('Imu Bias Tester', '/imu_bias_tester/pose') - vec_of_poses = [ar_tag_poses, imu_bias_tester_poses] - load_pose_msgs(vec_of_poses, bag, bag_start_time) - groundtruth_vec_of_poses = [sparse_mapping_poses] - load_pose_msgs(groundtruth_vec_of_poses, groundtruth_bag, bag_start_time) - - graph_localization_states = loc_states.LocStates('Graph Localization', '/graph_loc/state') - imu_augmented_graph_localization_states = loc_states.LocStates('Imu Augmented Graph Localization', '/gnc/ekf') - vec_of_loc_states = [graph_localization_states, imu_augmented_graph_localization_states] - load_loc_state_msgs(vec_of_loc_states, bag, bag_start_time) - - imu_bias_tester_velocities = velocities.Velocities('Imu Bias Tester', '/imu_bias_tester/velocity') - load_velocity_msgs(imu_bias_tester_velocities, bag, bag_start_time) - - bag.close() - - with PdfPages(output_pdf_file) as pdf: - add_graph_plots(pdf, sparse_mapping_poses, ar_tag_poses, graph_localization_states, - imu_augmented_graph_localization_states) - if has_imu_bias_tester_poses: - add_imu_bias_tester_poses(pdf, imu_bias_tester_poses, sparse_mapping_poses) - add_imu_bias_tester_velocities(pdf, imu_bias_tester_velocities) - if 
has_imu_augmented_graph_localization_state: - add_other_loc_plots(pdf, graph_localization_states, imu_augmented_graph_localization_states, sparse_mapping_poses, - ar_tag_poses) - else: - add_other_loc_plots(pdf, graph_localization_states, graph_localization_states) - plot_loc_state_stats(pdf, - graph_localization_states, - sparse_mapping_poses, - output_csv_file, - rmse_rel_start_time=rmse_rel_start_time, - rmse_rel_end_time=rmse_rel_end_time) - plot_loc_state_stats(pdf, - imu_augmented_graph_localization_states, - sparse_mapping_poses, - output_csv_file, - 'imu_augmented_', - 0.01, - rmse_rel_start_time=rmse_rel_start_time, - rmse_rel_end_time=rmse_rel_end_time) - if has_imu_bias_tester_poses: - plot_loc_state_stats(pdf, - imu_bias_tester_poses, - sparse_mapping_poses, - output_csv_file, - 'imu_bias_tester_', - 0.01, - False, - rmse_rel_start_time=rmse_rel_start_time, - rmse_rel_end_time=rmse_rel_end_time) +def create_plots( + bagfile, + output_pdf_file, + output_csv_file="results.csv", + groundtruth_bagfile=None, + rmse_rel_start_time=0, + rmse_rel_end_time=-1, +): + bag = rosbag.Bag(bagfile) + groundtruth_bag = rosbag.Bag(groundtruth_bagfile) if groundtruth_bagfile else bag + bag_start_time = bag.get_start_time() + + has_imu_augmented_graph_localization_state = has_topic(bag, "/gnc/ekf") + has_imu_bias_tester_poses = has_topic(bag, "/imu_bias_tester/pose") + sparse_mapping_poses = poses.Poses("Sparse Mapping", "/sparse_mapping/pose") + ar_tag_poses = poses.Poses("AR Tag", "/ar_tag/pose") + imu_bias_tester_poses = poses.Poses("Imu Bias Tester", "/imu_bias_tester/pose") + vec_of_poses = [ar_tag_poses, imu_bias_tester_poses] + load_pose_msgs(vec_of_poses, bag, bag_start_time) + groundtruth_vec_of_poses = [sparse_mapping_poses] + load_pose_msgs(groundtruth_vec_of_poses, groundtruth_bag, bag_start_time) + + graph_localization_states = loc_states.LocStates( + "Graph Localization", "/graph_loc/state" + ) + imu_augmented_graph_localization_states = loc_states.LocStates( 
+ "Imu Augmented Graph Localization", "/gnc/ekf" + ) + vec_of_loc_states = [ + graph_localization_states, + imu_augmented_graph_localization_states, + ] + load_loc_state_msgs(vec_of_loc_states, bag, bag_start_time) + + imu_bias_tester_velocities = velocities.Velocities( + "Imu Bias Tester", "/imu_bias_tester/velocity" + ) + load_velocity_msgs(imu_bias_tester_velocities, bag, bag_start_time) + + bag.close() + + with PdfPages(output_pdf_file) as pdf: + add_graph_plots( + pdf, + sparse_mapping_poses, + ar_tag_poses, + graph_localization_states, + imu_augmented_graph_localization_states, + ) + if has_imu_bias_tester_poses: + add_imu_bias_tester_poses(pdf, imu_bias_tester_poses, sparse_mapping_poses) + add_imu_bias_tester_velocities(pdf, imu_bias_tester_velocities) + if has_imu_augmented_graph_localization_state: + add_other_loc_plots( + pdf, + graph_localization_states, + imu_augmented_graph_localization_states, + sparse_mapping_poses, + ar_tag_poses, + ) + else: + add_other_loc_plots( + pdf, graph_localization_states, graph_localization_states + ) + plot_loc_state_stats( + pdf, + graph_localization_states, + sparse_mapping_poses, + output_csv_file, + rmse_rel_start_time=rmse_rel_start_time, + rmse_rel_end_time=rmse_rel_end_time, + ) + plot_loc_state_stats( + pdf, + imu_augmented_graph_localization_states, + sparse_mapping_poses, + output_csv_file, + "imu_augmented_", + 0.01, + rmse_rel_start_time=rmse_rel_start_time, + rmse_rel_end_time=rmse_rel_end_time, + ) + if has_imu_bias_tester_poses: + plot_loc_state_stats( + pdf, + imu_bias_tester_poses, + sparse_mapping_poses, + output_csv_file, + "imu_bias_tester_", + 0.01, + False, + rmse_rel_start_time=rmse_rel_start_time, + rmse_rel_end_time=rmse_rel_end_time, + ) diff --git a/tools/graph_bag/scripts/plot_results_main.py b/tools/graph_bag/scripts/plot_results_main.py index a52f8e409b..3717d88f04 100755 --- a/tools/graph_bag/scripts/plot_results_main.py +++ b/tools/graph_bag/scripts/plot_results_main.py @@ -17,27 +17,32 @@ 
# License for the specific language governing permissions and limitations # under the License. -import plot_results - import argparse - import os import sys -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('bagfile') - parser.add_argument('--output-file', default='output.pdf') - parser.add_argument('--output-csv-file', default='results.csv') - parser.add_argument('-g', '--groundtruth-bagfile', default=None) - parser.add_argument('--rmse-rel-start-time', type=float, default=0) - parser.add_argument('--rmse-rel-end-time', type=float, default=-1) - args = parser.parse_args() - if not os.path.isfile(args.bagfile): - print('Bag file ' + args.bagfile + ' does not exist.') - sys.exit() - if args.groundtruth_bagfile and not os.path.isfile(args.groundtruth_bagfile): - print('Groundtruth Bag file ' + args.groundtruth_bagfile + ' does not exist.') - sys.exit() - plot_results.create_plots(args.bagfile, args.output_file, args.output_csv_file, args.groundtruth_bagfile, - args.rmse_rel_start_time, args.rmse_rel_end_time) +import plot_results + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("bagfile") + parser.add_argument("--output-file", default="output.pdf") + parser.add_argument("--output-csv-file", default="results.csv") + parser.add_argument("-g", "--groundtruth-bagfile", default=None) + parser.add_argument("--rmse-rel-start-time", type=float, default=0) + parser.add_argument("--rmse-rel-end-time", type=float, default=-1) + args = parser.parse_args() + if not os.path.isfile(args.bagfile): + print(("Bag file " + args.bagfile + " does not exist.")) + sys.exit() + if args.groundtruth_bagfile and not os.path.isfile(args.groundtruth_bagfile): + print(("Groundtruth Bag file " + args.groundtruth_bagfile + " does not exist.")) + sys.exit() + plot_results.create_plots( + args.bagfile, + args.output_file, + args.output_csv_file, + args.groundtruth_bagfile, + args.rmse_rel_start_time, + 
args.rmse_rel_end_time, + ) diff --git a/tools/graph_bag/scripts/poses.py b/tools/graph_bag/scripts/poses.py index c92f7140d1..0dafd970a9 100644 --- a/tools/graph_bag/scripts/poses.py +++ b/tools/graph_bag/scripts/poses.py @@ -17,31 +17,40 @@ # License for the specific language governing permissions and limitations # under the License. -import vector3ds import orientations - import scipy.spatial.transform +import vector3ds class Poses(object): - - def __init__(self, pose_type, topic): - self.positions = vector3ds.Vector3ds() - self.orientations = orientations.Orientations() - self.times = [] - self.pose_type = pose_type - self.topic = topic - - def add_pose(self, pose_msg, timestamp, bag_start_time=0): - self.positions.add(pose_msg.position.x, pose_msg.position.y, pose_msg.position.z) - euler_angles = scipy.spatial.transform.Rotation.from_quat( - [pose_msg.orientation.x, pose_msg.orientation.y, pose_msg.orientation.z, - pose_msg.orientation.w]).as_euler('ZYX', degrees=True) - self.orientations.add(euler_angles[0], euler_angles[1], euler_angles[2]) - self.times.append(timestamp.secs + 1e-9 * timestamp.nsecs - bag_start_time) - - def add_msg(self, msg, timestamp, bag_start_time=0): - self.add_pose(msg.pose, timestamp, bag_start_time) - - def position_vector(self, index): - return [self.positions.xs[index], self.positions.ys[index], self.positions.zs[index]] + def __init__(self, pose_type, topic): + self.positions = vector3ds.Vector3ds() + self.orientations = orientations.Orientations() + self.times = [] + self.pose_type = pose_type + self.topic = topic + + def add_pose(self, pose_msg, timestamp, bag_start_time=0): + self.positions.add( + pose_msg.position.x, pose_msg.position.y, pose_msg.position.z + ) + euler_angles = scipy.spatial.transform.Rotation.from_quat( + [ + pose_msg.orientation.x, + pose_msg.orientation.y, + pose_msg.orientation.z, + pose_msg.orientation.w, + ] + ).as_euler("ZYX", degrees=True) + self.orientations.add(euler_angles[0], euler_angles[1], 
euler_angles[2]) + self.times.append(timestamp.secs + 1e-9 * timestamp.nsecs - bag_start_time) + + def add_msg(self, msg, timestamp, bag_start_time=0): + self.add_pose(msg.pose, timestamp, bag_start_time) + + def position_vector(self, index): + return [ + self.positions.xs[index], + self.positions.ys[index], + self.positions.zs[index], + ] diff --git a/tools/graph_bag/scripts/rmse_utilities.py b/tools/graph_bag/scripts/rmse_utilities.py index cf77837ff8..a6b9a81e66 100644 --- a/tools/graph_bag/scripts/rmse_utilities.py +++ b/tools/graph_bag/scripts/rmse_utilities.py @@ -17,136 +17,176 @@ # License for the specific language governing permissions and limitations # under the License. -import poses +import bisect +import math import numpy as np +import poses import scipy.spatial.transform -import bisect -import math - # Assumes poses_a and poses_b are sorted in time -def get_same_timestamp_poses(poses_a, poses_b, add_orientations=True, abs_tol=0, rel_start_time=0, rel_end_time=-1): - trimmed_poses_a = poses.Poses(poses_a.pose_type, poses_a.topic) - trimmed_poses_b = poses.Poses(poses_b.pose_type, poses_b.topic) - poses_a_size = len(poses_a.times) - poses_b_size = len(poses_b.times) - a_index = 0 - b_index = 0 - # Increment a and b index as needed. 
Add same timestamped poses to trimmed poses containers - while (a_index < poses_a_size) and (b_index < poses_b_size): - a_time = poses_a.times[a_index] - b_time = poses_b.times[b_index] - - # Check if times are within given start and end time bounds - if a_time < rel_start_time: - a_index += 1 - continue - if b_time < rel_start_time: - b_index += 1 - continue - # rel_end_time less than zero indicates no bound on end time - if rel_end_time >= 0: - if a_time > rel_end_time or b_time > rel_end_time: - break - - if (np.isclose(a_time, b_time, rtol=0, atol=abs_tol)): - trimmed_poses_a.positions.add_vector3d(poses_a.positions.get_vector3d(a_index)) - trimmed_poses_a.times.append(poses_a.times[a_index]) - trimmed_poses_b.positions.add_vector3d(poses_b.positions.get_vector3d(b_index)) - trimmed_poses_b.times.append(poses_b.times[b_index]) - if add_orientations: - trimmed_poses_a.orientations.add_euler(poses_a.orientations.get_euler(a_index)) - trimmed_poses_b.orientations.add_euler(poses_b.orientations.get_euler(b_index)) - a_index += 1 - b_index += 1 - elif (a_time < b_time): - a_index += 1 - else: - b_index += 1 - return trimmed_poses_a, trimmed_poses_b +def get_same_timestamp_poses( + poses_a, + poses_b, + add_orientations=True, + abs_tol=0, + rel_start_time=0, + rel_end_time=-1, +): + trimmed_poses_a = poses.Poses(poses_a.pose_type, poses_a.topic) + trimmed_poses_b = poses.Poses(poses_b.pose_type, poses_b.topic) + poses_a_size = len(poses_a.times) + poses_b_size = len(poses_b.times) + a_index = 0 + b_index = 0 + # Increment a and b index as needed. 
Add same timestamped poses to trimmed poses containers + while (a_index < poses_a_size) and (b_index < poses_b_size): + a_time = poses_a.times[a_index] + b_time = poses_b.times[b_index] + + # Check if times are within given start and end time bounds + if a_time < rel_start_time: + a_index += 1 + continue + if b_time < rel_start_time: + b_index += 1 + continue + # rel_end_time less than zero indicates no bound on end time + if rel_end_time >= 0: + if a_time > rel_end_time or b_time > rel_end_time: + break + + if np.isclose(a_time, b_time, rtol=0, atol=abs_tol): + trimmed_poses_a.positions.add_vector3d( + poses_a.positions.get_vector3d(a_index) + ) + trimmed_poses_a.times.append(poses_a.times[a_index]) + trimmed_poses_b.positions.add_vector3d( + poses_b.positions.get_vector3d(b_index) + ) + trimmed_poses_b.times.append(poses_b.times[b_index]) + if add_orientations: + trimmed_poses_a.orientations.add_euler( + poses_a.orientations.get_euler(a_index) + ) + trimmed_poses_b.orientations.add_euler( + poses_b.orientations.get_euler(b_index) + ) + a_index += 1 + b_index += 1 + elif a_time < b_time: + a_index += 1 + else: + b_index += 1 + return trimmed_poses_a, trimmed_poses_b def position_squared_difference(a, b): - difference_vec = np.subtract(a, b) - return np.inner(difference_vec, difference_vec) + difference_vec = np.subtract(a, b) + return np.inner(difference_vec, difference_vec) # Uses square of radian difference of rotations in axis angle representation def orientation_squared_difference(world_R_a, world_R_b): - a_R_b = world_R_a.inv() * world_R_b - return np.inner(a_R_b.as_rotvec(), a_R_b.as_rotvec()) + a_R_b = world_R_a.inv() * world_R_b + return np.inner(a_R_b.as_rotvec(), a_R_b.as_rotvec()) # RMSE between two sequences of poses. 
Only uses poses with the same timestamp -def rmse_timestamped_poses(poses_a, poses_b, add_orientation_rmse=True, abs_tol=0, rel_start_time=0, rel_end_time=-1): - trimmed_poses_a, trimmed_poses_b = get_same_timestamp_poses(poses_a, poses_b, add_orientation_rmse, abs_tol, - rel_start_time, rel_end_time) - assert len(trimmed_poses_a.times) == len(trimmed_poses_b.times), 'Length mismatch of poses' - num_poses = len(trimmed_poses_a.times) - mean_squared_position_error = 0 - mean_squared_orientation_error = 0 - for index in range(num_poses): - # Position Error - a_vec = trimmed_poses_a.positions.get_numpy_vector(index) - b_vec = trimmed_poses_b.positions.get_numpy_vector(index) - position_squared_error = position_squared_difference(a_vec, b_vec) - # Use rolling mean to avoid overflow - mean_squared_position_error += (position_squared_error - mean_squared_position_error) / (index + 1) - # Orientation Error - if add_orientation_rmse: - a_rot = trimmed_poses_a.orientations.get_rotation(index) - b_rot = trimmed_poses_b.orientations.get_rotation(index) - orientation_squared_error = orientation_squared_difference(a_rot, b_rot) - mean_squared_orientation_error += (orientation_squared_error - mean_squared_orientation_error) / (index + 1) - position_rmse = math.sqrt(mean_squared_position_error) - orientation_rmse = math.sqrt(mean_squared_orientation_error) - return position_rmse, orientation_rmse +def rmse_timestamped_poses( + poses_a, + poses_b, + add_orientation_rmse=True, + abs_tol=0, + rel_start_time=0, + rel_end_time=-1, +): + trimmed_poses_a, trimmed_poses_b = get_same_timestamp_poses( + poses_a, poses_b, add_orientation_rmse, abs_tol, rel_start_time, rel_end_time + ) + assert len(trimmed_poses_a.times) == len( + trimmed_poses_b.times + ), "Length mismatch of poses" + num_poses = len(trimmed_poses_a.times) + mean_squared_position_error = 0 + mean_squared_orientation_error = 0 + for index in range(num_poses): + # Position Error + a_vec = 
trimmed_poses_a.positions.get_numpy_vector(index) + b_vec = trimmed_poses_b.positions.get_numpy_vector(index) + position_squared_error = position_squared_difference(a_vec, b_vec) + # Use rolling mean to avoid overflow + mean_squared_position_error += ( + position_squared_error - mean_squared_position_error + ) / (index + 1) + # Orientation Error + if add_orientation_rmse: + a_rot = trimmed_poses_a.orientations.get_rotation(index) + b_rot = trimmed_poses_b.orientations.get_rotation(index) + orientation_squared_error = orientation_squared_difference(a_rot, b_rot) + mean_squared_orientation_error += ( + orientation_squared_error - mean_squared_orientation_error + ) / (index + 1) + position_rmse = math.sqrt(mean_squared_position_error) + orientation_rmse = math.sqrt(mean_squared_orientation_error) + return position_rmse, orientation_rmse # Relative RMSE between two sequences of poses. Only uses poses with the same timestamp -def rmse_timestamped_poses_relative(poses_a, - poses_b, - add_orientation_rmse=True, - abs_tol=0, - rel_start_time=0, - rel_end_time=-1, - min_relative_elapsed_time=10, max_relative_elapsed_time=20): - trimmed_poses_a, trimmed_poses_b = get_same_timestamp_poses(poses_a, poses_b, add_orientation_rmse, abs_tol, - rel_start_time, rel_end_time) - assert len(trimmed_poses_a.times) == len(trimmed_poses_b.times), 'Length mismatch of poses' - num_poses = len(trimmed_poses_a.times) - mean_squared_position_error = 0 - mean_squared_orientation_error = 0 - count = 0 - for index1 in range(num_poses): - # Position Error - a_vec1 = trimmed_poses_a.positions.get_numpy_vector(index1) - b_vec1 = trimmed_poses_b.positions.get_numpy_vector(index1) - time1 = trimmed_poses_a.times[index1] - index2 = bisect.bisect_left(trimmed_poses_a.times, time1 + min_relative_elapsed_time) - if (index2 == len(trimmed_poses_a.times)): - continue - time2 = trimmed_poses_a.times[index2] - if (time2 - time1 > max_relative_elapsed_time): - continue - a_vec2 = 
trimmed_poses_a.positions.get_numpy_vector(index2) - b_vec2 = trimmed_poses_b.positions.get_numpy_vector(index2) - a_rel_vec = a_vec2 - a_vec1 - b_rel_vec = b_vec2 - b_vec1 - - position_squared_error = position_squared_difference(a_rel_vec, b_rel_vec) - count += 1 - # Use rolling mean to avoid overflow - mean_squared_position_error += (position_squared_error - mean_squared_position_error) / float(count) - # Orientation Error - if add_orientation_rmse: - # TODO(rsoussan): Add relative calculations for orientations - a_rot = trimmed_poses_a.orientations.get_rotation(index1) - b_rot = trimmed_poses_b.orientations.get_rotation(index1) - orientation_squared_error = orientation_squared_difference(a_rot, b_rot) - mean_squared_orientation_error += (orientation_squared_error - mean_squared_orientation_error) / float(count) - position_rmse = math.sqrt(mean_squared_position_error) - orientation_rmse = math.sqrt(mean_squared_orientation_error) - return position_rmse, orientation_rmse +def rmse_timestamped_poses_relative( + poses_a, + poses_b, + add_orientation_rmse=True, + abs_tol=0, + rel_start_time=0, + rel_end_time=-1, + min_relative_elapsed_time=10, + max_relative_elapsed_time=20, +): + trimmed_poses_a, trimmed_poses_b = get_same_timestamp_poses( + poses_a, poses_b, add_orientation_rmse, abs_tol, rel_start_time, rel_end_time + ) + assert len(trimmed_poses_a.times) == len( + trimmed_poses_b.times + ), "Length mismatch of poses" + num_poses = len(trimmed_poses_a.times) + mean_squared_position_error = 0 + mean_squared_orientation_error = 0 + count = 0 + for index1 in range(num_poses): + # Position Error + a_vec1 = trimmed_poses_a.positions.get_numpy_vector(index1) + b_vec1 = trimmed_poses_b.positions.get_numpy_vector(index1) + time1 = trimmed_poses_a.times[index1] + index2 = bisect.bisect_left( + trimmed_poses_a.times, time1 + min_relative_elapsed_time + ) + if index2 == len(trimmed_poses_a.times): + continue + time2 = trimmed_poses_a.times[index2] + if time2 - time1 > 
max_relative_elapsed_time: + continue + a_vec2 = trimmed_poses_a.positions.get_numpy_vector(index2) + b_vec2 = trimmed_poses_b.positions.get_numpy_vector(index2) + a_rel_vec = a_vec2 - a_vec1 + b_rel_vec = b_vec2 - b_vec1 + + position_squared_error = position_squared_difference(a_rel_vec, b_rel_vec) + count += 1 + # Use rolling mean to avoid overflow + mean_squared_position_error += ( + position_squared_error - mean_squared_position_error + ) / float(count) + # Orientation Error + if add_orientation_rmse: + # TODO(rsoussan): Add relative calculations for orientations + a_rot = trimmed_poses_a.orientations.get_rotation(index1) + b_rot = trimmed_poses_b.orientations.get_rotation(index1) + orientation_squared_error = orientation_squared_difference(a_rot, b_rot) + mean_squared_orientation_error += ( + orientation_squared_error - mean_squared_orientation_error + ) / float(count) + position_rmse = math.sqrt(mean_squared_position_error) + orientation_rmse = math.sqrt(mean_squared_orientation_error) + return position_rmse, orientation_rmse diff --git a/tools/graph_bag/scripts/test_rmse_utilities.py b/tools/graph_bag/scripts/test_rmse_utilities.py index f116d7b08d..380164c292 100644 --- a/tools/graph_bag/scripts/test_rmse_utilities.py +++ b/tools/graph_bag/scripts/test_rmse_utilities.py @@ -17,156 +17,161 @@ # License for the specific language governing permissions and limitations # under the License. 
-import poses -import rmse_utilities - import math -import numpy as np import unittest +import numpy as np +import poses +import rmse_utilities + + def make_poses(times, xs, ys, zs): - new_poses = poses.Poses('', '') - new_poses.times = times - new_poses.positions.xs = xs - new_poses.positions.ys = ys - new_poses.positions.zs = zs - return new_poses + new_poses = poses.Poses("", "") + new_poses.times = times + new_poses.positions.xs = xs + new_poses.positions.ys = ys + new_poses.positions.zs = zs + return new_poses + class TestRMSESequence(unittest.TestCase): + def test_prune_missing_timestamps_beginning_set(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + b_times = np.arange(5.0) + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(b_times, xs, ys, zs) + + trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) + self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) + self.assertEqual(len(trimmed_a.times), 5) + self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) + + def test_prune_missing_timestamps_middle_set(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + b_times = np.arange(3.0, 7.0) + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(b_times, xs, ys, zs) + + trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) + self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) + self.assertEqual(len(trimmed_a.times), 4) + self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.ys, 
b_times + 1, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) + + def test_prune_missing_timestamps_end_set(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + b_times = np.arange(7.0, 10.0) + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(b_times, xs, ys, zs) + + trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) + self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) + self.assertEqual(len(trimmed_a.times), 3) + self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) + + def test_prune_missing_timestamps_scattered_set(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + b_times = np.array([1.0, 5.0, 6.0, 9.0]) + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(b_times, xs, ys, zs) + + trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) + self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) + self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) + + def test_prune_missing_timestamps_disjoint_set(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + b_times = np.arange(11, 20) + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(b_times, xs, ys, zs) + + trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) + 
self.assertEqual(len(trimmed_a.times), 0) + self.assertEqual(len(trimmed_b.times), 0) + + def test_prune_missing_timestamps_some_overlap(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + b_times = np.arange(8.0, 20.0) + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(b_times, xs, ys, zs) + + expected_time_range = np.arange(8.0, 10.0) + trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) + self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) + self.assertTrue(np.allclose(trimmed_a.times, trimmed_b.times, rtol=0)) + self.assertTrue(np.allclose(trimmed_a.times, expected_time_range, rtol=0)) + self.assertTrue( + np.allclose(trimmed_a.positions.xs, expected_time_range, rtol=0) + ) + self.assertTrue( + np.allclose(trimmed_a.positions.ys, expected_time_range + 1, rtol=0) + ) + self.assertTrue( + np.allclose(trimmed_a.positions.zs, expected_time_range + 2, rtol=0) + ) + + def test_rmse_same_poses(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(a_times, xs, ys, zs) + + rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b) + self.assertTrue(np.isclose(rmse, 0, rtol=0)) + + def test_rmse_off_by_one(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(a_times, xs + 1, ys, zs) + + rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b) + self.assertTrue(np.isclose(rmse, 1.0, rtol=0)) + + def test_rmse_all_off_by_one(self): + a_times = np.arange(10.0) + xs = np.arange(10.0) + ys = np.arange(10.0) + 1.0 + zs = np.arange(10.0) + 2.0 + poses_a = make_poses(a_times, xs, ys, zs) + poses_b = make_poses(a_times, xs + 1, ys + 1, zs + 1) + + rmse = 
rmse_utilities.rmse_timestamped_poses(poses_a, poses_b) + self.assertTrue(np.isclose(rmse, math.sqrt(3.0), rtol=0)) + - def test_prune_missing_timestamps_beginning_set(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - b_times = np.arange(5.0) - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(b_times, xs, ys, zs) - - trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) - self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) - self.assertEqual(len(trimmed_a.times), 5) - self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) - - def test_prune_missing_timestamps_middle_set(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - b_times = np.arange(3.0, 7.0) - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(b_times, xs, ys, zs) - - trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) - self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) - self.assertEqual(len(trimmed_a.times), 4) - self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) - - - def test_prune_missing_timestamps_end_set(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - b_times = np.arange(7.0, 10.0) - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(b_times, xs, ys, zs) - - trimmed_a, trimmed_b = 
rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) - self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) - self.assertEqual(len(trimmed_a.times), 3) - self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) - - def test_prune_missing_timestamps_scattered_set(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - b_times = np.array([1.0, 5.0, 6.0, 9.0]) - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(b_times, xs, ys, zs) - - trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) - self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) - self.assertTrue(np.allclose(trimmed_a.times, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.xs, b_times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.ys, b_times + 1, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.zs, b_times + 2, rtol=0)) - - def test_prune_missing_timestamps_disjoint_set(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - b_times = np.arange(11, 20) - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(b_times, xs, ys, zs) - - trimmed_a, trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) - self.assertEqual(len(trimmed_a.times), 0) - self.assertEqual(len(trimmed_b.times), 0) - - - def test_prune_missing_timestamps_some_overlap(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - b_times = np.arange(8.0, 20.0) - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(b_times, xs, ys, zs) - - expected_time_range = np.arange(8.0, 10.0) - trimmed_a, 
trimmed_b = rmse_utilities.get_same_timestamp_poses(poses_a, poses_b) - self.assertEqual(len(trimmed_a.times), len(trimmed_b.times)) - self.assertTrue(np.allclose(trimmed_a.times, trimmed_b.times, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.times, expected_time_range, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.xs, expected_time_range, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.ys, expected_time_range + 1, rtol=0)) - self.assertTrue(np.allclose(trimmed_a.positions.zs, expected_time_range + 2, rtol=0)) - - - def test_rmse_same_poses(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(a_times, xs, ys, zs) - - rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b) - self.assertTrue(np.isclose(rmse, 0, rtol=0)) - - def test_rmse_off_by_one(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(a_times, xs + 1, ys, zs) - - rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b) - self.assertTrue(np.isclose(rmse, 1.0, rtol=0)) - - def test_rmse_all_off_by_one(self): - a_times = np.arange(10.0) - xs = np.arange(10.0) - ys = np.arange(10.0) + 1.0 - zs = np.arange(10.0) + 2.0 - poses_a = make_poses(a_times, xs, ys, zs) - poses_b = make_poses(a_times, xs + 1, ys + 1, zs + 1) - - rmse = rmse_utilities.rmse_timestamped_poses(poses_a, poses_b) - self.assertTrue(np.isclose(rmse, math.sqrt(3.0), rtol=0)) - -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() diff --git a/tools/graph_bag/scripts/trim_bag.py b/tools/graph_bag/scripts/trim_bag.py index 9321090e68..155cc6bb94 100755 --- a/tools/graph_bag/scripts/trim_bag.py +++ b/tools/graph_bag/scripts/trim_bag.py @@ -15,35 +15,43 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. See the # License for the specific language governing permissions and limitations # under the License. -import utilities - import argparse import os import sys import rosbag +import utilities def trim_bag(bag_name, start_time_to_trim, end_time_to_trim): - with rosbag.Bag(bag_name, 'r') as bag: - start_time = bag.get_start_time() - new_start_time = start_time + start_time_to_trim - end_time = bag.get_end_time() - new_end_time = end_time - end_time_to_trim - output_bag_name = os.path.splitext(bag_name)[0] + '_trimmed.bag' - run_command = 'rosbag filter ' + bag_name + ' ' + output_bag_name + ' \"t.secs >= ' + str( - new_start_time) + ' and t.secs <= ' + str(new_end_time) + '\"' - os.system(run_command) + with rosbag.Bag(bag_name, "r") as bag: + start_time = bag.get_start_time() + new_start_time = start_time + start_time_to_trim + end_time = bag.get_end_time() + new_end_time = end_time - end_time_to_trim + output_bag_name = os.path.splitext(bag_name)[0] + "_trimmed.bag" + run_command = ( + "rosbag filter " + + bag_name + + " " + + output_bag_name + + ' "t.secs >= ' + + str(new_start_time) + + " and t.secs <= " + + str(new_end_time) + + '"' + ) + os.system(run_command) -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('bagfile') - parser.add_argument('-s', '--start-time-to-trim', type=float, default=0) - parser.add_argument('-e', '--end-time-to-trim', type=float, default=0) - args = parser.parse_args() - if not os.path.isfile(args.bagfile): - print('Bag file ' + args.bagfile + ' does not exist.') - sys.exit() +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("bagfile") + parser.add_argument("-s", "--start-time-to-trim", type=float, default=0) + parser.add_argument("-e", "--end-time-to-trim", type=float, default=0) + args = parser.parse_args() + if not os.path.isfile(args.bagfile): + print(("Bag file " + args.bagfile + " does not exist.")) + sys.exit() - trim_bag(args.bagfile, 
args.start_time_to_trim, args.end_time_to_trim) + trim_bag(args.bagfile, args.start_time_to_trim, args.end_time_to_trim) diff --git a/tools/graph_bag/scripts/utilities.py b/tools/graph_bag/scripts/utilities.py index c6032264c7..66d4f32e8e 100644 --- a/tools/graph_bag/scripts/utilities.py +++ b/tools/graph_bag/scripts/utilities.py @@ -15,14 +15,13 @@ # License for the specific language governing permissions and limitations # under the License. -import poses - import datetime import glob -import numpy as np import os -import pandas as pd +import numpy as np +import pandas as pd +import poses import rosbag @@ -30,118 +29,160 @@ # even when running commands through multiprocessing # pooling def full_traceback(func): - import traceback, functools + import functools + import traceback - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception as e: - msg = "{}\n\nOriginal {}".format(e, traceback.format_exc()) - raise type(e)(msg) + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception as e: + msg = "{}\n\nOriginal {}".format(e, traceback.format_exc()) + raise type(e)(msg) - return wrapper + return wrapper def get_files(directory, file_string): - return glob.glob(os.path.join(directory, file_string)) + return glob.glob(os.path.join(directory, file_string)) def get_files_recursive(directory, file_string): - subdirectory_csv_files = [] - _, subdirectories, _ = os.walk(directory).next() - for subdirectory in subdirectories: - subdirectory_path = os.path.join(directory, subdirectory) - for subdirectory_csv_file in get_files(subdirectory_path, file_string): - subdirectory_csv_files.append(subdirectory_csv_file) - return subdirectory_csv_files + subdirectory_csv_files = [] + _, subdirectories, _ = next(os.walk(directory)) + for subdirectory in subdirectories: + subdirectory_path = os.path.join(directory, subdirectory) + for subdirectory_csv_file in 
get_files(subdirectory_path, file_string): + subdirectory_csv_files.append(subdirectory_csv_file) + return subdirectory_csv_files def create_directory(directory=None): - if directory == None: - directory = os.path.join(os.getcwd(), datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')) - if os.path.exists(directory): - print(directory + " already exists!") - exit() - os.makedirs(directory) - return directory + if directory == None: + directory = os.path.join( + os.getcwd(), datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + ) + if os.path.exists(directory): + print((directory + " already exists!")) + exit() + os.makedirs(directory) + return directory def load_dataframe(files): - dataframes = [pd.read_csv(file) for file in files] - dataframe = pd.concat(dataframes) - return dataframe - - -def get_topic_rates(bag_name, - topic, - min_time_diff_for_gap, - use_header_time=True, - verbose=False, - ignore_zero_time_diffs=True): - with rosbag.Bag(bag_name, 'r') as bag: - last_time = 0.0 - gaps = 0 - time_diffs = [] - for _, msg, t in bag.read_messages([topic]): - time = msg.header.stamp.secs + msg.header.stamp.nsecs * 1.0e-9 if use_header_time else t.secs + t.nsecs * 1.0e-9 - time_diff = time - last_time - if last_time != 0 and time_diff >= min_time_diff_for_gap: + dataframes = [pd.read_csv(file) for file in files] + dataframe = pd.concat(dataframes) + return dataframe + + +def get_topic_rates( + bag_name, + topic, + min_time_diff_for_gap, + use_header_time=True, + verbose=False, + ignore_zero_time_diffs=True, +): + with rosbag.Bag(bag_name, "r") as bag: + last_time = 0.0 + gaps = 0 + time_diffs = [] + for _, msg, t in bag.read_messages([topic]): + time = ( + msg.header.stamp.secs + msg.header.stamp.nsecs * 1.0e-9 + if use_header_time + else t.secs + t.nsecs * 1.0e-9 + ) + time_diff = time - last_time + if last_time != 0 and time_diff >= min_time_diff_for_gap: + if verbose: + print( + ( + topic + + " Gap: time: " + + str(time) + + ", last_time: " + + str(last_time) + 
+ ", diff: " + + str(time_diff) + ) + ) + gaps += 1 + if last_time != 0 and (time_diff != 0 or not ignore_zero_time_diffs): + time_diffs.append(time_diff) + last_time = time + + mean_time_diff = np.mean(time_diffs) + min_time_diff = np.min(time_diffs) + max_time_diff = np.max(time_diffs) + stddev_time_diff = np.std(time_diffs) if verbose: - print(topic + ' Gap: time: ' + str(time) + ', last_time: ' + str(last_time) + ', diff: ' + str(time_diff)) - gaps += 1 - if (last_time != 0 and (time_diff != 0 or not ignore_zero_time_diffs)): - time_diffs.append(time_diff) - last_time = time - - mean_time_diff = np.mean(time_diffs) - min_time_diff = np.min(time_diffs) - max_time_diff = np.max(time_diffs) - stddev_time_diff = np.std(time_diffs) - if verbose: - if use_header_time: - print('Using Header time.') - else: - print('Using Receive time.') - print('Found ' + str(gaps) + ' time diffs >= ' + str(min_time_diff_for_gap) + ' secs.') - print('Mean time diff: ' + str(mean_time_diff)) - print('Min time diff: ' + str(min_time_diff)) - print('Max time diff: ' + str(max_time_diff)) - print('Stddev time diff: ' + str(stddev_time_diff)) + if use_header_time: + print("Using Header time.") + else: + print("Using Receive time.") + print( + ( + "Found " + + str(gaps) + + " time diffs >= " + + str(min_time_diff_for_gap) + + " secs." + ) + ) + print(("Mean time diff: " + str(mean_time_diff))) + print(("Min time diff: " + str(min_time_diff))) + print(("Max time diff: " + str(max_time_diff))) + print(("Stddev time diff: " + str(stddev_time_diff))) def integrate_velocities(localization_states): - delta_times = [j - i for i, j in zip(localization_states.times[:-1], localization_states.times[1:])] - # Make sure times are same length as velocities, ignore last velocity - delta_times.append(0) - integrated_positions = poses.Poses('Integrated Graph Velocities', '') - # TODO(rsoussan): Integrate angular velocities? - # TODO(rsoussan): central difference instead? 
- x_increments = [velocity * delta_t for velocity, delta_t in zip(localization_states.velocities.xs, delta_times)] - cumulative_x_increments = np.cumsum(x_increments) - integrated_positions.positions.xs = [ - localization_states.positions.xs[0] + cumulative_x_increment for cumulative_x_increment in cumulative_x_increments - ] - y_increments = [velocity * delta_t for velocity, delta_t in zip(localization_states.velocities.ys, delta_times)] - cumulative_y_increments = np.cumsum(y_increments) - integrated_positions.positions.ys = [ - localization_states.positions.ys[0] + cumulative_y_increment for cumulative_y_increment in cumulative_y_increments - ] - z_increments = [velocity * delta_t for velocity, delta_t in zip(localization_states.velocities.zs, delta_times)] - cumulative_z_increments = np.cumsum(z_increments) - integrated_positions.positions.zs = [ - localization_states.positions.zs[0] + cumulative_z_increment for cumulative_z_increment in cumulative_z_increments - ] - - # Add start positions - integrated_positions.positions.xs.insert(0, localization_states.positions.xs[0]) - integrated_positions.positions.ys.insert(0, localization_states.positions.ys[0]) - integrated_positions.positions.zs.insert(0, localization_states.positions.zs[0]) - - # Remove last elements (no timestamp for these) - del integrated_positions.positions.xs[-1] - del integrated_positions.positions.ys[-1] - del integrated_positions.positions.zs[-1] - - integrated_positions.times = localization_states.times - return integrated_positions + delta_times = [ + j - i + for i, j in zip(localization_states.times[:-1], localization_states.times[1:]) + ] + # Make sure times are same length as velocities, ignore last velocity + delta_times.append(0) + integrated_positions = poses.Poses("Integrated Graph Velocities", "") + # TODO(rsoussan): Integrate angular velocities? + # TODO(rsoussan): central difference instead? 
+ x_increments = [ + velocity * delta_t + for velocity, delta_t in zip(localization_states.velocities.xs, delta_times) + ] + cumulative_x_increments = np.cumsum(x_increments) + integrated_positions.positions.xs = [ + localization_states.positions.xs[0] + cumulative_x_increment + for cumulative_x_increment in cumulative_x_increments + ] + y_increments = [ + velocity * delta_t + for velocity, delta_t in zip(localization_states.velocities.ys, delta_times) + ] + cumulative_y_increments = np.cumsum(y_increments) + integrated_positions.positions.ys = [ + localization_states.positions.ys[0] + cumulative_y_increment + for cumulative_y_increment in cumulative_y_increments + ] + z_increments = [ + velocity * delta_t + for velocity, delta_t in zip(localization_states.velocities.zs, delta_times) + ] + cumulative_z_increments = np.cumsum(z_increments) + integrated_positions.positions.zs = [ + localization_states.positions.zs[0] + cumulative_z_increment + for cumulative_z_increment in cumulative_z_increments + ] + + # Add start positions + integrated_positions.positions.xs.insert(0, localization_states.positions.xs[0]) + integrated_positions.positions.ys.insert(0, localization_states.positions.ys[0]) + integrated_positions.positions.zs.insert(0, localization_states.positions.zs[0]) + + # Remove last elements (no timestamp for these) + del integrated_positions.positions.xs[-1] + del integrated_positions.positions.ys[-1] + del integrated_positions.positions.zs[-1] + + integrated_positions.times = localization_states.times + return integrated_positions diff --git a/tools/graph_bag/scripts/vector3d.py b/tools/graph_bag/scripts/vector3d.py index 90ef41495d..ee422308b8 100644 --- a/tools/graph_bag/scripts/vector3d.py +++ b/tools/graph_bag/scripts/vector3d.py @@ -17,9 +17,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-class Vector3d: - def __init__(self, x, y, z): - self.x = x - self.y = y - self.z = z +class Vector3d: + def __init__(self, x, y, z): + self.x = x + self.y = y + self.z = z diff --git a/tools/graph_bag/scripts/vector3d_plotter.py b/tools/graph_bag/scripts/vector3d_plotter.py index aec1bdfed8..9a3453fb48 100644 --- a/tools/graph_bag/scripts/vector3d_plotter.py +++ b/tools/graph_bag/scripts/vector3d_plotter.py @@ -17,170 +17,203 @@ # License for the specific language governing permissions and limitations # under the License. +import matplotlib import poses import vector3ds -import matplotlib -matplotlib.use('pdf') +matplotlib.use("pdf") import matplotlib.pyplot as plt import numpy as np + def unwrap_in_degrees(angles): - return np.rad2deg(np.unwrap(np.deg2rad(angles))) - -class Vector3dPlotter(): - - def __init__(self, xlabel, ylabel, title, individual_plots): - self.xlabel = xlabel - self.ylabel = ylabel - self.title = title - self.individual_plots = individual_plots - self.y_vals_vec = [] - - def add_pose_position(self, - pose, - colors=['r', 'b', 'g'], - linestyle='-', - linewidth=1, - marker=None, - markeredgewidth=None, - markersize=1): - position_plotter = Vector3dYVals(pose.pose_type, pose.times, pose.positions.xs, pose.positions.ys, - pose.positions.zs, ['Pos. (X)', 'Pos. (Y)', 'Pos. 
(Z)'], colors, linestyle, - linewidth, marker, markeredgewidth, markersize) - self.add_y_vals(position_plotter) - - def add_pose_orientation(self, - pose, - colors=['r', 'b', 'g'], - linestyle='-', - linewidth=1, - marker=None, - markeredgewidth=None, - markersize=1): - orientation_plotter = Vector3dYVals(pose.pose_type, pose.times, unwrap_in_degrees(pose.orientations.yaws), unwrap_in_degrees(pose.orientations.rolls), - unwrap_in_degrees(pose.orientations.pitches), - ['Orientation (Yaw)', 'Orientation (Roll)', 'Orientation (Pitch)'], colors, - linestyle, linewidth, marker, markeredgewidth, markersize) - self.add_y_vals(orientation_plotter) - - def add_y_vals(self, y_vals): - self.y_vals_vec.append(y_vals) - - def plot(self, pdf, individual_plots=True): - plt.figure() - for y_vals in self.y_vals_vec: - y_vals.full_plot() - plt.xlabel(self.xlabel) - plt.ylabel(self.ylabel) - plt.title(self.title) - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - if individual_plots: - self.plot_xs(pdf) - self.plot_ys(pdf) - self.plot_zs(pdf) - - def plot_xs(self, pdf): - plt.figure() - for y_vals in self.y_vals_vec: - y_vals.plot_x() - plt.xlabel(self.xlabel) - plt.ylabel(self.ylabel) - plt.title(self.title) - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - def plot_ys(self, pdf): - plt.figure() - for y_vals in self.y_vals_vec: - y_vals.plot_y() - plt.xlabel(self.xlabel) - plt.ylabel(self.ylabel) - plt.title(self.title) - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - def plot_zs(self, pdf): - plt.figure() - for y_vals in self.y_vals_vec: - y_vals.plot_z() - plt.xlabel(self.xlabel) - plt.ylabel(self.ylabel) - plt.title(self.title) - plt.legend(prop={'size': 6}) - pdf.savefig() - plt.close() - - -class Vector3dYVals(): - - def __init__(self, - name, - x_axis_vals, - x_vals, - y_vals, - z_vals, - labels, - colors=['r', 'b', 'g'], - linestyle='-', - linewidth=1, - marker=None, - markeredgewidth=None, - markersize=1): - self.x_vals = x_vals - 
self.y_vals = y_vals - self.z_vals = z_vals - self.x_axis_vals = x_axis_vals - self.name = name - self.x_label = name + ' ' + labels[0] - self.y_label = name + ' ' + labels[1] - self.z_label = name + ' ' + labels[2] - self.x_color = colors[0] - self.y_color = colors[1] - self.z_color = colors[2] - self.linestyle = linestyle - self.linewidth = linewidth - self.marker = marker - self.markeredgewidth = markeredgewidth - self.markersize = markersize - - def full_plot(self): - self.plot_x() - self.plot_y() - self.plot_z() - - def plot_x(self): - plt.plot(self.x_axis_vals, - self.x_vals, - label=self.x_label, - color=self.x_color, - linestyle=self.linestyle, - marker=self.marker, - markeredgewidth=self.markeredgewidth, - markersize=self.markersize) - - def plot_y(self): - plt.plot(self.x_axis_vals, - self.y_vals, - label=self.y_label, - color=self.y_color, - linewidth=self.linewidth, - linestyle=self.linestyle, - marker=self.marker, - markeredgewidth=self.markeredgewidth, - markersize=self.markersize) - - def plot_z(self): - plt.plot(self.x_axis_vals, - self.z_vals, - label=self.z_label, - color=self.z_color, - linestyle=self.linestyle, - marker=self.marker, - markeredgewidth=self.markeredgewidth, - markersize=self.markersize) + return np.rad2deg(np.unwrap(np.deg2rad(angles))) + + +class Vector3dPlotter: + def __init__(self, xlabel, ylabel, title, individual_plots): + self.xlabel = xlabel + self.ylabel = ylabel + self.title = title + self.individual_plots = individual_plots + self.y_vals_vec = [] + + def add_pose_position( + self, + pose, + colors=["r", "b", "g"], + linestyle="-", + linewidth=1, + marker=None, + markeredgewidth=None, + markersize=1, + ): + position_plotter = Vector3dYVals( + pose.pose_type, + pose.times, + pose.positions.xs, + pose.positions.ys, + pose.positions.zs, + ["Pos. (X)", "Pos. (Y)", "Pos. 
(Z)"], + colors, + linestyle, + linewidth, + marker, + markeredgewidth, + markersize, + ) + self.add_y_vals(position_plotter) + + def add_pose_orientation( + self, + pose, + colors=["r", "b", "g"], + linestyle="-", + linewidth=1, + marker=None, + markeredgewidth=None, + markersize=1, + ): + orientation_plotter = Vector3dYVals( + pose.pose_type, + pose.times, + unwrap_in_degrees(pose.orientations.yaws), + unwrap_in_degrees(pose.orientations.rolls), + unwrap_in_degrees(pose.orientations.pitches), + ["Orientation (Yaw)", "Orientation (Roll)", "Orientation (Pitch)"], + colors, + linestyle, + linewidth, + marker, + markeredgewidth, + markersize, + ) + self.add_y_vals(orientation_plotter) + + def add_y_vals(self, y_vals): + self.y_vals_vec.append(y_vals) + + def plot(self, pdf, individual_plots=True): + plt.figure() + for y_vals in self.y_vals_vec: + y_vals.full_plot() + plt.xlabel(self.xlabel) + plt.ylabel(self.ylabel) + plt.title(self.title) + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + if individual_plots: + self.plot_xs(pdf) + self.plot_ys(pdf) + self.plot_zs(pdf) + + def plot_xs(self, pdf): + plt.figure() + for y_vals in self.y_vals_vec: + y_vals.plot_x() + plt.xlabel(self.xlabel) + plt.ylabel(self.ylabel) + plt.title(self.title) + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + def plot_ys(self, pdf): + plt.figure() + for y_vals in self.y_vals_vec: + y_vals.plot_y() + plt.xlabel(self.xlabel) + plt.ylabel(self.ylabel) + plt.title(self.title) + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + def plot_zs(self, pdf): + plt.figure() + for y_vals in self.y_vals_vec: + y_vals.plot_z() + plt.xlabel(self.xlabel) + plt.ylabel(self.ylabel) + plt.title(self.title) + plt.legend(prop={"size": 6}) + pdf.savefig() + plt.close() + + +class Vector3dYVals: + def __init__( + self, + name, + x_axis_vals, + x_vals, + y_vals, + z_vals, + labels, + colors=["r", "b", "g"], + linestyle="-", + linewidth=1, + marker=None, + 
markeredgewidth=None, + markersize=1, + ): + self.x_vals = x_vals + self.y_vals = y_vals + self.z_vals = z_vals + self.x_axis_vals = x_axis_vals + self.name = name + self.x_label = name + " " + labels[0] + self.y_label = name + " " + labels[1] + self.z_label = name + " " + labels[2] + self.x_color = colors[0] + self.y_color = colors[1] + self.z_color = colors[2] + self.linestyle = linestyle + self.linewidth = linewidth + self.marker = marker + self.markeredgewidth = markeredgewidth + self.markersize = markersize + + def full_plot(self): + self.plot_x() + self.plot_y() + self.plot_z() + + def plot_x(self): + plt.plot( + self.x_axis_vals, + self.x_vals, + label=self.x_label, + color=self.x_color, + linestyle=self.linestyle, + marker=self.marker, + markeredgewidth=self.markeredgewidth, + markersize=self.markersize, + ) + + def plot_y(self): + plt.plot( + self.x_axis_vals, + self.y_vals, + label=self.y_label, + color=self.y_color, + linewidth=self.linewidth, + linestyle=self.linestyle, + marker=self.marker, + markeredgewidth=self.markeredgewidth, + markersize=self.markersize, + ) + + def plot_z(self): + plt.plot( + self.x_axis_vals, + self.z_vals, + label=self.z_label, + color=self.z_color, + linestyle=self.linestyle, + marker=self.marker, + markeredgewidth=self.markeredgewidth, + markersize=self.markersize, + ) diff --git a/tools/graph_bag/scripts/vector3ds.py b/tools/graph_bag/scripts/vector3ds.py index 411f25c7bc..308dca741c 100644 --- a/tools/graph_bag/scripts/vector3ds.py +++ b/tools/graph_bag/scripts/vector3ds.py @@ -17,29 +17,28 @@ # License for the specific language governing permissions and limitations # under the License. 
+import numpy as np import vector3d -import numpy as np class Vector3ds: - - def __init__(self): - self.xs = [] - self.ys = [] - self.zs = [] - - def add(self, x, y, z): - self.xs.append(x) - self.ys.append(y) - self.zs.append(z) - - def add_vector3d(self, vector3d): - self.xs.append(vector3d.x) - self.ys.append(vector3d.y) - self.zs.append(vector3d.z) - - def get_vector3d(self, index): - return vector3d.Vector3d(self.xs[index], self.ys[index], self.zs[index]) - - def get_numpy_vector(self, index): - return np.array([self.xs[index], self.ys[index], self.zs[index]]) + def __init__(self): + self.xs = [] + self.ys = [] + self.zs = [] + + def add(self, x, y, z): + self.xs.append(x) + self.ys.append(y) + self.zs.append(z) + + def add_vector3d(self, vector3d): + self.xs.append(vector3d.x) + self.ys.append(vector3d.y) + self.zs.append(vector3d.z) + + def get_vector3d(self, index): + return vector3d.Vector3d(self.xs[index], self.ys[index], self.zs[index]) + + def get_numpy_vector(self, index): + return np.array([self.xs[index], self.ys[index], self.zs[index]]) diff --git a/tools/graph_bag/scripts/velocities.py b/tools/graph_bag/scripts/velocities.py index 4c71237050..5fad11c6f7 100644 --- a/tools/graph_bag/scripts/velocities.py +++ b/tools/graph_bag/scripts/velocities.py @@ -21,16 +21,15 @@ class Velocities(object): + def __init__(self, velocity_type, topic): + self.velocities = vector3ds.Vector3ds() + self.times = [] + self.velocity_type = velocity_type + self.topic = topic - def __init__(self, velocity_type, topic): - self.velocities = vector3ds.Vector3ds() - self.times = [] - self.velocity_type = velocity_type - self.topic = topic + def add_velocity(self, velocity_msg, timestamp, bag_start_time=0): + self.velocities.add(velocity_msg.x, velocity_msg.y, velocity_msg.z) + self.times.append(timestamp.secs + 1e-9 * timestamp.nsecs - bag_start_time) - def add_velocity(self, velocity_msg, timestamp, bag_start_time=0): - self.velocities.add(velocity_msg.x, velocity_msg.y, 
velocity_msg.z) - self.times.append(timestamp.secs + 1e-9 * timestamp.nsecs - bag_start_time) - - def add_msg(self, msg, timestamp, bag_start_time=0): - self.add_velocity(msg.vector, timestamp, bag_start_time) + def add_msg(self, msg, timestamp, bag_start_time=0): + self.add_velocity(msg.vector, timestamp, bag_start_time)