|
2025-Robot
Robot code for 2025 FRC season by Argos, FRC team #1756
|
Namespaces | |
| namespace | internal |
Classes | |
| class | BarcodeResultClass |
| class | ClassificationResultClass |
| class | DetectionResultClass |
| class | FiducialResultClass |
| class | IMUData |
| class | LimelightResultsClass |
| class | PoseEstimate |
| class | RawFiducial |
| class | RetroreflectiveResultClass |
| class | SingleTargetingResultClass |
| class | VisionResultsClass |
Functions | |
| std::string | sanitizeName (const std::string &name) |
| frc::Pose3d | toPose3D (const std::vector< double > &inData) |
| frc::Pose2d | toPose2D (const std::vector< double > &inData) |
| std::array< double, 6 > | pose3dToArray (const frc::Pose3d &pose) |
| std::array< double, 6 > | pose2dToArray (const frc::Pose2d &pose) |
| std::shared_ptr< nt::NetworkTable > | getLimelightNTTable (const std::string &tableName) |
| void | Flush () |
| nt::NetworkTableEntry | getLimelightNTTableEntry (const std::string &tableName, const std::string &entryName) |
| nt::DoubleArrayEntry & | getLimelightDoubleArrayEntry (const std::string &tableName, const std::string &entryName) |
| double | getLimelightNTDouble (const std::string &tableName, const std::string &entryName) |
| std::vector< double > | getLimelightNTDoubleArray (const std::string &tableName, const std::string &entryName) |
| std::string | getLimelightNTString (const std::string &tableName, const std::string &entryName) |
| std::vector< std::string > | getLimelightNTStringArray (const std::string &tableName, const std::string &entryName) |
| void | setLimelightNTDouble (const std::string &tableName, const std::string entryName, double val) |
| void | setLimelightNTDoubleArray (const std::string &tableName, const std::string &entryName, const std::span< const double > &vals) |
| bool | getTV (const std::string &limelightName="") |
| double | getTX (const std::string &limelightName="") |
| double | getTY (const std::string &limelightName="") |
| double | getTXNC (const std::string &limelightName) |
| double | getTYNC (const std::string &limelightName) |
| double | getTA (const std::string &limelightName="") |
| std::vector< double > | getT2DArray (const std::string &limelightName) |
| int | getTargetCount (const std::string &limelightName) |
| int | getClassifierClassIndex (const std::string &limelightName) |
| int | getDetectorClassIndex (const std::string &limelightName) |
| const std::string | getClassifierClass (const std::string &limelightName) |
| const std::string | getDetectorClass (const std::string &limelightName) |
| double | getLatency_Pipeline (const std::string &limelightName="") |
| double | getLatency_Capture (const std::string &limelightName="") |
| double | getCurrentPipelineIndex (const std::string &limelightName) |
| const std::string | getCurrentPipelineType (const std::string &limelightName) |
| std::string | getJSONDump (const std::string &limelightName="") |
| std::vector< double > | getBotpose (const std::string &limelightName="") |
| std::vector< double > | getBotpose_wpiRed (const std::string &limelightName="") |
| std::vector< double > | getBotpose_wpiBlue (const std::string &limelightName="") |
| std::vector< double > | getBotpose_TargetSpace (const std::string &limelightName="") |
| std::vector< double > | getCameraPose_TargetSpace (const std::string &limelightName="") |
| std::vector< double > | getCameraPose_RobotSpace (const std::string &limelightName="") |
| std::vector< double > | getTargetPose_CameraSpace (const std::string &limelightName="") |
| std::vector< double > | getTargetPose_RobotSpace (const std::string &limelightName="") |
| std::vector< double > | getTargetColor (const std::string &limelightName="") |
| double | getFiducialID (const std::string &limelightName="") |
| std::string | getNeuralClassID (const std::string &limelightName="") |
| std::vector< std::string > | getRawBarcodeData (const std::string &limelightName="") |
| void | setPipelineIndex (const std::string &limelightName, int index) |
| void | setPriorityTagID (const std::string &limelightName, int ID) |
| void | setLEDMode_PipelineControl (const std::string &limelightName="") |
| void | setLEDMode_ForceOff (const std::string &limelightName="") |
| void | setLEDMode_ForceBlink (const std::string &limelightName="") |
| void | setLEDMode_ForceOn (const std::string &limelightName="") |
| void | setStreamMode_Standard (const std::string &limelightName="") |
| void | setStreamMode_PiPMain (const std::string &limelightName="") |
| void | setStreamMode_PiPSecondary (const std::string &limelightName="") |
| void | setCropWindow (const std::string &limelightName, double cropXMin, double cropXMax, double cropYMin, double cropYMax) |
| void | setFiducial3DOffset (const std::string &limelightName, double offsetX, double offsetY, double offsetZ) |
| void | SetRobotOrientation_INTERNAL (const std::string &limelightName, double yaw, double yawRate, double pitch, double pitchRate, double roll, double rollRate, bool flush) |
| void | SetRobotOrientation (const std::string &limelightName, double yaw, double yawRate, double pitch, double pitchRate, double roll, double rollRate) |
| void | SetRobotOrientation_NoFlush (const std::string &limelightName, double yaw, double yawRate, double pitch, double pitchRate, double roll, double rollRate) |
| void | SetIMUMode (const std::string &limelightName, int mode) |
| void | SetFidcuial3DOffset (const std::string &limelightName, double x, double y, double z) |
| void | SetFiducialIDFiltersOverride (const std::string &limelightName, const std::vector< int > &validIDs) |
| void | setCameraPose_RobotSpace (const std::string &limelightName, double forward, double side, double up, double roll, double pitch, double yaw) |
| void | setPythonScriptData (const std::string &limelightName, const std::vector< double > &outgoingPythonData) |
| std::vector< double > | getPythonScriptData (const std::string &limelightName="") |
| double | extractBotPoseEntry (const std::vector< double > &inData, int position) |
| bool | validPoseEstimate (const PoseEstimate &pose) |
| PoseEstimate | getBotPoseEstimate (const std::string &limelightName, const std::string &entryName, bool isMegaTag2) |
| PoseEstimate | getBotPoseEstimate_wpiBlue (const std::string &limelightName="") |
| PoseEstimate | getBotPoseEstimate_wpiRed (const std::string &limelightName="") |
| PoseEstimate | getBotPoseEstimate_wpiBlue_MegaTag2 (const std::string &limelightName="") |
| PoseEstimate | getBotPoseEstimate_wpiRed_MegaTag2 (const std::string &limelightName="") |
| IMUData | getIMUData (const std::string &limelightName) |
| void | PhoneHome () |
| void | SetupPortForwarding (const std::string &limelightName) |
| template<typename T , typename KeyType > | |
| T | SafeJSONAccess (const wpi::json &jsonData, const KeyType &key, const T &defaultValue) |
| void | from_json (const wpi::json &j, RetroreflectiveResultClass &t) |
| void | from_json (const wpi::json &j, FiducialResultClass &t) |
| void | from_json (const wpi::json &j, DetectionResultClass &t) |
| void | from_json (const wpi::json &j, BarcodeResultClass &t) |
| void | from_json (const wpi::json &j, ClassificationResultClass &t) |
| void | from_json (const wpi::json &j, VisionResultsClass &t) |
| void | from_json (const wpi::json &j, LimelightResultsClass &t) |
| LimelightResultsClass | getLatestResults (const std::string &limelightName="", bool profile=false) |
Variables | |
| std::unordered_map< std::string, nt::DoubleArrayEntry > | doubleArrayEntries |
| const double | INVALID_TARGET = 0.0 |
|
inline |
|
inline |
|
inline |
Represents a Barcode Target Result extracted from JSON Output
Barcode family type (e.g. "QR", "DataMatrix", etc.)
Gets the decoded data content of the barcode
|
inline |
Represents a Neural Classifier Pipeline Result extracted from JSON Output
|
inline |
Represents a Neural Detector Pipeline Result extracted from JSON Output
|
inline |
Represents an AprilTag/Fiducial Target Result extracted from JSON Output
|
inline |
Limelight Results object, parsed from a Limelight's JSON results output.
|
inline |
Represents a Color/Retroreflective Target Result extracted from JSON Output
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
Gets the current neural classifier result class name.
| limelightName | Name of the Limelight camera |
|
inline |
Gets the classifier class index from the currently running neural classifier pipeline
| limelightName | Name of the Limelight camera |
|
inline |
Gets the active pipeline index.
| limelightName | Name of the Limelight camera |
|
inline |
Gets the current pipeline type.
| limelightName | Name of the Limelight camera |
|
inline |
Gets the primary neural detector result class name.
| limelightName | Name of the Limelight camera |
|
inline |
Gets the detector class index from the primary result of the currently running neural detector pipeline.
| limelightName | Name of the Limelight camera |
|
inline |
|
inline |
Gets the current IMU data from NetworkTables. IMU data is formatted as [robotYaw, Roll, Pitch, Yaw, gyroX, gyroY, gyroZ, accelX, accelY, accelZ]. Returns all zeros if data is invalid or unavailable.
| limelightName | Name/identifier of the Limelight |
|
inline |
Gets the full JSON results dump.
| limelightName | Name of the Limelight camera |
|
inline |
Gets the capture latency.
| limelightName | Name of the Limelight camera |
|
inline |
Gets the pipeline's processing latency contribution.
| limelightName | Name of the Limelight camera |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
T2D is an array that contains several targeting metrics
| limelightName | Name of the Limelight camera |
|
inline |
Gets the target area as a percentage of the image (0-100%).
| limelightName | Name of the Limelight camera ("" for default) |
|
inline |
|
inline |
Gets the number of targets currently detected.
| limelightName | Name of the Limelight camera |
|
inline |
|
inline |
|
inline |
Does the Limelight have a valid target?
| limelightName | Name of the Limelight camera ("" for default) |
|
inline |
Gets the horizontal offset from the crosshair to the target in degrees.
| limelightName | Name of the Limelight camera ("" for default) |
|
inline |
Gets the horizontal offset from the principal pixel/point to the target in degrees. This is the most accurate 2d metric if you are using a calibrated camera and you don't need adjustable crosshair functionality.
| limelightName | Name of the Limelight camera ("" for default) |
|
inline |
Gets the vertical offset from the crosshair to the target in degrees.
| limelightName | Name of the Limelight camera ("" for default) |
|
inline |
Gets the vertical offset from the principal pixel/point to the target in degrees. This is the most accurate 2d metric if you are using a calibrated camera and you don't need adjustable crosshair functionality.
| limelightName | Name of the Limelight camera ("" for default) |
|
inline |
|
inline |
Converts a Pose2d object to an array of doubles in the format [x, y, z, roll, pitch, yaw]. Translation components are in meters, rotation components are in degrees. Note: z, roll, and pitch will be 0 since Pose2d only contains x, y, and yaw.
| pose | The Pose2d object to convert |
|
inline |
Converts a Pose3d object to an array of doubles in the format [x, y, z, roll, pitch, yaw]. Translation components are in meters, rotation components are in degrees.
| pose | The Pose3d object to convert |
| T LimelightHelpers::SafeJSONAccess | ( | const wpi::json & | jsonData, |
| const KeyType & | key, | ||
| const T & | defaultValue ) |
|
inline |
|
inline |
Sets the camera pose in robot space. The camera pose in the UI must be set to zeros.
|
inline |
Sets the crop window for the camera. The crop window in the UI must be completely open.
| limelightName | Name of the Limelight camera |
| cropXMin | Minimum X value (-1 to 1) |
| cropXMax | Maximum X value (-1 to 1) |
| cropYMin | Minimum Y value (-1 to 1) |
| cropYMax | Maximum Y value (-1 to 1) |
|
inline |
Sets the 3D point-of-interest offset for the current fiducial pipeline. https://docs.limelightvision.io/docs/docs-limelight/pipeline-apriltag/apriltag-3d#point-of-interest-tracking
| limelightName | Name/identifier of the Limelight |
| x | X offset in meters |
| y | Y offset in meters |
| z | Z offset in meters |
|
inline |
Sets 3D offset point for easy 3D targeting.
|
inline |
|
inline |
Configures the IMU mode for MegaTag2 Localization
| limelightName | Name/identifier of the Limelight |
| mode | IMU mode. |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
Sets the robot orientation for mt2.
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
|
inline |
Takes a 6-length array of pose data and converts it to a Pose2d object. Uses only x, y, and yaw components, ignoring z, roll, and pitch. Array format: [x, y, z, roll, pitch, yaw] where angles are in degrees.
| inData | Array containing pose data [x, y, z, roll, pitch, yaw] |
|
inline |
Takes a 6-length array of pose data and converts it to a Pose3d object. Array format: [x, y, z, roll, pitch, yaw] where angles are in degrees.
| inData | Array containing pose data [x, y, z, roll, pitch, yaw] |
|
inline |
|
inline |
|
inline |