Skip to content

Commit

Permalink
Add HiwinRA6201621Reacher task
Browse files Browse the repository at this point in the history
  • Loading branch information
j3soon committed Nov 22, 2023
1 parent 3611b83 commit e553de1
Show file tree
Hide file tree
Showing 25 changed files with 2,426 additions and 9 deletions.
2 changes: 2 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Ignore the entire build context: the Dockerfile uses no COPY/ADD of project
# files, so excluding everything keeps the upload to the daemon minimal.
*
693 changes: 693 additions & 0 deletions .vscode/settings.json

Large diffs are not rendered by default.

29 changes: 29 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# Ref: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/isaac-sim
FROM nvcr.io/nvidia/isaac-sim:2023.1.0-hotfix.1

# Path of the Isaac Sim installation shipped inside the base image.
ENV ISAAC_SIM=/isaac-sim
WORKDIR /root
# Install common tools.
# --no-install-recommends and the apt-list cleanup keep the layer small; the
# cleanup must happen in the same RUN, or the cache persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        vim \
        wget \
    && rm -rf /var/lib/apt/lists/*
# Download and install Anaconda in batch (non-interactive) mode.
# Ref: https://www.anaconda.com/products/distribution#Downloads
# The installer script is removed in the same layer so the large (~700 MB)
# file does not remain baked into the image.
# NOTE(review): consider verifying the installer's sha256 before executing it.
RUN wget -q https://repo.anaconda.com/archive/Anaconda3-2022.10-Linux-x86_64.sh \
    && bash Anaconda3-2022.10-Linux-x86_64.sh -b -p $HOME/anaconda3 \
    && rm Anaconda3-2022.10-Linux-x86_64.sh
# Patch Isaac Sim 2023.1.0, keeping a .bak of the original script for rollback.
# Ref: https://github.com/j3soon/isaac-extended
RUN git clone https://github.com/j3soon/isaac-extended.git \
    && cp $ISAAC_SIM/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh.bak \
    && cp ~/isaac-extended/isaac_sim-2023.1.0-patch/linux/setup_python_env.sh $ISAAC_SIM/setup_python_env.sh
# Set up the conda environment for Isaac Sim.
# Ref: https://docs.omniverse.nvidia.com/isaacsim/latest/installation/install_python.html#advanced-running-with-anaconda
# To rebuild the env from scratch first run: conda remove --name isaac-sim --all
RUN . ~/anaconda3/etc/profile.d/conda.sh \
    && cd $ISAAC_SIM \
    && conda env create -f environment.yml
# Auto-activate the conda environment for interactive shells.
RUN echo ". ~/anaconda3/etc/profile.d/conda.sh" >> ~/.bashrc \
    && echo "conda activate isaac-sim" >> ~/.bashrc \
    && echo ". ${ISAAC_SIM}/setup_conda_env.sh" >> ~/.bashrc

WORKDIR /isaac-sim
1 change: 1 addition & 0 deletions LICENSE.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ Omniverse Isaac Gym Reinforcement Learning Environments for Isaac Sim
BSD 3-Clause License

Copyright (c) 2018-2022, NVIDIA Corporation
Copyright (c) 2022-2023, Johnson Sun
All rights reserved.

Redistribution and use in source and binary forms, with or without
Expand Down
276 changes: 267 additions & 9 deletions README.md

Large diffs are not rendered by default.

Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/media/HiwinRA6201621Reacher-Vectorized.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/media/logos/elsalab.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/media/logos/nvaitc.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/media/logos/rccn.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added docs/media/social-preview.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
1 change: 1 addition & 0 deletions omniisaacgymenvs/cfg/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -72,3 +72,4 @@ hydra:
run:
dir: .

use_urdf: False
126 changes: 126 additions & 0 deletions omniisaacgymenvs/cfg/task/HiwinRA6201621Reacher.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
# Task configuration for the HiwinRA6201621Reacher environment.
# Ref: /omniisaacgymenvs/cfg/task/ShadowHand.yaml
# used to create the object
name: HiwinRA6201621Reacher

physics_engine: ${..physics_engine}

# if given, will override the device setting in gym.
env:
  numEnvs: ${resolve_default:2048,${...num_envs}}
  envSpacing: 4
  episodeLength: 600

  clipObservations: 5.0
  clipActions: 1.0

  useRelativeControl: False
  dofSpeedScale: 20.0
  actionsMovingAverage: 1.0
  controlFrequencyInv: 2 # 60 Hz

  # Randomization applied at episode start / reset.
  startPositionNoise: 0.01
  startRotationNoise: 0.0

  resetPositionNoise: 0.01
  resetRotationNoise: 0.0
  resetDofPosRandomInterval: 0.2
  resetDofVelRandomInterval: 0.0

  # Random forces applied to the object
  forceScale: 0.0
  forceProbRange: [0.001, 0.1]
  forceDecay: 0.99
  forceDecayInterval: 0.08

  # reward -> dictionary (negative scales act as penalties)
  distRewardScale: -2.0
  rotRewardScale: 1.0
  rotEps: 0.1
  actionPenaltyScale: -0.0002
  reachGoalBonus: 250
  velObsScale: 0.2

  observationType: "full" # can only be "full"
  successTolerance: 0.1
  printNumSuccesses: False
  maxConsecutiveSuccesses: 0

  # Load the robot from URDF instead of USD when enabled (CLI override: use_urdf).
  useURDF: ${resolve_default:True,${...use_urdf}}

sim:
  dt: 0.0083 # 1/120 s
  add_ground_plane: True
  add_distant_light: False
  use_gpu_pipeline: ${eq:${...pipeline},"gpu"}
  use_fabric: True
  enable_scene_query_support: False
  disable_contact_processing: False

  # set to True if you use camera sensors in the environment
  enable_cameras: False

  default_material:
    static_friction: 1.0
    dynamic_friction: 1.0
    restitution: 0.0

  physx:
    # per-scene
    use_gpu: ${eq:${....sim_device},"gpu"} # set to False to run on CPU
    worker_thread_count: ${....num_threads}
    solver_type: ${....solver_type} # 0: PGS, 1: TGS
    bounce_threshold_velocity: 0.2
    friction_offset_threshold: 0.04
    friction_correlation_distance: 0.025
    enable_sleeping: True
    enable_stabilization: True
    # GPU buffers
    gpu_max_rigid_contact_count: 1048576
    gpu_max_rigid_patch_count: 33554432
    gpu_found_lost_pairs_capacity: 20971520
    gpu_found_lost_aggregate_pairs_capacity: 20971520
    gpu_total_aggregate_pairs_capacity: 20971520
    gpu_max_soft_body_contacts: 1048576
    gpu_max_particle_contacts: 1048576
    gpu_heap_capacity: 33554432
    gpu_temp_buffer_capacity: 16777216
    gpu_max_num_partitions: 8

  # Physics overrides for the Hiwin robot articulation.
  hiwin:
    # -1 to use default values
    override_usd_defaults: False
    enable_self_collisions: False
  # Physics overrides for the target object.
  object:
    # -1 to use default values
    override_usd_defaults: False
    make_kinematic: True
    enable_self_collisions: False
    enable_gyroscopic_forces: True
    # also in stage params
    # per-actor
    solver_position_iteration_count: 8
    solver_velocity_iteration_count: 0
    sleep_threshold: 0.000
    stabilization_threshold: 0.0025
    # per-body
    density: -1
    max_depenetration_velocity: 1000.0
  # Physics overrides for the goal-marker object (same values as `object`).
  goal_object:
    # -1 to use default values
    override_usd_defaults: False
    make_kinematic: True
    enable_self_collisions: False
    enable_gyroscopic_forces: True
    # also in stage params
    # per-actor
    solver_position_iteration_count: 8
    solver_velocity_iteration_count: 0
    sleep_threshold: 0.000
    stabilization_threshold: 0.0025
    # per-body
    density: -1
    max_depenetration_velocity: 1000.0
# Sim-to-real bridge settings; disabled by default.
sim2real:
  enabled: False
  # NOTE(review): key name is misspelled ("quietely") — it must match the
  # lookup in the Python reader, so do not rename it here without updating
  # the reader as well.
  fail_quietely: False
  verbose: False
79 changes: 79 additions & 0 deletions omniisaacgymenvs/cfg/train/HiwinRA6201621ReacherPPO.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
# rl_games PPO training configuration for the HiwinRA6201621Reacher task.
# Ref: /omniisaacgymenvs/cfg/train/ShadowHandPPO.yaml
params:
  seed: ${...seed}
  # Continuous-action actor-critic algorithm.
  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  # Policy/value network architecture.
  network:
    name: actor_critic
    separate: False

    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0
        fixed_sigma: True
    mlp:
      units: [256, 128, 64]
      activation: elu
      d2rl: False

      initializer:
        name: default
      regularizer:
        name: None

  # Resume from a checkpoint only when one is supplied on the CLI.
  load_checkpoint: ${if:${...checkpoint},True,False}
  load_path: ${...checkpoint}

  # PPO hyperparameters and run settings.
  config:
    name: ${resolve_default:HiwinRA6201621Reacher,${....experiment}}
    full_experiment_name: ${.name}
    device: ${....rl_device}
    device_name: ${....rl_device}
    env_name: rlgpu
    multi_gpu: ${....multi_gpu}
    ppo: True
    mixed_precision: False
    normalize_input: True
    normalize_value: True
    value_bootstrap: True
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 0.01
    normalize_advantage: True
    gamma: 0.99
    tau: 0.95
    learning_rate: 5e-3
    lr_schedule: adaptive
    schedule_type: standard
    kl_threshold: 0.02
    score_to_win: 100000
    max_epochs: ${resolve_default:5000,${....max_iterations}}
    save_best_after: 100
    save_frequency: 200
    print_stats: True
    grad_norm: 1.0
    entropy_coef: 0.0
    truncate_grads: True
    e_clip: 0.2
    horizon_length: 64
    minibatch_size: 32768
    mini_epochs: 5
    critic_coef: 4
    clip_value: True
    seq_length: 4
    bounds_loss_coef: 0.0001

    # Settings used when evaluating a trained policy (rlgames play mode).
    player:
      deterministic: True
      games_num: 100000
      print_stats: True
Loading

0 comments on commit e553de1

Please sign in to comment.