diff --git a/allenact/base_abstractions/experiment_config.py b/allenact/base_abstractions/experiment_config.py index 5fd7c75d..72709c1f 100644 --- a/allenact/base_abstractions/experiment_config.py +++ b/allenact/base_abstractions/experiment_config.py @@ -177,9 +177,7 @@ def __setattr__(cls, attr, value): "Changing the values of class-level attributes is disabled in ExperimentConfig classes.\n" "This is to prevent problems that can occur otherwise when using multiprocessing.\n" "If you wish to change the value of a configuration, please do so for an instance of that" - " configuration.\nTriggered by attempting to modify {}".format( - cls.__name__ - ) + f" configuration.\nTriggered by attempting to modify {cls.__name__}'s {attr}." ) else: super().__setattr__(attr, value) diff --git a/allenact/main.py b/allenact/main.py index a26f5c2b..dd4bcf29 100755 --- a/allenact/main.py +++ b/allenact/main.py @@ -446,6 +446,9 @@ def load_config(args) -> Tuple[ExperimentConfig, Dict[str, str]]: config_kwargs, Dict ), "`--config_kwargs` must be a json string (or a path to a .json file) that evaluates to a dictionary." 
+ assert "task_batch_size" not in config_kwargs, "`task_batch_size` is added to the ExperimentConfig's kwargs by AllenAct" + config_kwargs["task_batch_size"] = args.task_batch_size + config = experiments[0](**config_kwargs) sources = _config_source(config_type=experiments[0]) sources[CONFIG_KWARGS_STR] = json.dumps(config_kwargs) diff --git a/tests/make_it_batch/experiment.py b/tests/make_it_batch/experiment.py index 9b4321c4..7a474ffc 100644 --- a/tests/make_it_batch/experiment.py +++ b/tests/make_it_batch/experiment.py @@ -226,18 +226,8 @@ class ObjectNavThorPPOExperimentConfig(ExperimentConfig): # VALID_SCENES = ["FloorPlan1_physics"] # TEST_SCENES = ["FloorPlan1_physics"] - # Setting up sensors and basic environment details + # Setting basic environment details SCREEN_SIZE = 224 - SENSORS = [ - BatchedRGBSensorThor( - height=SCREEN_SIZE, width=SCREEN_SIZE, use_resnet_normalization=True, - ), - # RGBSensorThor( - # height=SCREEN_SIZE, width=SCREEN_SIZE, use_resnet_normalization=True, - # ), # For non-batched - GoalObjectTypeThorSensor(object_types=OBJECT_TYPES), - ] - ENV_ARGS = { "player_screen_height": SCREEN_SIZE, "player_screen_width": SCREEN_SIZE, @@ -249,18 +239,28 @@ class ObjectNavThorPPOExperimentConfig(ExperimentConfig): # VALID_SAMPLES_IN_SCENE = 10 # TEST_SAMPLES_IN_SCENE = 100 - @classmethod - def tag(cls): - return "BatchedObjectNavThorPPO" + def __init__(self, *args, **kwargs): + self.task_batch_size = kwargs["task_batch_size"] + + # Setting up sensors + rgb_sensor_class = BatchedRGBSensorThor if self.task_batch_size > 0 else RGBSensorThor + self.SENSORS = [ + rgb_sensor_class( + height=self.SCREEN_SIZE, width=self.SCREEN_SIZE, use_resnet_normalization=True, + ), + GoalObjectTypeThorSensor(object_types=self.OBJECT_TYPES), + ] + + def tag(self): + return "BatchedObjectNavThorPPO" if self.task_batch_size > 0 else "ObjectNavThorPPO" - @classmethod - def training_pipeline(cls, **kwargs): + def training_pipeline(self, **kwargs): ppo_steps = int(1e6) lr 
= 2.5e-4 num_mini_batch = 2 if not torch.cuda.is_available() else 6 update_repeats = 4 - num_steps = cls.MAX_STEPS - metric_accumulate_interval = cls.MAX_STEPS * 1 # Log every 10 max length tasks + num_steps = self.MAX_STEPS + metric_accumulate_interval = self.MAX_STEPS * 1 # Log every max-length task save_interval = 10000 gamma = 0.99 use_gae = True @@ -281,7 +281,7 @@ def training_pipeline(cls, **kwargs): gamma=gamma, use_gae=use_gae, gae_lambda=gae_lambda, - advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD, + advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD, pipeline_stages=[ PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps,), ], @@ -290,8 +290,7 @@ ), ) - @classmethod - def machine_params(cls, mode="train", **kwargs): + def machine_params(self, mode="train", **kwargs): num_gpus = torch.cuda.device_count() has_gpu = num_gpus != 0 @@ -309,22 +308,20 @@ def machine_params(cls, mode="train", **kwargs): return MachineParams(nprocesses=nprocesses, devices=gpu_ids,) - @classmethod - def create_model(cls, **kwargs) -> nn.Module: + def create_model(self, **kwargs) -> nn.Module: return ObjectNavActorCritic( action_space=gym.spaces.Discrete( len(ObjectNaviThorGridTask.class_action_names()) ), - observation_space=SensorSuite(cls.SENSORS).observation_spaces, - rgb_uuid=cls.SENSORS[0].uuid, + observation_space=SensorSuite(self.SENSORS).observation_spaces, + rgb_uuid=self.SENSORS[0].uuid, depth_uuid=None, goal_sensor_uuid="goal_object_type_ind", hidden_size=512, object_type_embedding_dim=8, ) - @classmethod - def make_sampler_fn(cls, **kwargs) -> TaskSampler: + def make_sampler_fn(self, **kwargs) -> TaskSampler: return BatchedObjectNavTaskSampler(**kwargs) if kwargs.get("task_batch_size", 0) > 0 else ObjectNavTaskSampler(**kwargs) @staticmethod