@@ -95,18 +95,18 @@ def run_inference_pipeline(
         """
-        if self.inference_cfg.detection_type == "gt":
+        if self.inference_cfg['detection_type'] == "gt":
             detections = gt_detections
             run_detector = False
-        elif self.inference_cfg.detection_type == "detector":
+        elif self.inference_cfg['detection_type'] == "detector":
             detections = None
             run_detector = True
         else:
-            msg = f"Unknown detection type {self.inference_cfg.detection_type}"
+            msg = f"Unknown detection type {self.inference_cfg['detection_type']}"
             raise ValueError(msg)

         coarse_estimates = None
-        if self.inference_cfg.coarse_estimation_type == "external":
+        if self.inference_cfg['coarse_estimation_type'] == "external":
             # TODO (ylabbe): This is hacky, clean this for modelnet eval.
             coarse_estimates = initial_estimates
             coarse_estimates = happypose.toolbox.inference.utils.add_instance_id(
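Note on the pattern above: the inference config is now indexed as a plain mapping rather than accessed as attributes, so a missing field raises `KeyError` instead of `AttributeError`. A minimal sketch of the same dispatch with dict-style access; `select_detections` is a hypothetical helper for illustration, not code from this change:

```python
# Minimal sketch (hypothetical helper): dispatch on a dict-style inference config.
def select_detections(inference_cfg: dict, gt_detections):
    detection_type = inference_cfg['detection_type']  # raises KeyError if the key is absent
    if detection_type == "gt":
        # Use the provided ground-truth detections; no detector pass needed.
        return gt_detections, False
    if detection_type == "detector":
        # Defer detection to the detector inside the pipeline.
        return None, True
    msg = f"Unknown detection type {detection_type}"
    raise ValueError(msg)
```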
@@ -137,15 +137,15 @@ def run_inference_pipeline(
         all_preds = {}
         data_TCO_refiner = extra_data["refiner"]["preds"]

-        k_0 = f"refiner/iteration={self.inference_cfg.n_refiner_iterations}"
+        k_0 = f"refiner/iteration={self.inference_cfg['n_refiner_iterations']}"
         all_preds = {
             "final": preds,
             k_0: data_TCO_refiner,
             "refiner/final": data_TCO_refiner,
             "coarse": extra_data["coarse"]["preds"],
         }

-        if self.inference_cfg.run_depth_refiner:
+        if self.inference_cfg['run_depth_refiner']:
             all_preds["depth_refiner"] = extra_data["depth_refiner"]["preds"]

         # Remove any mask tensors
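For reference, the prediction dictionary assembled above ends up with the key layout sketched below (the config values here are placeholders, illustration only):

```python
# Hypothetical config values, only to show the resulting key layout of all_preds.
inference_cfg = {"n_refiner_iterations": 5, "run_depth_refiner": False}

n_iter = inference_cfg['n_refiner_iterations']
expected_keys = {
    "final",                        # refined poses returned as the final estimate
    f"refiner/iteration={n_iter}",  # last refiner iteration, same data as "refiner/final"
    "refiner/final",
    "coarse",                       # coarse estimates before refinement
}
if inference_cfg['run_depth_refiner']:
    expected_keys.add("depth_refiner")
```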
@@ -174,43 +174,46 @@ def get_predictions(
         """
         predictions_list = defaultdict(list)
         for n, data in enumerate(tqdm(self.dataloader)):
-            # data is a dict
-            rgb = data["rgb"]
-            depth = None
-            K = data["cameras"].K
-            gt_detections = data["gt_detections"].cuda()
-
-            initial_data = None
-            if data["initial_data"]:
-                initial_data = data["initial_data"].cuda()
-
-            obs_tensor = ObservationTensor.from_torch_batched(rgb, depth, K)
-            obs_tensor = obs_tensor.cuda()
-
-            # GPU warmup for timing
-            if n == 0:
+            if n < 3:
+                # data is a dict
+                rgb = data["rgb"]
+                depth = None
+                K = data["cameras"].K
+                gt_detections = data["gt_detections"].cuda()
+
+                initial_data = None
+                if data["initial_data"]:
+                    initial_data = data["initial_data"].cuda()
+
+                obs_tensor = ObservationTensor.from_torch_batched(rgb, depth, K)
+                obs_tensor = obs_tensor.cuda()
+
+                # GPU warmup for timing
+                if n == 0:
+                    with torch.no_grad():
+                        self.run_inference_pipeline(
+                            pose_estimator,
+                            obs_tensor,
+                            gt_detections,
+                            initial_estimates=initial_data,
+                        )
+
+                cuda_timer = CudaTimer()
+                cuda_timer.start()
                 with torch.no_grad():
-                    self.run_inference_pipeline(
+                    all_preds = self.run_inference_pipeline(
                         pose_estimator,
                         obs_tensor,
                         gt_detections,
                         initial_estimates=initial_data,
                     )
+                cuda_timer.end()
+                cuda_timer.elapsed()

-            cuda_timer = CudaTimer()
-            cuda_timer.start()
-            with torch.no_grad():
-                all_preds = self.run_inference_pipeline(
-                    pose_estimator,
-                    obs_tensor,
-                    gt_detections,
-                    initial_estimates=initial_data,
-                )
-            cuda_timer.end()
-            cuda_timer.elapsed()
-
-            for k, v in all_preds.items():
-                predictions_list[k].append(v)
+                for k, v in all_preds.items():
+                    predictions_list[k].append(v)
+            else:
+                break

         # Concatenate the lists of PandasTensorCollections
         predictions = {}
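The loop above now processes only the first three batches (`else: break`), runs an extra untimed pass on batch 0 as GPU warmup, and wraps each timed pipeline call in CudaTimer. A generic sketch of the same warmup-then-time pattern using `torch.cuda.Event` in place of happypose's CudaTimer; `run_once` is a hypothetical stand-in for the `self.run_inference_pipeline(...)` call:

```python
import torch

def time_inference(run_once):
    """Warm up once, then time a single GPU inference pass.

    `run_once` is a hypothetical zero-argument callable standing in for
    the pipeline call in the diff above.
    """
    with torch.no_grad():
        run_once()  # warmup: the first call pays for CUDA context setup / kernel compilation

    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    with torch.no_grad():
        preds = run_once()  # the timed call
    end.record()
    torch.cuda.synchronize()  # events are recorded asynchronously; sync before reading
    return preds, start.elapsed_time(end)  # elapsed time in milliseconds
```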