{ "type": "act", "n_obs_steps": 1, "input_features": { "observation.state": { "type": "STATE", "shape": [ 36 ] }, "observation.images.egocentric": { "type": "VISUAL", "shape": [ 3, 480, 640 ] } }, "output_features": { "action": { "type": "ACTION", "shape": [ 36 ] } }, "device": "cuda", "use_amp": false, "push_to_hub": true, "repo_id": "steb6/act-lc-stand-wave-sim-split", "private": null, "tags": null, "license": null, "pretrained_path": null, "chunk_size": 20, "n_action_steps": 20, "normalization_mapping": { "VISUAL": "MEAN_STD", "STATE": "MEAN_STD", "ACTION": "MEAN_STD" }, "vision_backbone": "resnet18", "pretrained_backbone_weights": "ResNet18_Weights.IMAGENET1K_V1", "replace_final_stride_with_dilation": false, "pre_norm": false, "dim_model": 512, "n_heads": 8, "dim_feedforward": 3200, "feedforward_activation": "relu", "n_encoder_layers": 4, "n_decoder_layers": 1, "use_vae": true, "latent_dim": 32, "n_vae_encoder_layers": 4, "temporal_ensemble_coeff": null, "dropout": 0.1, "kl_weight": 10.0, "use_language_conditioning": true, "language_encoder_type": "clip", "language_model_name": "openai/clip-vit-base-patch32", "freeze_language_encoder": true, "language_projection_dim": null, "language_dropout": 0.0, "max_token_length": 77, "language_pooling": "cls", "optimizer_lr": 1e-05, "optimizer_weight_decay": 0.0001, "optimizer_lr_backbone": 1e-05 }