{ "loss_names": {"itc": 1}, "encoder_layers": 9, "beit3_vl_layers": 3, "tokenizer_type": "GLMChineseTokenizer", "tokenizer": "./vlmo/tokenizer", "vocab_size": 115244, "whole_word_masking": true, "precision": 32, "test_only": true, "flash_attn": true, "model_path": "m2_encoder_0.4B.ckpt", "modelscope": { "model_id": "M2Cognition/M2-Encoder" }, "model_file": "m2_encoder_0.4B.ckpt" }