[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-expert-routing":3},{"tag":4,"articles":10},{"id":5,"name":6,"slug":7,"article_count":8,"description_zh":9,"description_en":9},"847ce511-4ca9-43c6-8037-1ca270f1a9d6","expert routing","expert-routing",2,null,[11,20],{"id":12,"slug":13,"title":14,"summary":15,"category":16,"image_url":17,"cover_image":17,"language":18,"created_at":19},"1e4ba03d-b371-427a-8d9e-d694f09827b1","unipool-shared-expert-pool-moe-en","UniPool shares MoE experts across layers","UniPool replaces per-layer MoE experts with one shared pool, cutting redundancy and improving validation loss in five LLaMA-scale models.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1778221264459-eh59.png","en","2026-05-08T06:20:40.975202+00:00",{"id":21,"slug":22,"title":23,"summary":24,"category":16,"image_url":25,"cover_image":25,"language":18,"created_at":26},"10a60b90-b59c-47e7-a6e5-a7fba43c353a","multimodal-moe-routing-distraction-en","Why multimodal MoE models get distracted","A study of multimodal MoE models finds visual inputs can derail routing to reasoning experts, and a routing-guided fix improves results.","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1775801394754-ctzn.png","2026-04-10T06:09:35.090825+00:00"]