[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-mixture-of-experts":3},{"tag":4,"articles":9},{"id":5,"name":6,"slug":6,"article_count":7,"description_zh":8,"description_en":8},"3558c57d-a009-4f0e-bf6a-20cea0b81732","mixture-of-experts",2,null,[10,19,26],{"id":11,"slug":12,"title":13,"summary":14,"category":15,"image_url":16,"cover_image":16,"language":17,"created_at":18},"1e4ba03d-b371-427a-8d9e-d694f09827b1","unipool-shared-expert-pool-moe-en","UniPool shares MoE experts across layers","UniPool replaces per-layer MoE experts with one shared pool, cutting redundancy and improving validation loss in five LLaMA-scale models.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1778221264459-eh59.png","en","2026-05-08T06:20:40.975202+00:00",{"id":20,"slug":21,"title":22,"summary":23,"category":15,"image_url":24,"cover_image":24,"language":17,"created_at":25},"cdcfe76f-c9bf-44ac-98d9-e9041d414d6c","sebastian-raschka-llm-architecture-gallery-en","Sebastian Raschka’s LLM Architecture Gallery","Raschka’s gallery compares GPT-2, Llama 3, OLMo 2, DeepSeek, and Qwen stacks with exact layer, cache, and attention data.","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1775121663908-8tcs.png","2026-04-02T07:27:33.848813+00:00",{"id":27,"slug":28,"title":29,"summary":30,"category":31,"image_url":8,"cover_image":32,"language":17,"created_at":33},"d23cd5f6-f875-49f5-b53b-1c5416d13d99","cursor-composer-2-agentic-coding-model-en","Cursor Composer 2 Bets on Agentic Coding","Cursor’s Composer 2 posts 61.3 on CursorBench and 61.7 on Terminal-Bench 2.0, with pricing aimed at high-volume coding teams.","model-release","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1774498610250-zp8n.png","2026-03-28T03:13:06.513673+00:00"]