[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-attention-mechanisms":3},{"tag":4,"articles":10},{"id":5,"name":6,"slug":7,"article_count":8,"description_zh":9,"description_en":9},"d6c8ffd7-70e3-4b99-b34c-da4673fe1960","attention mechanisms","attention-mechanisms",1,null,[11],{"id":12,"slug":13,"title":14,"summary":15,"category":16,"image_url":17,"cover_image":17,"language":18,"created_at":19},"cdcfe76f-c9bf-44ac-98d9-e9041d414d6c","sebastian-raschka-llm-architecture-gallery-en","Sebastian Raschka’s LLM Architecture Gallery","Raschka’s gallery compares GPT-2, Llama 3, OLMo 2, DeepSeek, and Qwen stacks with exact layer, cache, and attention data.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1775121663908-8tcs.png","en","2026-04-02T07:27:33.848813+00:00"]