[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-sequence-modeling":3},{"tag":4,"articles":10},{"id":5,"name":6,"slug":7,"article_count":8,"description_zh":9,"description_en":9},"d523de6b-ce33-4fdc-88a8-56c5da9e9c9b","sequence modeling","sequence-modeling",1,null,[11],{"id":12,"slug":13,"title":14,"summary":15,"category":16,"image_url":17,"cover_image":17,"language":18,"created_at":19},"c1aac50e-0c41-471c-946e-329652f04565","sessa-attention-inside-state-space-memory-en","Sessa: Attention and State-Space Memory for Long Context","Sessa mixes attention with recurrent state-space feedback to improve long-context recall, with power-law memory tails and strong benchmark results.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1776751621598-1d0l.png","en","2026-04-21T06:06:37.564074+00:00"]