[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-evaluation-framework":3},{"tag":4,"articles":10},{"id":5,"name":6,"slug":7,"article_count":8,"description_zh":9,"description_en":9},"d41de7ba-bb7e-4c9f-8ec6-21fce21ef240","evaluation framework","evaluation-framework",1,null,[11],{"id":12,"slug":13,"title":14,"summary":15,"category":16,"image_url":17,"cover_image":17,"language":18,"created_at":19},"b712257f-129d-400a-bc73-5e1c3ab200a4","avise-ai-security-evaluation-framework-en","AVISE tests AI security with modular jailbreak evals","AVISE is an open-source framework for finding AI vulnerabilities, with a 25-case jailbreak test that flagged all nine models as vulnerable.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1776924767358-ocir.png","en","2026-04-23T06:12:31.125572+00:00"]