{"payload":{"header_redesign_enabled":false,"results":[{"id":"646410686","archived":false,"color":"#3572A5","followers":22350,"has_funding_file":false,"hl_name":"hiyouga/LLaMA-Factory","hl_trunc_description":"Unify Efficient Fine-Tuning of 100+ LLMs","language":"Python","mirror":false,"owned_by_organization":false,"public":true,"repo":{"repository":{"id":646410686,"name":"LLaMA-Factory","owner_id":16256802,"owner_login":"hiyouga","updated_at":"2024-05-18T08:13:21.116Z","has_issues":true}},"sponsorable":false,"topics":["agent","ai","transformers","moe","llama","gpt","lora","quantization","language-model","mistral","fine-tuning","peft","large-language-models","llm","rlhf","instruction-tuning","chatglm","qlora","qwen","llama3"],"type":"Public","help_wanted_issues_count":0,"good_first_issue_issues_count":1,"starred_by_current_user":false}],"type":"repositories","page":1,"page_count":1,"elapsed_millis":73,"errors":[],"result_count":1,"facets":[],"protected_org_logins":[],"topics":null,"query_id":"","logged_in":false,"sign_up_path":"/signup?source=code_search_results","sign_in_path":"/login?return_to=https%3A%2F%2Fgithub.com%2Fsearch%3Fq%3Drepo%253Ahiyouga%252FLLaMA-Factory%2B%2Blanguage%253APython","metadata":null,"csrf_tokens":{"/hiyouga/LLaMA-Factory/star":{"post":"llxleL-cyl3Lx4cXhBPS5ezpPX0v_d0OFWWZyB0NEnbvJGBdPbS-Vh5nCDpdbFI8Enwb4zBs_7hWldzwgIyVsw"},"/hiyouga/LLaMA-Factory/unstar":{"post":"aqzqVor44hKCk8XPsB_SwqF-VkjYVvQdp87pcG51jRvvZK-dLzWx1GW1Tk5Muv_AjizZCOwCIK53jjcMujayZQ"},"/sponsors/batch_deferred_sponsor_buttons":{"post":"JbiVQ_764uB8VD_4ciOu8c9LofEHft9eSgLbmewexazkfy206SMktWg1Fs_HJ_NQLWLeMFq9E_BNyI6t_fsq9Q"}}},"title":"Repository search results"}