{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"OFA","owner":"OFA-Sys","isFork":false,"description":"Official repository of OFA (ICML 2022). Paper: OFA: Unifying Architectures, Tasks, and Modalities Through a Simple Sequence-to-Sequence Learning Framework","topicNames":["prompt","chinese","image-captioning","visual-question-answering","multimodal","text-to-image-synthesis","vision-language","pretraining","referring-expression-comprehension","prompt-tuning"],"topicsNotShown":1,"allTopics":["prompt","chinese","image-captioning","visual-question-answering","multimodal","text-to-image-synthesis","vision-language","pretraining","referring-expression-comprehension","prompt-tuning","pretrained-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":3,"issueCount":105,"starsCount":2339,"forksCount":245,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-24T06:20:34.258Z"}},{"type":"Public","name":"gsm8k-ScRel","owner":"OFA-Sys","isFork":false,"description":"Codes and Data for Scaling Relationship on Learning Mathematical Reasoning with Large Language Models","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":4,"starsCount":171,"forksCount":16,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-22T13:40:04.059Z"}},{"type":"Public","name":"Ditto","owner":"OFA-Sys","isFork":false,"description":"A self-alignment method for role-play. Benchmark for role-play. Resources for \"Large Language Models are Superpositions of All Characters: Attaining Arbitrary Role-play via Self-Alignment\".","topicNames":["benchmark","roleplay","llm"],"topicsNotShown":0,"allTopics":["benchmark","roleplay","llm"],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":5,"starsCount":113,"forksCount":9,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-01T03:08:11.680Z"}},{"type":"Public","name":"TouchStone","owner":"OFA-Sys","isFork":false,"description":"Touchstone: Evaluating Vision-Language Models by Language Models","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":74,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-18T10:17:57.615Z"}},{"type":"Public","name":"DiverseEvol","owner":"OFA-Sys","isFork":false,"description":"Self-Evolved Diverse Data Sampling for Efficient Instruction Tuning","topicNames":["nlp","efficiency","self-improving","data-centric-ai","large-language-models","instruction-tuning"],"topicsNotShown":0,"allTopics":["nlp","efficiency","self-improving","data-centric-ai","large-language-models","instruction-tuning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":60,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-14T13:54:32.620Z"}},{"type":"Public","name":"ONE-PEACE","owner":"OFA-Sys","isFork":false,"description":"A general representation model across vision, audio, language modalities. Paper: ONE-PEACE: Exploring One General Representation Model Toward Unlimited Modalities","topicNames":["representation-learning","multimodal","vision-and-language","contrastive-loss","vision-language","vision-transformer","foundation-models","audio-language"],"topicsNotShown":0,"allTopics":["representation-learning","multimodal","vision-and-language","contrastive-loss","vision-language","vision-transformer","foundation-models","audio-language"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":8,"starsCount":855,"forksCount":52,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-05T03:58:25.897Z"}},{"type":"Public","name":"Chinese-CLIP","owner":"OFA-Sys","isFork":false,"description":"Chinese version of CLIP which achieves Chinese cross-modal retrieval and representation generation.","topicNames":["nlp","computer-vision","deep-learning","transformers","pytorch","chinese","pretrained-models","multi-modal","clip","coreml-models"],"topicsNotShown":5,"allTopics":["nlp","computer-vision","deep-learning","transformers","pytorch","chinese","pretrained-models","multi-modal","clip","coreml-models","contrastive-loss","vision-language","multi-modal-learning","image-text-retrieval","vision-and-language-pre-training"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":119,"starsCount":3755,"forksCount":403,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-29T16:27:58.517Z"}},{"type":"Public","name":"InsTag","owner":"OFA-Sys","isFork":false,"description":"InsTag: A Tool for Data Analysis in LLM Supervised Fine-tuning","topicNames":["nlp","natural-language-processing","tagging","alignment","llama","large-language-models","llama2"],"topicsNotShown":0,"allTopics":["nlp","natural-language-processing","tagging","alignment","llama","large-language-models","llama2"],"primaryLanguage":null,"pullRequestCount":0,"issueCount":6,"starsCount":127,"forksCount":5,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-08-20T14:57:08.922Z"}},{"type":"Public","name":"ExpertLLaMA","owner":"OFA-Sys","isFork":false,"description":"An opensource ChatBot built with ExpertPrompting which achieves 96% of ChatGPT's capability.","topicNames":["alignment","llama","alpaca","vicuna","chatgpt"],"topicsNotShown":0,"allTopics":["alignment","llama","alpaca","vicuna","chatgpt"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":288,"forksCount":19,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-05-31T09:08:39.191Z"}},{"type":"Public","name":"diffusion-deploy","owner":"OFA-Sys","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":49,"forksCount":6,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-03-12T16:04:00.133Z"}},{"type":"Public archive","name":"MoFusion","owner":"OFA-Sys","isFork":false,"description":"Pretrained Diffusion Models for Unified Human Motion Synthesis","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":null,"pullRequestCount":0,"issueCount":1,"starsCount":18,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-02-28T10:48:30.597Z"}},{"type":"Public","name":"OFASys","owner":"OFA-Sys","isFork":false,"description":"OFASys: A Multi-Modal Multi-Task Learning System for Building Generalist Models","topicNames":["audio","nlp","computer-vision","deep-learning","motion","transformers","pretrained-models","multimodal-learning","vision-and-language","multitask-learning"],"topicsNotShown":1,"allTopics":["audio","nlp","computer-vision","deep-learning","motion","transformers","pretrained-models","multimodal-learning","vision-and-language","multitask-learning","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":7,"starsCount":142,"forksCount":10,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-01-07T09:02:22.634Z"}},{"type":"Public","name":"Demo_CTTS","owner":"OFA-Sys","isFork":false,"description":"present speech demo of FTTS","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"HTML","color":"#e34c26"},"pullRequestCount":0,"issueCount":0,"starsCount":5,"forksCount":0,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-11-04T04:23:50.621Z"}},{"type":"Public","name":"DAFlow","owner":"OFA-Sys","isFork":false,"description":"","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":12,"starsCount":124,"forksCount":16,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-09T06:28:57.417Z"}},{"type":"Public","name":"OFA-Compress","owner":"OFA-Sys","isFork":false,"description":"OFA-Compress is a unified framework which provides OFA model finetuning, distillation and inference capabilities in Huggingface version, and is committed to promoting the lightweighting of large models.","topicNames":[],"topicsNotShown":0,"allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":27,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-09-22T05:41:17.464Z"}}],"repositoryCount":15,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"Repositories"}