# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import sys
import unittest

from parameterized import parameterized_class

from tests.testing_utils import argv_context_guard, load_test_config

from .testing_utils import LLMTest

@parameterized_class(
    ["model_dir"],
    [["llama"], ["chatglm"], ["bloom"], ["chatglm2"], ["qwen"], ["baichuan"]],
)
class FinetuneTest(LLMTest, unittest.TestCase):
    """End-to-end finetuning smoke test, parameterized over model directories.

    For each ``model_dir`` injected by ``@parameterized_class``, runs the
    model-specific ``finetune_generation`` script against the fixture config,
    then exercises the predictor on the finetuned output.
    """

    # Path to the YAML fixture holding per-model "finetune" configurations.
    config_path: str = "./tests/fixtures/llm/finetune.yaml"
    # Injected by @parameterized_class; None only on the unparameterized base
    # class, so the annotation must admit None (was annotated plain `str`).
    model_dir: str | None = None

    def setUp(self) -> None:
        """Prepare the base LLM fixture and make the model dir importable."""
        LLMTest.setUp(self)
        # Lets `from finetune_generation import main` below resolve against
        # the model-specific directory (e.g. ./llama/finetune_generation.py).
        # NOTE(review): the entry is presumably removed by LLMTest.tearDown —
        # confirm, otherwise sys.path grows across parameterized runs.
        sys.path.insert(0, self.model_dir)

    def tearDown(self) -> None:
        """Tear down the base LLM fixture."""
        LLMTest.tearDown(self)

    def test_finetune(self):
        """Run finetuning end-to-end, then run the predictor on the output."""
        finetune_config = load_test_config(self.config_path, "finetune", self.model_dir)

        # Redirect data in / checkpoints out to the per-test temp dirs
        # provided by the LLMTest fixture.
        finetune_config["dataset_name_or_path"] = self.data_dir
        finetune_config["output_dir"] = self.output_dir

        # argv_context_guard exposes the config dict as CLI args for main().
        with argv_context_guard(finetune_config):
            from finetune_generation import main

            main()

        # TODO(wj-Mcat): disable chatglm2 test temporarily
        if self.model_dir not in ("qwen", "baichuan", "chatglm2"):
            self.run_predictor({"inference_model": True})

        self.run_predictor({"inference_model": False})