Upload 244 files
Note: this view is limited to 50 files because the commit contains too many changes; see the raw diff for the full changeset.
- .gitattributes +25 -0
- Dockerfile +83 -0
- Dockerfile.api +3 -0
- Dockerfile.app +2 -0
- LICENSE +202 -0
- Makefile +39 -0
- Manifest.in +2 -0
- README.md +133 -10
- colabs/AutoTrain.ipynb +50 -0
- colabs/AutoTrain_LLM.ipynb +157 -0
- colabs/AutoTrain_ngrok.ipynb +52 -0
- colabs/image_classification.ipynb +63 -0
- configs/extractive_question_answering/hub_dataset.yml +30 -0
- configs/extractive_question_answering/local_dataset.yml +30 -0
- configs/image_classification/hub_dataset.yml +27 -0
- configs/image_classification/local.yml +27 -0
- configs/image_scoring/hub_dataset.yml +27 -0
- configs/image_scoring/image_quality.yml +27 -0
- configs/image_scoring/local.yml +28 -0
- configs/llm_finetuning/gpt2_sft.yml +32 -0
- configs/llm_finetuning/llama3-70b-orpo-v1.yml +36 -0
- configs/llm_finetuning/llama3-70b-sft.yml +33 -0
- configs/llm_finetuning/llama3-8b-dpo-qlora.yml +36 -0
- configs/llm_finetuning/llama3-8b-orpo-space.yml +36 -0
- configs/llm_finetuning/llama3-8b-orpo.yml +36 -0
- configs/llm_finetuning/llama3-8b-sft-unsloth.yml +36 -0
- configs/llm_finetuning/llama32-1b-sft.yml +34 -0
- configs/llm_finetuning/qwen.yml +34 -0
- configs/llm_finetuning/smollm2.yml +34 -0
- configs/llm_finetuning/smollm2_guanaco.yml +34 -0
- configs/llm_finetuning/smollm2_orpo.yml +36 -0
- configs/object_detection/hub_dataset.yml +31 -0
- configs/object_detection/local.yml +31 -0
- configs/sentence_transformers/local_dataset.yml +29 -0
- configs/sentence_transformers/pair.yml +28 -0
- configs/sentence_transformers/pair_class.yml +29 -0
- configs/sentence_transformers/pair_score.yml +29 -0
- configs/sentence_transformers/qa.yml +28 -0
- configs/sentence_transformers/triplet.yml +29 -0
- configs/seq2seq/hub_dataset.yml +28 -0
- configs/seq2seq/local.yml +29 -0
- configs/text_classification/hub_dataset.yml +28 -0
- configs/text_classification/local_dataset.yml +28 -0
- configs/text_regression/hub_dataset.yml +28 -0
- configs/text_regression/local_dataset.yml +28 -0
- configs/token_classification/hub_dataset.yml +28 -0
- configs/token_classification/local_dataset.yml +28 -0
- configs/vlm/paligemma_vqa.yml +30 -0
- docs/README.md +58 -0
- docs/source/_toctree.yml +46 -0
.gitattributes
CHANGED
@@ -33,3 +33,28 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+static/autotrain_homepage.png filter=lfs diff=lfs merge=lfs -text
+static/autotrain_model_choice.png filter=lfs diff=lfs merge=lfs -text
+static/autotrain_space.png filter=lfs diff=lfs merge=lfs -text
+static/autotrain_text_classification.png filter=lfs diff=lfs merge=lfs -text
+static/cost.png filter=lfs diff=lfs merge=lfs -text
+static/dreambooth1.jpeg filter=lfs diff=lfs merge=lfs -text
+static/dreambooth2.png filter=lfs diff=lfs merge=lfs -text
+static/duplicate_space.png filter=lfs diff=lfs merge=lfs -text
+static/ext_qa.png filter=lfs diff=lfs merge=lfs -text
+static/hub_model_choice.png filter=lfs diff=lfs merge=lfs -text
+static/image_classification_1.png filter=lfs diff=lfs merge=lfs -text
+static/img_reg_ui.png filter=lfs diff=lfs merge=lfs -text
+static/llm_1.png filter=lfs diff=lfs merge=lfs -text
+static/llm_2.png filter=lfs diff=lfs merge=lfs -text
+static/llm_3.png filter=lfs diff=lfs merge=lfs -text
+static/llm_orpo_example.png filter=lfs diff=lfs merge=lfs -text
+static/model_choice_1.png filter=lfs diff=lfs merge=lfs -text
+static/param_choice_1.png filter=lfs diff=lfs merge=lfs -text
+static/param_choice_2.png filter=lfs diff=lfs merge=lfs -text
+static/space_template_1.png filter=lfs diff=lfs merge=lfs -text
+static/space_template_2.png filter=lfs diff=lfs merge=lfs -text
+static/space_template_3.png filter=lfs diff=lfs merge=lfs -text
+static/space_template_4.png filter=lfs diff=lfs merge=lfs -text
+static/space_template_5.png filter=lfs diff=lfs merge=lfs -text
+static/text_classification_1.png filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,83 @@
FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive \
    TZ=UTC \
    HF_HUB_ENABLE_HF_TRANSFER=1

ENV PATH="${HOME}/miniconda3/bin:${PATH}"
ARG PATH="${HOME}/miniconda3/bin:${PATH}"
ENV PATH="/app/ngc-cli:${PATH}"
ARG PATH="/app/ngc-cli:${PATH}"

RUN mkdir -p /tmp/model && \
    chown -R 1000:1000 /tmp/model && \
    mkdir -p /tmp/data && \
    chown -R 1000:1000 /tmp/data

RUN apt-get update && \
    apt-get upgrade -y && \
    apt-get install -y \
        build-essential \
        cmake \
        curl \
        ca-certificates \
        gcc \
        git \
        locales \
        net-tools \
        wget \
        libpq-dev \
        libsndfile1-dev \
        git \
        git-lfs \
        libgl1 \
        unzip \
        libjpeg-dev \
        libpng-dev \
        libgomp1 \
    && rm -rf /var/lib/apt/lists/* && \
    apt-get clean

RUN curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash && \
    git lfs install

WORKDIR /app
RUN mkdir -p /app/.cache
ENV HF_HOME="/app/.cache"
RUN useradd -m -u 1000 user
RUN chown -R user:user /app
USER user
ENV HOME=/app

ENV PYTHONPATH=$HOME/app \
    PYTHONUNBUFFERED=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    SYSTEM=spaces

RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \
    && sh Miniconda3-latest-Linux-x86_64.sh -b -p /app/miniconda \
    && rm -f Miniconda3-latest-Linux-x86_64.sh
ENV PATH /app/miniconda/bin:$PATH

RUN conda create -p /app/env -y python=3.10

SHELL ["conda", "run", "--no-capture-output", "-p", "/app/env", "/bin/bash", "-c"]

RUN conda install pytorch==2.4.0 torchvision==0.19.0 torchaudio==2.4.0 pytorch-cuda=12.1 -c pytorch -c nvidia && \
    conda clean -ya && \
    conda install -c "nvidia/label/cuda-12.1.1" cuda-nvcc && conda clean -ya && \
    conda install xformers -c xformers && conda clean -ya

COPY --chown=1000:1000 . /app/

RUN pip install -e . && \
    python -m nltk.downloader punkt && \
    pip install -U ninja && \
    pip install -U flash-attn --no-build-isolation && \
    pip install -U deepspeed && \
    pip install --upgrade --force-reinstall --no-cache-dir "unsloth[cu121-ampere-torch230] @ git+https://github.com/unslothai/unsloth.git" --no-deps && \
    pip cache purge
Dockerfile.api
ADDED
@@ -0,0 +1,3 @@
FROM huggingface/autotrain-advanced:latest

CMD autotrain api --port 7860 --host 0.0.0.0
Dockerfile.app
ADDED
@@ -0,0 +1,2 @@
FROM huggingface/autotrain-advanced:latest
CMD uvicorn autotrain.app:app --host 0.0.0.0 --port 7860 --reload --workers 4
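Both Dockerfile.api and Dockerfile.app start from the base image built by the Dockerfile above. A minimal sketch of building and running the app image locally, assuming Docker with NVIDIA GPU support; the `--gpus` flag and host port mapping are illustrative assumptions, not part of this diff:

```bash
# Build the base image under the tag the two derived Dockerfiles expect
# (the Makefile's `docker` target does the same build + tag).
docker build -t huggingface/autotrain-advanced:latest .

# Build the app image and run it; its CMD serves uvicorn on port 7860.
docker build -t autotrain-app:latest -f Dockerfile.app .
docker run --gpus all -p 7860:7860 autotrain-app:latest
```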
LICENSE
ADDED
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
Makefile
ADDED
@@ -0,0 +1,39 @@
.PHONY: quality style test

# Check that source code meets quality standards

quality:
	black --check --line-length 119 --target-version py38 .
	isort --check-only .
	flake8 --max-line-length 119

# Format source code automatically

style:
	black --line-length 119 --target-version py38 .
	isort .

test:
	pytest -sv ./src/

docker:
	docker build -t autotrain-advanced:latest .
	docker tag autotrain-advanced:latest huggingface/autotrain-advanced:latest
	docker push huggingface/autotrain-advanced:latest

api:
	docker build -t autotrain-advanced-api:latest -f Dockerfile.api .
	docker tag autotrain-advanced-api:latest public.ecr.aws/z4c3o6n6/autotrain-api:latest
	docker push public.ecr.aws/z4c3o6n6/autotrain-api:latest

ngc:
	docker build -t autotrain-advanced:latest .
	docker tag autotrain-advanced:latest nvcr.io/ycymhzotssoi/autotrain-advanced:latest
	docker push nvcr.io/ycymhzotssoi/autotrain-advanced:latest

pip:
	rm -rf build/
	rm -rf dist/
	make style && make quality
	python setup.py sdist bdist_wheel
	twine upload dist/* --verbose --repository autotrain-advanced
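For reference, a typical local development loop with these targets might look like the sketch below; it uses only the targets defined above:

```bash
make style     # auto-format with black (line length 119) and isort
make quality   # verify formatting and run flake8 lint checks
make test      # run pytest against ./src/
```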
Manifest.in
ADDED
@@ -0,0 +1,2 @@
recursive-include src/autotrain/static *
recursive-include src/autotrain/templates *
README.md
CHANGED
@@ -1,10 +1,133 @@
# 🤗 AutoTrain Advanced

AutoTrain Advanced: faster and easier training and deployment of state-of-the-art machine learning models. AutoTrain Advanced is a no-code solution that lets you train machine learning models in just a few clicks. Please note that you must upload data in the correct format for a project to be created. For help with the proper data format and pricing, check out the documentation.

NOTE: AutoTrain is free! You only pay for the resources you use if you decide to run AutoTrain on Hugging Face Spaces. When running locally, you only pay for the resources you use on your own infrastructure.

## Supported Tasks

| Task | Status | Python Notebook | Example Configs |
| --- | --- | --- | --- |
| LLM SFT Finetuning | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/notebooks/llm_finetuning.ipynb) | [llm_sft_finetune.yaml](https://github.com/huggingface/autotrain-advanced/blob/main/configs/llm_finetuning/smollm2.yml) |
| LLM ORPO Finetuning | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/notebooks/llm_finetuning.ipynb) | [llm_orpo_finetune.yaml](https://github.com/huggingface/autotrain-advanced/blob/main/configs/llm_finetuning/llama3-8b-orpo.yml) |
| LLM DPO Finetuning | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/notebooks/llm_finetuning.ipynb) | [llm_dpo_finetune.yaml](https://github.com/huggingface/autotrain-advanced/blob/main/configs/llm_finetuning/llama3-8b-dpo-qlora.yml) |
| LLM Reward Finetuning | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/notebooks/llm_finetuning.ipynb) | [llm_reward_finetune.yaml](https://github.com/huggingface/autotrain-advanced/blob/main/configs/llm_finetuning/llama32-1b-sft.yml) |
| LLM Generic/Default Finetuning | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/notebooks/llm_finetuning.ipynb) | [llm_generic_finetune.yaml](https://github.com/huggingface/autotrain-advanced/blob/main/configs/llm_finetuning/gpt2_sft.yml) |
| Text Classification | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/notebooks/text_classification.ipynb) | [text_classification.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/text_classification) |
| Text Regression | ✅ | [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/notebooks/text_regression.ipynb) | [text_regression.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/text_regression) |
| Token Classification | ✅ | Coming Soon | [token_classification.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/token_classification) |
| Seq2Seq | ✅ | Coming Soon | [seq2seq.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/seq2seq) |
| Extractive Question Answering | ✅ | Coming Soon | [extractive_qa.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/extractive_question_answering) |
| Image Classification | ✅ | Coming Soon | [image_classification.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/image_classification) |
| Image Scoring/Regression | ✅ | Coming Soon | [image_regression.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/image_scoring) |
| VLM | 🟥 | Coming Soon | [vlm.yaml](https://github.com/huggingface/autotrain-advanced/tree/main/configs/vlm) |

## Running UI on Colab or Hugging Face Spaces

- Deploy AutoTrain on Hugging Face Spaces: [Duplicate Space](https://huggingface.co/login?next=%2Fspaces%2Fautotrain-projects%2Fautotrain-advanced%3Fduplicate%3Dtrue)
- Run the AutoTrain UI on Colab via ngrok: [Open In Colab](https://colab.research.google.com/github/huggingface/autotrain-advanced/blob/main/colabs/AutoTrain_ngrok.ipynb)

## Local Installation

You can install the AutoTrain Advanced Python package via pip. Please note you will need Python >= 3.10 for AutoTrain Advanced to work properly.

```bash
pip install autotrain-advanced
```

Please make sure that you have git lfs installed. Check out the instructions here: https://github.com/git-lfs/git-lfs/wiki/Installation

You also need to install torch, torchaudio and torchvision.

The best way to run autotrain is in a conda environment. You can create a new conda environment with the following commands:

```bash
conda create -n autotrain python=3.10
conda activate autotrain
pip install autotrain-advanced
conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
conda install -c "nvidia/label/cuda-12.1.0" cuda-nvcc
```

Once done, you can start the application with:

```bash
autotrain app --port 8080 --host 127.0.0.1
```

If you prefer not to use the UI, you can train from the command line with AutoTrain configs or the AutoTrain CLI.

To train with a config file, use:

```bash
autotrain --config <path_to_config_file>
```

You can find sample config files in the `configs` directory of this repository.

Example config file for finetuning SmolLM2:

```yaml
task: llm-sft
base_model: HuggingFaceTB/SmolLM2-1.7B-Instruct
project_name: autotrain-smollm2-finetune
log: tensorboard
backend: local

data:
  path: HuggingFaceH4/no_robots
  train_split: train
  valid_split: null
  chat_template: tokenizer
  column_mapping:
    text_column: messages

params:
  block_size: 2048
  model_max_length: 4096
  epochs: 2
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: paged_adamw_8bit
  scheduler: linear
  gradient_accumulation: 8
  mixed_precision: bf16
  merge_adapter: true

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
```

To fine-tune a model using the config file above, run:

```bash
$ export HF_USERNAME=<your_hugging_face_username>
$ export HF_TOKEN=<your_hugging_face_write_token>
$ autotrain --config <path_to_config_file>
```

## Documentation

Documentation is available at https://hf.co/docs/autotrain/

## Citation

```
@inproceedings{thakur-2024-autotrain,
    title = "{A}uto{T}rain: No-code training for state-of-the-art models",
    author = "Thakur, Abhishek",
    booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.emnlp-demo.44",
    pages = "419--423",
    abstract = "With the advancements in open-source models, training(or finetuning) models on custom datasets has become a crucial part of developing solutions which are tailored to specific industrial or open-source applications. Yet, there is no single tool which simplifies the process of training across different types of modalities or tasks. We introduce AutoTrain(aka AutoTrain Advanced){---}an open-source, no code tool/library which can be used to train (or finetune) models for different kinds of tasks such as: large language model (LLM) finetuning, text classification/regression, token classification, sequence-to-sequence task, finetuning of sentence transformers, visual language model (VLM) finetuning, image classification/regression and even classification and regression tasks on tabular data. AutoTrain Advanced is an open-source library providing best practices for training models on custom datasets. The library is available at https://github.com/huggingface/autotrain-advanced. AutoTrain can be used in fully local mode or on cloud machines and works with tens of thousands of models shared on Hugging Face Hub and their variations.",
}
```
colabs/AutoTrain.ipynb
ADDED
@@ -0,0 +1,50 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<img src=\"https://raw.githubusercontent.com/huggingface/autotrain-advanced/main/src/autotrain/app/static/logo.png\" alt=\"AutoTrain\" width=\"200\"/>\n",
    "\n",
    "- Attach proper hardware\n",
    "- Click Runtime > Run all\n",
    "- Read the [docs](https://hf.co/docs/autotrain) for data format, parameters and other questions\n",
    "- GitHub Repo: https://github.com/huggingface/autotrain-advanced"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install -U autotrain-advanced > install_logs.txt 2>&1\n",
    "from IPython.display import display\n",
    "from autotrain.app.colab import colab_app\n",
    "elements = colab_app()\n",
    "display(elements)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "autotrain",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.1.-1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
colabs/AutoTrain_LLM.ipynb
ADDED
@@ -0,0 +1,157 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "cellView": "form",
    "collapsed": true,
    "id": "JvMRbVLEJlZT"
   },
   "outputs": [],
   "source": [
    "#@title 🤗 AutoTrain LLM\n",
    "#@markdown In order to use this colab\n",
    "#@markdown - upload train.csv to a folder named `data/`\n",
    "#@markdown - train.csv must contain a `text` column\n",
    "#@markdown - choose a project name if you wish\n",
    "#@markdown - change model if you wish, you can use most of the text-generation models from Hugging Face Hub\n",
    "#@markdown - add huggingface information (token) if you wish to push trained model to huggingface hub\n",
    "#@markdown - update hyperparameters if you wish\n",
    "#@markdown - click `Runtime > Run all` or run each cell individually\n",
    "#@markdown - report issues / feature requests here: https://github.com/huggingface/autotrain-advanced/issues\n",
    "\n",
    "\n",
    "import os\n",
    "!pip install -U autotrain-advanced > install_logs.txt 2>&1\n",
    "!autotrain setup --colab > setup_logs.txt\n",
    "from autotrain import __version__\n",
    "print(f'AutoTrain version: {__version__}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "cellView": "form",
    "id": "A2-_lkBS1WKA"
   },
   "outputs": [],
   "source": [
    "#@markdown ---\n",
    "#@markdown #### Project Config\n",
    "#@markdown Note: if you are using a restricted/private model, you need to enter your Hugging Face token in the next step.\n",
    "project_name = 'my-autotrain-llm' # @param {type:\"string\"}\n",
    "model_name = 'abhishek/llama-2-7b-hf-small-shards' # @param {type:\"string\"}\n",
    "\n",
    "#@markdown ---\n",
    "#@markdown #### Push to Hub?\n",
    "#@markdown Use these only if you want to push your trained model to a private repo in your Hugging Face Account\n",
    "#@markdown If you dont use these, the model will be saved in Google Colab and you are required to download it manually.\n",
    "#@markdown Please enter your Hugging Face write token. The trained model will be saved to your Hugging Face account.\n",
    "#@markdown You can find your token here: https://huggingface.co/settings/tokens\n",
    "push_to_hub = False # @param [\"False\", \"True\"] {type:\"raw\"}\n",
    "hf_token = \"hf_XXX\" #@param {type:\"string\"}\n",
    "hf_username = \"abc\" #@param {type:\"string\"}\n",
    "\n",
    "#@markdown ---\n",
    "#@markdown #### Hyperparameters\n",
    "unsloth = False # @param [\"False\", \"True\"] {type:\"raw\"}\n",
    "learning_rate = 2e-4 # @param {type:\"number\"}\n",
    "num_epochs = 1 #@param {type:\"number\"}\n",
    "batch_size = 1 # @param {type:\"slider\", min:1, max:32, step:1}\n",
    "block_size = 1024 # @param {type:\"number\"}\n",
    "trainer = \"sft\" # @param [\"generic\", \"sft\"] {type:\"string\"}\n",
    "warmup_ratio = 0.1 # @param {type:\"number\"}\n",
    "weight_decay = 0.01 # @param {type:\"number\"}\n",
    "gradient_accumulation = 4 # @param {type:\"number\"}\n",
    "mixed_precision = \"fp16\" # @param [\"fp16\", \"bf16\", \"none\"] {type:\"string\"}\n",
    "peft = True # @param [\"False\", \"True\"] {type:\"raw\"}\n",
    "quantization = \"int4\" # @param [\"int4\", \"int8\", \"none\"] {type:\"string\"}\n",
    "lora_r = 16 #@param {type:\"number\"}\n",
    "lora_alpha = 32 #@param {type:\"number\"}\n",
    "lora_dropout = 0.05 #@param {type:\"number\"}\n",
    "\n",
    "os.environ[\"HF_TOKEN\"] = hf_token\n",
    "os.environ[\"HF_USERNAME\"] = hf_username\n",
    "\n",
    "conf = f\"\"\"\n",
    "task: llm-{trainer}\n",
    "base_model: {model_name}\n",
    "project_name: {project_name}\n",
    "log: tensorboard\n",
    "backend: local\n",
    "\n",
    "data:\n",
    "  path: data/\n",
    "  train_split: train\n",
    "  valid_split: null\n",
    "  chat_template: null\n",
    "  column_mapping:\n",
    "    text_column: text\n",
    "\n",
    "params:\n",
    "  block_size: {block_size}\n",
    "  lr: {learning_rate}\n",
    "  warmup_ratio: {warmup_ratio}\n",
    "  weight_decay: {weight_decay}\n",
    "  epochs: {num_epochs}\n",
    "  batch_size: {batch_size}\n",
    "  gradient_accumulation: {gradient_accumulation}\n",
    "  mixed_precision: {mixed_precision}\n",
    "  peft: {peft}\n",
    "  quantization: {quantization}\n",
    "  lora_r: {lora_r}\n",
    "  lora_alpha: {lora_alpha}\n",
    "  lora_dropout: {lora_dropout}\n",
    "  unsloth: {unsloth}\n",
    "\n",
    "hub:\n",
    "  username: ${{HF_USERNAME}}\n",
    "  token: ${{HF_TOKEN}}\n",
    "  push_to_hub: {push_to_hub}\n",
    "\"\"\"\n",
    "\n",
    "with open(\"conf.yaml\", \"w\") as f:\n",
    "    f.write(conf)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "id": "g3cd_ED_yXXt"
   },
   "outputs": [],
   "source": [
    "!autotrain --config conf.yaml"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
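The first cell's instructions call for a `train.csv` with a `text` column inside a `data/` folder. A hypothetical minimal dataset to illustrate the expected shape; the conversation formatting inside the strings is purely illustrative, since the notebook only requires the `text` column:

```bash
mkdir -p data
cat > data/train.csv <<'EOF'
text
"### Human: What is AutoTrain? ### Assistant: A no-code tool for training models."
"### Human: Which trainers does this notebook expose? ### Assistant: generic and sft."
EOF
```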
colabs/AutoTrain_ngrok.ipynb
ADDED
@@ -0,0 +1,52 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "cellView": "form",
    "id": "II6F7ThkI10I"
   },
   "outputs": [],
   "source": [
    "#@title 🤗 AutoTrain\n",
    "#@markdown In order to use this colab\n",
    "#@markdown - Enter your [Hugging Face Write Token](https://huggingface.co/settings/tokens)\n",
    "#@markdown - Enter your [ngrok auth token](https://dashboard.ngrok.com/get-started/your-authtoken)\n",
    "huggingface_token = '' # @param {type:\"string\"}\n",
    "ngrok_token = \"\" # @param {type:\"string\"}\n",
    "\n",
    "#@markdown\n",
    "#@markdown - Attach appropriate accelerator `Runtime > Change runtime type > Hardware accelerator`\n",
    "#@markdown - click `Runtime > Run all`\n",
    "#@markdown - Follow the link to access the UI\n",
    "#@markdown - Training happens inside this Google Colab\n",
    "#@markdown - report issues / feature requests [here](https://github.com/huggingface/autotrain-advanced/issues)\n",
    "\n",
    "import os\n",
    "os.environ[\"HF_TOKEN\"] = str(huggingface_token)\n",
    "os.environ[\"NGROK_AUTH_TOKEN\"] = str(ngrok_token)\n",
    "os.environ[\"AUTOTRAIN_LOCAL\"] = \"1\"\n",
    "\n",
    "!pip install -U autotrain-advanced > install_logs.txt 2>&1\n",
    "!autotrain app --share"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "gpuType": "T4",
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
colabs/image_classification.ipynb
ADDED
@@ -0,0 +1,63 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%writefile config.yml\n",
    "task: image_classification # do not change\n",
    "base_model: google/vit-base-patch16-224 # the model to be used from hugging face hub\n",
    "project_name: autotrain-image-classification-model # the name of the project, must be unique\n",
    "log: tensorboard # do not change\n",
    "backend: local # do not change\n",
    "\n",
    "data:\n",
    "  path: data/ # the path to the data folder, this folder consists of `train` and `valid` (if any) folders\n",
    "  train_split: train # this folder inside data/ will be used for training, it contains the images in subfolders.\n",
    "  valid_split: null # this folder inside data/ will be used for validation, it contains the images in subfolders. If not available, set it to null\n",
    "  column_mapping: # do not change\n",
    "    image_column: image\n",
    "    target_column: labels\n",
    "\n",
    "params:\n",
    "  epochs: 2\n",
    "  batch_size: 4\n",
    "  lr: 2e-5\n",
    "  optimizer: adamw_torch\n",
    "  scheduler: linear\n",
    "  gradient_accumulation: 1\n",
    "  mixed_precision: fp16\n",
    "\n",
    "hub:\n",
    "  username: ${HF_USERNAME} # please set HF_USERNAME in colab secrets\n",
    "  token: ${HF_TOKEN} # please set HF_TOKEN in colab secrets, must be valid hugging face write token\n",
    "  push_to_hub: true # set to true if you want to push the model to the hub"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from google.colab import userdata\n",
    "HF_USERNAME = userdata.get('HF_USERNAME')\n",
    "HF_TOKEN = userdata.get('HF_TOKEN')\n",
    "os.environ['HF_USERNAME'] = HF_USERNAME\n",
    "\n",
    "os.environ['HF_TOKEN'] = HF_TOKEN\n",
    "!autotrain --config config.yml"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
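Per the config comments above, `data/train` must contain the images in one subfolder per class. A hypothetical layout sketch; the folder names here are placeholders, and each subfolder name becomes a class label:

```bash
mkdir -p data/train/cat data/train/dog
cp /path/to/cat_images/*.jpg data/train/cat/   # all images labeled "cat"
cp /path/to/dog_images/*.jpg data/train/dog/   # all images labeled "dog"
```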
configs/extractive_question_answering/hub_dataset.yml
ADDED
@@ -0,0 +1,30 @@
task: extractive-qa
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-ex-qa1
log: tensorboard
backend: local

data:
  path: lhoestq/squad
  train_split: train
  valid_split: validation
  column_mapping:
    text_column: context
    question_column: question
    answer_column: answers

params:
  max_seq_length: 512
  max_doc_stride: 128
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/extractive_question_answering/local_dataset.yml
ADDED
@@ -0,0 +1,30 @@
task: extractive-qa
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-ex-qa2
log: tensorboard
backend: local

data:
  path: data/ # this must be the path to the directory containing the train and valid files
  train_split: train # this must be either train.csv or train.json
  valid_split: valid # this must be either valid.csv or valid.json
  column_mapping:
    text_column: context
    question_column: question
    answer_column: answers

params:
  max_seq_length: 512
  max_doc_stride: 128
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
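A hypothetical `data/` layout for this config. Per the comments above, the splits must be `train.csv`/`train.json` and `valid.csv`/`valid.json`; the SQuAD-style `answers` encoding shown here mirrors the `lhoestq/squad` dataset used by the hub config and is an assumption for local files:

```bash
mkdir -p data
cat > data/train.json <<'EOF'
[{"context": "AutoTrain is a no-code training tool.",
  "question": "What is AutoTrain?",
  "answers": {"text": ["a no-code training tool"], "answer_start": [13]}}]
EOF
cp data/train.json data/valid.json
```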
configs/image_classification/hub_dataset.yml
ADDED
@@ -0,0 +1,27 @@
task: image_classification
base_model: google/vit-base-patch16-224
project_name: autotrain-cats-vs-dogs-finetuned
log: tensorboard
backend: local

data:
  path: cats_vs_dogs
  train_split: train
  valid_split: null
  column_mapping:
    image_column: image
    target_column: labels

params:
  epochs: 2
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/image_classification/local.yml
ADDED
@@ -0,0 +1,27 @@
task: image_classification
base_model: google/vit-base-patch16-224
project_name: autotrain-image-classification-model
log: tensorboard
backend: local

data:
  path: data/
  train_split: train # this folder inside data/ will be used for training, it contains the images in subfolders.
  valid_split: null
  column_mapping:
    image_column: image
    target_column: label

params:
  epochs: 2
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/image_scoring/hub_dataset.yml
ADDED
@@ -0,0 +1,27 @@
task: image_regression
base_model: google/vit-base-patch16-224
project_name: autotrain-cats-vs-dogs-finetuned
log: tensorboard
backend: local

data:
  path: cats_vs_dogs
  train_split: train
  valid_split: null
  column_mapping:
    image_column: image
    target_column: labels

params:
  epochs: 2
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/image_scoring/image_quality.yml
ADDED
@@ -0,0 +1,27 @@
task: image_regression
base_model: microsoft/resnet-50
project_name: autotrain-img-quality-resnet50
log: tensorboard
backend: local

data:
  path: abhishek/img-quality-full
  train_split: train
  valid_split: null
  column_mapping:
    image_column: image
    target_column: target

params:
  epochs: 10
  batch_size: 8
  lr: 2e-3
  optimizer: adamw_torch
  scheduler: cosine
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/image_scoring/local.yml
ADDED
@@ -0,0 +1,28 @@
task: image_regression
base_model: google/vit-base-patch16-224
project_name: autotrain-image-regression-model
log: tensorboard
backend: local

data:
  path: data/
  train_split: train # this folder inside data/ will be used for training, it contains the images and metadata.jsonl
  valid_split: valid # this folder inside data/ will be used for validation, it contains the images and metadata.jsonl. can be set to null
  # column mapping should not be changed for local datasets
  column_mapping:
    image_column: image
    target_column: target

params:
  epochs: 2
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
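For the local variant above, `data/` is expected to follow the Hub `imagefolder` convention: one folder per split, each holding the images plus a `metadata.jsonl` whose `target` field matches the config's `target_column`. A sketch with invented file names and scores:

```bash
# hypothetical layout matching configs/image_scoring/local.yml
mkdir -p data/train data/valid
cat > data/train/metadata.jsonl <<'EOF'
{"file_name": "img_0001.jpg", "target": 0.82}
{"file_name": "img_0002.jpg", "target": 0.15}
EOF
# data/valid/metadata.jsonl uses the same schema (or set valid_split to null)
```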
configs/llm_finetuning/gpt2_sft.yml
ADDED
@@ -0,0 +1,32 @@
task: llm-sft
base_model: openai-community/gpt2
project_name: autotrain-gpt2-finetuned-guanaco
log: tensorboard
backend: local

data:
  path: timdettmers/openassistant-guanaco
  train_split: train
  valid_split: null
  chat_template: null
  column_mapping:
    text_column: text

params:
  block_size: 1024
  model_max_length: 2048
  max_prompt_length: 512
  epochs: 3
  batch_size: 2
  lr: 3e-5
  padding: right
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: fp16
  merge_adapter: true

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: false
configs/llm_finetuning/llama3-70b-orpo-v1.yml
ADDED
@@ -0,0 +1,36 @@
task: llm-orpo
base_model: meta-llama/Meta-Llama-3-70B-Instruct
project_name: autotrain-llama3-70b-orpo-v1
log: tensorboard
backend: local

data:
  path: argilla/distilabel-capybara-dpo-7k-binarized
  train_split: train
  valid_split: valid
  chat_template: chatml
  column_mapping:
    text_column: chosen
    rejected_text_column: rejected
    prompt_text_column: prompt

params:
  block_size: 2048
  model_max_length: 8192
  max_prompt_length: 1024
  epochs: 3
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: null
  target_modules: all-linear
  padding: right
  optimizer: paged_adamw_8bit
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: bf16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
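The ORPO and DPO configs in this group all map a preference-style dataset through `text_column: chosen`, `rejected_text_column: rejected`, and `prompt_text_column: prompt`. One row of such a dataset, sketched with invented content:

```bash
# illustrative only: the shape of a single preference record
cat <<'EOF'
{"prompt": "Explain gradient accumulation.", "chosen": "Gradient accumulation sums gradients over several small batches before each optimizer step...", "rejected": "It makes the GPU faster."}
EOF
```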
configs/llm_finetuning/llama3-70b-sft.yml
ADDED
@@ -0,0 +1,33 @@
task: llm-sft
base_model: meta-llama/Meta-Llama-3-70B-Instruct
project_name: autotrain-llama3-70b-math-v1
log: tensorboard
backend: local

data:
  path: rishiraj/guanaco-style-metamath-40k
  train_split: train
  valid_split: null
  chat_template: null
  column_mapping:
    text_column: text

params:
  block_size: 2048
  model_max_length: 8192
  epochs: 2
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: null
  target_modules: all-linear
  padding: right
  optimizer: paged_adamw_8bit
  scheduler: linear
  gradient_accumulation: 8
  mixed_precision: bf16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/llm_finetuning/llama3-8b-dpo-qlora.yml
ADDED
@@ -0,0 +1,36 @@
task: llm-dpo
base_model: meta-llama/Meta-Llama-3-8B-Instruct
project_name: autotrain-llama3-8b-dpo-qlora
log: tensorboard
backend: local

data:
  path: mlabonne/orpo-dpo-mix-40k
  train_split: train
  valid_split: null
  chat_template: chatml
  column_mapping:
    text_column: chosen
    rejected_text_column: rejected
    prompt_text_column: prompt

params:
  block_size: 1024
  model_max_length: 2048
  max_prompt_length: 512
  epochs: 3
  batch_size: 2
  lr: 3e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: false
configs/llm_finetuning/llama3-8b-orpo-space.yml
ADDED
@@ -0,0 +1,36 @@
task: llm-orpo
base_model: meta-llama/Meta-Llama-3-8B-Instruct
project_name: autotrain-llama3-8b-orpo-t1
log: tensorboard
backend: spaces-a10g-largex4

data:
  path: argilla/distilabel-capybara-dpo-7k-binarized
  train_split: train
  valid_split: null
  chat_template: chatml
  column_mapping:
    text_column: chosen
    rejected_text_column: rejected
    prompt_text_column: prompt

params:
  block_size: 1024
  model_max_length: 8192
  max_prompt_length: 512
  epochs: 3
  batch_size: 2
  lr: 3e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
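This config differs from the local ORPO run below only in `backend: spaces-a10g-largex4`, which schedules the same job on Hugging Face Spaces hardware instead of the local machine. The launch command does not change; only the token requirement is stricter, since AutoTrain has to create a training Space on your account (a sketch):

```bash
export HF_USERNAME=your-username
export HF_TOKEN=hf_xxx   # must be a write token so the training Space can be created
autotrain --config configs/llm_finetuning/llama3-8b-orpo-space.yml
```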
configs/llm_finetuning/llama3-8b-orpo.yml
ADDED
@@ -0,0 +1,36 @@
task: llm-orpo
base_model: meta-llama/Meta-Llama-3-8B-Instruct
project_name: autotrain-llama3-8b-orpo
log: tensorboard
backend: local

data:
  path: argilla/distilabel-capybara-dpo-7k-binarized
  train_split: train
  valid_split: null
  chat_template: chatml
  column_mapping:
    text_column: chosen
    rejected_text_column: rejected
    prompt_text_column: prompt

params:
  block_size: 1024
  model_max_length: 8192
  max_prompt_length: 512
  epochs: 3
  batch_size: 2
  lr: 3e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/llm_finetuning/llama3-8b-sft-unsloth.yml
ADDED
@@ -0,0 +1,36 @@
task: llm-sft
base_model: meta-llama/Meta-Llama-3-8B-Instruct
project_name: autotrain-llama3-8b-sft-unsloth
log: tensorboard
backend: local

data:
  path: rishiraj/guanaco-style-metamath-40k
  train_split: train
  valid_split: null
  chat_template: null
  column_mapping:
    text_column: text

params:
  block_size: 1024
  model_max_length: 8192
  max_prompt_length: 512
  epochs: 3
  batch_size: 2
  lr: 3e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: fp16
  unsloth: true
  lora_dropout: 0

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/llm_finetuning/llama32-1b-sft.yml
ADDED
@@ -0,0 +1,34 @@
task: llm-sft
base_model: meta-llama/Llama-3.2-1B
project_name: autotrain-llama32-1b-finetune
log: tensorboard
backend: local

data:
  path: HuggingFaceH4/no_robots
  train_split: train
  valid_split: null
  chat_template: tokenizer
  column_mapping:
    text_column: messages

params:
  block_size: 2048
  model_max_length: 4096
  epochs: 2
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: paged_adamw_8bit
  scheduler: linear
  gradient_accumulation: 8
  mixed_precision: bf16
  merge_adapter: true

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
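`chat_template: tokenizer` above means each row of the `messages` column is rendered with the base model tokenizer's own chat template, so the data must already be in the role/content messages format. A sketch of one JSONL record (content invented):

```bash
# one line of a hypothetical messages-format dataset
cat <<'EOF'
{"messages": [{"role": "user", "content": "What does AutoTrain do?"}, {"role": "assistant", "content": "It fine-tunes models from a YAML config."}]}
EOF
```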
configs/llm_finetuning/qwen.yml
ADDED
@@ -0,0 +1,34 @@
task: llm-sft
base_model: Qwen/Qwen2.5-Coder-7B-Instruct
project_name: autotrain-qwen-finetune
log: tensorboard
backend: local

data:
  path: HuggingFaceH4/no_robots
  train_split: test
  valid_split: null
  chat_template: tokenizer
  column_mapping:
    text_column: messages

params:
  block_size: 2048
  model_max_length: 4096
  epochs: 1
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16
  merge_adapter: true

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/llm_finetuning/smollm2.yml
ADDED
@@ -0,0 +1,34 @@
task: llm-sft
base_model: HuggingFaceTB/SmolLM2-1.7B-Instruct
project_name: autotrain-smollm2-finetune
log: tensorboard
backend: local

data:
  path: HuggingFaceH4/no_robots
  train_split: train
  valid_split: null
  chat_template: tokenizer
  column_mapping:
    text_column: messages

params:
  block_size: 2048
  model_max_length: 4096
  epochs: 2
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: paged_adamw_8bit
  scheduler: linear
  gradient_accumulation: 8
  mixed_precision: bf16
  merge_adapter: true

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/llm_finetuning/smollm2_guanaco.yml
ADDED
@@ -0,0 +1,34 @@
task: llm-sft
base_model: HuggingFaceTB/SmolLM2-135M-Instruct
project_name: autotrain-smollm2-135m-finetune-guanaco
log: tensorboard
backend: local

data:
  path: timdettmers/openassistant-guanaco
  train_split: train
  valid_split: null
  chat_template: null
  column_mapping:
    text_column: text

params:
  block_size: 1024
  model_max_length: 2048
  epochs: 1
  batch_size: 1
  lr: 1e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: paged_adamw_8bit
  scheduler: linear
  gradient_accumulation: 8
  mixed_precision: bf16
  merge_adapter: true

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/llm_finetuning/smollm2_orpo.yml
ADDED
@@ -0,0 +1,36 @@
task: llm-orpo
base_model: HuggingFaceTB/SmolLM2-1.7B-Instruct
project_name: autotrain-smollm2-orpo
log: tensorboard
backend: local

data:
  path: argilla/distilabel-capybara-dpo-7k-binarized
  train_split: train
  valid_split: null
  chat_template: chatml
  column_mapping:
    text_column: chosen
    rejected_text_column: rejected
    prompt_text_column: prompt

params:
  block_size: 1024
  model_max_length: 2048
  max_prompt_length: 512
  epochs: 3
  batch_size: 2
  lr: 3e-5
  peft: true
  quantization: int4
  target_modules: all-linear
  padding: right
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: false
configs/object_detection/hub_dataset.yml
ADDED
@@ -0,0 +1,31 @@
task: object_detection
base_model: facebook/detr-resnet-50
project_name: autotrain-obj-det-cppe5-2
log: tensorboard
backend: local

data:
  path: cppe-5
  train_split: train
  valid_split: test
  column_mapping:
    image_column: image
    objects_column: objects

params:
  image_square_size: 600
  epochs: 100
  batch_size: 8
  lr: 5e-5
  weight_decay: 1e-4
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16
  early_stopping_patience: 50
  early_stopping_threshold: 0.001

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/object_detection/local.yml
ADDED
@@ -0,0 +1,31 @@
task: object_detection
base_model: facebook/detr-resnet-50
project_name: autotrain-obj-det-local-dataset
log: tensorboard
backend: local

data:
  path: data/ # this contains the train and validation folders
  train_split: train # this is the folder name inside the data path, contains images and metadata.jsonl
  valid_split: validation # this is the folder name inside the data path, contains images and metadata.jsonl, optional
  column_mapping:
    image_column: image
    objects_column: objects

params:
  image_square_size: 600
  epochs: 100
  batch_size: 8
  lr: 5e-5
  weight_decay: 1e-4
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16
  early_stopping_patience: 50
  early_stopping_threshold: 0.001

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
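For the local object-detection layout, each split folder again carries a `metadata.jsonl`; the `objects` field bundles bounding boxes and category ids per image. The exact keys below are an assumption modeled on the cppe-5 dataset used by the hub config above:

```bash
# hypothetical data/train/metadata.jsonl entry; bbox values are invented
mkdir -p data/train data/validation
cat > data/train/metadata.jsonl <<'EOF'
{"file_name": "0001.jpg", "objects": {"bbox": [[302, 109, 73, 52]], "category": [0]}}
EOF
```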
configs/sentence_transformers/local_dataset.yml
ADDED
@@ -0,0 +1,29 @@
task: sentence-transformers:pair_score
base_model: microsoft/mpnet-base
project_name: autotrain-st-pair-score-local-dataset
log: tensorboard
backend: local

data:
  path: /path/to/your/dataset # this must be the path to the directory containing the train and valid files
  train_split: train # this is the name of the train file (csv or jsonl)
  valid_split: null # this is the name of the valid file (csv or jsonl), optional
  column_mapping:
    sentence1_column: input_sentence
    sentence2_column: target_sentence
    target_column: score

params:
  max_seq_length: 512
  epochs: 5
  batch_size: 8
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
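For the `pair_score` local dataset, the train file is a flat CSV or JSONL whose columns line up with the `column_mapping` above. A hypothetical `train.csv` (sentences and scores invented):

```bash
cat > /path/to/your/dataset/train.csv <<'EOF'
input_sentence,target_sentence,score
"A man is eating food.","A man is eating a meal.",0.9
"A man is eating food.","It is snowing outside.",0.1
EOF
```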
configs/sentence_transformers/pair.yml
ADDED
@@ -0,0 +1,28 @@
task: sentence-transformers:pair
base_model: microsoft/mpnet-base
project_name: autotrain-st-pair
log: tensorboard
backend: local

data:
  path: sentence-transformers/all-nli
  train_split: pair:train
  valid_split: pair:dev
  column_mapping:
    sentence1_column: anchor
    sentence2_column: positive

params:
  max_seq_length: 512
  epochs: 5
  batch_size: 8
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/sentence_transformers/pair_class.yml
ADDED
@@ -0,0 +1,29 @@
task: sentence-transformers:pair_class
base_model: google-bert/bert-base-uncased
project_name: autotrain-st-pair-class
log: tensorboard
backend: local

data:
  path: sentence-transformers/all-nli
  train_split: pair-class:train
  valid_split: pair-class:test
  column_mapping:
    sentence1_column: premise
    sentence2_column: hypothesis
    target_column: label

params:
  max_seq_length: 512
  epochs: 5
  batch_size: 8
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/sentence_transformers/pair_score.yml
ADDED
@@ -0,0 +1,29 @@
task: sentence-transformers:pair_score
base_model: microsoft/mpnet-base
project_name: autotrain-st-pair-score
log: tensorboard
backend: local

data:
  path: sentence-transformers/all-nli
  train_split: pair-score:train
  valid_split: pair-score:dev
  column_mapping:
    sentence1_column: sentence1
    sentence2_column: sentence2
    target_column: score

params:
  max_seq_length: 512
  epochs: 5
  batch_size: 8
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/sentence_transformers/qa.yml
ADDED
@@ -0,0 +1,28 @@
task: sentence-transformers:qa
base_model: microsoft/mpnet-base
project_name: autotrain-st-qa
log: tensorboard
backend: local

data:
  path: sentence-transformers/natural-questions
  train_split: train
  valid_split: null
  column_mapping:
    sentence1_column: query
    sentence2_column: answer

params:
  max_seq_length: 512
  epochs: 5
  batch_size: 8
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/sentence_transformers/triplet.yml
ADDED
@@ -0,0 +1,29 @@
task: sentence-transformers:triplet
base_model: microsoft/mpnet-base
project_name: autotrain-st-triplet
log: tensorboard
backend: local

data:
  path: sentence-transformers/all-nli
  train_split: triplet:train
  valid_split: triplet:dev
  column_mapping:
    sentence1_column: anchor
    sentence2_column: positive
    sentence3_column: negative

params:
  max_seq_length: 512
  epochs: 5
  batch_size: 8
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/seq2seq/hub_dataset.yml
ADDED
@@ -0,0 +1,28 @@
task: seq2seq
base_model: google/flan-t5-base
project_name: autotrain-seq2seq-hub-dataset
log: tensorboard
backend: local

data:
  path: samsum
  train_split: train
  valid_split: test
  column_mapping:
    text_column: dialogue
    target_column: summary

params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: none

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/seq2seq/local.yml
ADDED
@@ -0,0 +1,29 @@
task: seq2seq
base_model: google/flan-t5-base
project_name: autotrain-seq2seq-local
log: tensorboard
backend: local

data:
  path: path/to/your/dataset # directory containing the train and valid csv/jsonl files
  train_split: train
  valid_split: test
  column_mapping:
    text_column: text
    target_column: target


params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: none

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/text_classification/hub_dataset.yml
ADDED
@@ -0,0 +1,28 @@
task: text_classification
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-imdb-finetuned
log: tensorboard
backend: local

data:
  path: stanfordnlp/imdb
  train_split: train
  valid_split: test
  column_mapping:
    text_column: text
    target_column: label

params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/text_classification/local_dataset.yml
ADDED
@@ -0,0 +1,28 @@
task: text_classification
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-imdb-finetuned
log: tensorboard
backend: local

data:
  path: data/ # this must be the path to the directory containing the train and valid files
  train_split: train # this must be either train.csv or train.json
  valid_split: valid # this must be either valid.csv or valid.json
  column_mapping:
    text_column: text # this must be the name of the column containing the text
    target_column: label # this must be the name of the column containing the target

params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
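The local text-classification layout is the simplest of the lot: `data/` holds `train.csv`/`train.json` and `valid.csv`/`valid.json` with one text column and one label column. A hypothetical example:

```bash
mkdir -p data
cat > data/train.csv <<'EOF'
text,label
"this movie was wonderful",positive
"a complete waste of time",negative
EOF
# data/valid.csv uses the same columns
```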
configs/text_regression/hub_dataset.yml
ADDED
@@ -0,0 +1,28 @@
task: text_regression
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-sms-spam-finetuned
log: tensorboard
backend: local

data:
  path: sms_spam
  train_split: train
  valid_split: null
  column_mapping:
    text_column: sms
    target_column: label

params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/text_regression/local_dataset.yml
ADDED
@@ -0,0 +1,28 @@
task: text_regression
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-custom-finetuned
log: tensorboard
backend: local

data:
  path: data/ # this must be the path to the directory containing the train and valid files
  train_split: train # this must be either train.csv or train.json
  valid_split: valid # this must be either valid.csv or valid.json
  column_mapping:
    text_column: text # this must be the name of the column containing the text
    target_column: label # this must be the name of the column containing the target

params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/token_classification/hub_dataset.yml
ADDED
@@ -0,0 +1,28 @@
task: token_classification
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-conll2003-finetuned
log: tensorboard
backend: local

data:
  path: conll2003
  train_split: train
  valid_split: validation
  column_mapping:
    tokens_column: tokens
    tags_column: ner_tags

params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
configs/token_classification/local_dataset.yml
ADDED
@@ -0,0 +1,28 @@
task: token_classification
base_model: google-bert/bert-base-uncased
project_name: autotrain-bert-custom-finetuned
log: tensorboard
backend: local

data:
  path: data/ # this must be the path to the directory containing the train and valid files
  train_split: train # this must be train.json
  valid_split: test # this must be valid.json; can also be set to null
  column_mapping:
    tokens_column: tokens # this must be the name of the column containing the tokens
    tags_column: tags # this must be the name of the column containing the tags
 
params:
  max_seq_length: 512
  epochs: 3
  batch_size: 4
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 1
  mixed_precision: fp16

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
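Token classification expects records where tokens and tags are parallel lists, per the column comments above. A hypothetical `data/train.json` with one record per line (JSONL-style, which is an assumption about the expected file format):

```bash
mkdir -p data
cat > data/train.json <<'EOF'
{"tokens": ["Paris", "is", "in", "France"], "tags": ["B-LOC", "O", "O", "B-LOC"]}
EOF
```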
configs/vlm/paligemma_vqa.yml
ADDED
@@ -0,0 +1,30 @@
task: vlm:vqa
base_model: google/paligemma-3b-pt-224
project_name: autotrain-paligemma-finetuned-vqa
log: tensorboard
backend: local

data:
  path: abhishek/vqa_small
  train_split: train
  valid_split: validation
  column_mapping:
    image_column: image
    text_column: multiple_choice_answer
    prompt_text_column: question

params:
  epochs: 3
  batch_size: 2
  lr: 2e-5
  optimizer: adamw_torch
  scheduler: linear
  gradient_accumulation: 4
  mixed_precision: fp16
  peft: true
  quantization: int4

hub:
  username: ${HF_USERNAME}
  token: ${HF_TOKEN}
  push_to_hub: true
docs/README.md
ADDED
@@ -0,0 +1,58 @@
# Generating the documentation

To generate the documentation, you have to build it. Several packages are necessary to build the docs.

First, you need to install the project itself by running the following command at the root of the code repository:

```bash
pip install -e .
```

You also need to install two extra packages:

```bash
# `hf-doc-builder` to build the docs
pip install git+https://github.com/huggingface/doc-builder@main
# `watchdog` for live reloads
pip install watchdog
```

---
**NOTE**

You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing, for instance). You don't have to commit the built documentation.

---

## Building the documentation

Once you have set up `doc-builder` and the additional packages with the pip install commands above,
you can generate the documentation by typing the following command:

```bash
doc-builder build autotrain docs/source/ --build_dir ~/tmp/test-build
```

You can adapt `--build_dir` to any temporary folder that you prefer. This command will create it and generate
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
Markdown editor.

## Previewing the documentation

To preview the docs, run the following command:

```bash
doc-builder preview autotrain docs/source/
```

The docs will be viewable at [http://localhost:5173](http://localhost:5173). You can also preview the docs once you
have opened a PR: a bot will add a comment with a link to the documentation built from your changes.

---
**NOTE**

The `preview` command only works with existing doc files. When you add a completely new file, you need to update
`_toctree.yml` and restart the `preview` command (`ctrl-c` to stop it, then call `doc-builder preview ...` again).

---
docs/source/_toctree.yml
ADDED
@@ -0,0 +1,46 @@
- sections:
  - local: index
    title: 🤗 AutoTrain
  - local: cost
    title: How much does it cost?
  - local: support
    title: Get help and support
  - local: faq
    title: Frequently Asked Questions
  title: Getting Started
- sections:
  - local: quickstart_spaces
    title: Train on Spaces
  - local: quickstart_py
    title: Python SDK
  - local: quickstart
    title: Train Locally
  - local: config
    title: Config File
  title: Quickstart
- sections:
  - local: tasks/llm_finetuning
    title: LLM Finetuning
  - local: tasks/text_classification_regression
    title: Text Classification/Regression
  - local: tasks/extractive_qa
    title: Extractive QA
  - local: tasks/sentence_transformer
    title: Sentence Transformer
  - local: tasks/image_classification_regression
    title: Image Classification / Regression
  - local: tasks/object_detection
    title: Object Detection
  - local: tasks/seq2seq
    title: Seq2Seq
  - local: tasks/token_classification
    title: Token Classification
  - local: tasks/tabular
    title: Tabular
  title: Tasks
- sections:
  - local: col_map
    title: Understanding Column Mapping
  - local: autotrain_api
    title: AutoTrain API
  title: Miscellaneous