Add/update the quantized ONNX model files and README.md for Transformers.js v3
#1
by whitphx (HF Staff), opened
- README.md +4 -4
- onnx/model_bnb4.onnx +3 -0
- onnx/model_q4.onnx +3 -0
- onnx/model_q4f16.onnx +3 -0
- onnx/model_uint8.onnx +3 -0
README.md
CHANGED
@@ -7,18 +7,18 @@ https://huggingface.co/caidas/swin2SR-compressed-sr-x4-48 with ONNX weights to b
 
 ## Usage (Transformers.js)
 
-If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@xenova/transformers) using:
+If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
 ```bash
-npm i @xenova/transformers
+npm i @huggingface/transformers
 ```
 
 **Example:** Upscale an image with `Xenova/swin2SR-compressed-sr-x4-48`.
 ```js
-import { pipeline } from '@xenova/transformers';
+import { pipeline } from '@huggingface/transformers';
 
 // Create image-to-image pipeline
 const upscaler = await pipeline('image-to-image', 'Xenova/swin2SR-compressed-sr-x4-48', {
-
+    dtype: 'fp32', // Change this line to use the non-quantized version
 });
 
 // Upscale an image
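For context, here is a minimal sketch of how the quantized variants added in this PR could be selected under Transformers.js v3. It assumes the usual dtype-to-filename convention (`dtype: 'q4'` → `onnx/model_q4.onnx`, and likewise for `'q4f16'`, `'bnb4'`, `'uint8'`); the input URL and output path are placeholders, not part of this PR.

```js
import { pipeline } from '@huggingface/transformers';

// Create the image-to-image pipeline with one of the quantized weight files
// added in this PR (assumed mapping: dtype 'q4' -> onnx/model_q4.onnx).
const upscaler = await pipeline('image-to-image', 'Xenova/swin2SR-compressed-sr-x4-48', {
  dtype: 'q4', // other variants added here: 'q4f16', 'bnb4', 'uint8'
});

// Upscale a sample image (placeholder URL) and save the result in Node.js.
const output = await upscaler('https://example.com/low-res-input.jpg');
await output.save('upscaled.png');
```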
onnx/model_bnb4.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f168628b6da479b84647300c66a559afe2c5864652446ef812c9c0778e51416d
+size 22821552
onnx/model_q4.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:303c18dc74b0b1070074e1b282910db2177c3bc938943a46e616527cc2b8a403
+size 23791680
onnx/model_q4f16.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e36ae7be76907934b7d0580086e58e16a9de6d42f21bf9c37aeca0bf5a2da01
+size 15905239
onnx/model_uint8.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23ffaa0119b15ae11db6cc5b1ca86d7b2ff77a140beccbd4ee07e27375a91183
+size 19179579
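The files above are Git LFS pointers: each records the SHA-256 (`oid`) and byte size of the actual ONNX file stored in LFS. A minimal sketch, assuming Node.js and a locally downloaded copy of `onnx/model_q4.onnx`, of how that checksum could be verified:

```js
import { createHash } from 'node:crypto';
import { readFile } from 'node:fs/promises';

// oid from the onnx/model_q4.onnx pointer above
const expected = '303c18dc74b0b1070074e1b282910db2177c3bc938943a46e616527cc2b8a403';

// Hash the downloaded file and compare (the local path is a placeholder)
const actual = createHash('sha256').update(await readFile('onnx/model_q4.onnx')).digest('hex');
console.log(actual === expected ? 'checksum OK' : 'checksum mismatch');
```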