Commit 2573246 by adhikjoshi (1 parent: 9be3389)

Upload StableDiffusionXLPipeline

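The commit title is consistent with the default commit message diffusers generates when a pipeline object is pushed to the Hub. A minimal sketch of how such an upload might have been produced, assuming a hypothetical local SDXL checkpoint path and target repo id (neither is stated in the commit):

import torch
from diffusers import StableDiffusionXLPipeline

# Hypothetical checkpoint path and repo id; not taken from the commit itself.
pipe = StableDiffusionXLPipeline.from_single_file(
    "sdxl_checkpoint.safetensors", torch_dtype=torch.float16
)

# push_to_hub serializes model_index.json plus one subfolder per component
# (unet, vae, text_encoder, text_encoder_2, tokenizer, tokenizer_2, scheduler)
# and uploads everything in a single commit.
pipe.push_to_hub("modelslab/some-sdxl-model", variant="fp16")
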
README.md CHANGED
@@ -1,10 +1,11 @@
 ---
+library_name: diffusers
 license: creativeml-openrail-m
 tags:
-- modelslab.com
-- stable-diffusion-api
-- text-to-image
-- ultra-realistic
+- modelslab.com
+- stable-diffusion-api
+- text-to-image
+- ultra-realistic
 pinned: true
 ---
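
The substantive change to the model card is the new library_name: diffusers entry in the YAML front matter, which tells the Hugging Face Hub which library the repository targets (and therefore which inference widget and usage snippet to show); the tag values themselves are unchanged. The metadata can be read back through the Hub API; a small sketch, assuming a hypothetical repo id in place of this repository:

from huggingface_hub import model_info

# Hypothetical repo id standing in for this repository.
info = model_info("modelslab/some-sdxl-model")
print(info.library_name)  # "diffusers", from the front matter above
print(info.tags)          # includes "modelslab.com", "stable-diffusion-api", ...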
 
model_index.json CHANGED
@@ -1,19 +1,15 @@
 {
-  "_class_name": "StableDiffusionPipeline",
+  "_class_name": "StableDiffusionXLPipeline",
   "_diffusers_version": "0.29.0.dev0",
   "feature_extractor": [
     null,
     null
   ],
+  "force_zeros_for_empty_prompt": true,
   "image_encoder": [
     null,
     null
   ],
-  "requires_safety_checker": true,
-  "safety_checker": [
-    null,
-    null
-  ],
   "scheduler": [
     "diffusers",
     "EulerDiscreteScheduler"
@@ -22,10 +18,18 @@
     "transformers",
     "CLIPTextModel"
   ],
+  "text_encoder_2": [
+    "transformers",
+    "CLIPTextModelWithProjection"
+  ],
   "tokenizer": [
     "transformers",
     "CLIPTokenizer"
   ],
+  "tokenizer_2": [
+    "transformers",
+    "CLIPTokenizer"
+  ],
   "unet": [
     "diffusers",
     "UNet2DConditionModel"
text_encoder_2/config.json ADDED
@@ -0,0 +1,24 @@
+{
+  "architectures": [
+    "CLIPTextModelWithProjection"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_size": 1280,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 5120,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 20,
+  "num_hidden_layers": 32,
+  "pad_token_id": 1,
+  "projection_dim": 1280,
+  "torch_dtype": "float16",
+  "transformers_version": "4.41.2",
+  "vocab_size": 49408
+}
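
These hyperparameters (hidden_size 1280, 32 layers, 20 attention heads, projection_dim 1280, gelu activation) describe the larger OpenCLIP-style text encoder that SDXL uses as its second conditioning model, stored in float16. It can also be loaded on its own with transformers; a sketch, again with a hypothetical repo id:

import torch
from transformers import CLIPTextModelWithProjection

# Hypothetical repo id; subfolder and variant match the files added in this commit.
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(
    "modelslab/some-sdxl-model",
    subfolder="text_encoder_2",
    variant="fp16",
    torch_dtype=torch.float16,
)
print(text_encoder_2.config.hidden_size)     # 1280
print(text_encoder_2.config.projection_dim)  # 1280
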
text_encoder_2/model.fp16.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60e6af871df3870ce5dc7f1720e158f2f27806ad5ee50fb12d92c418d7ce5647
+size 1389382176
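
Only a Git LFS pointer is committed for the weights: the LFS spec version, the sha256 object id, and the file size (roughly 1.39 GB). The tensor data itself lives in LFS storage and is resolved on download, and the recorded oid can be used to check a downloaded copy; a sketch with a hypothetical repo id:

import hashlib
from huggingface_hub import hf_hub_download

# Hypothetical repo id; the filename matches the LFS pointer above.
path = hf_hub_download(
    repo_id="modelslab/some-sdxl-model",
    filename="text_encoder_2/model.fp16.safetensors",
)

# Hash the file in chunks and compare against the oid from the pointer.
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
print(sha.hexdigest() == "60e6af871df3870ce5dc7f1720e158f2f27806ad5ee50fb12d92c418d7ce5647")
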
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<|startoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "!",
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,38 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "!",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49406": {
+      "content": "<|startoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "49407": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|startoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "do_lower_case": true,
+  "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 77,
+  "pad_token": "!",
+  "tokenizer_class": "CLIPTokenizer",
+  "unk_token": "<|endoftext|>"
+}
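
The second tokenizer is a standard CLIP BPE tokenizer (49408-token vocabulary, 77-token maximum length) whose padding token is "!", pinned to id 0 by the added_tokens_decoder table above; <|startoftext|> and <|endoftext|> keep their usual ids 49406 and 49407. A short usage sketch, assuming a hypothetical repo id:

from transformers import CLIPTokenizer

# Hypothetical repo id; the subfolder matches the files added in this commit.
tok2 = CLIPTokenizer.from_pretrained("modelslab/some-sdxl-model", subfolder="tokenizer_2")

print(tok2.pad_token, tok2.pad_token_id)     # ! 0, per the config above
print(tok2.bos_token_id, tok2.eos_token_id)  # 49406 49407

ids = tok2(
    "ultra realistic portrait photo",
    padding="max_length",
    max_length=tok2.model_max_length,  # 77
    truncation=True,
).input_ids
print(len(ids))  # 77; shorter prompts are right-padded with token id 0
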
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff