---
dataset_info:
  features:
  - name: day
    dtype: string
  - name: num_downloads
    dtype: int64
  splits:
  - name: accelerate
    num_bytes: 27302
    num_examples: 1241
  - name: datasets
    num_bytes: 27302
    num_examples: 1241
  - name: diffusers
    num_bytes: 16676
    num_examples: 758
  - name: evaluate
    num_bytes: 19272
    num_examples: 876
  - name: gradio
    num_bytes: 30668
    num_examples: 1394
  - name: huggingface_hub
    num_bytes: 28182
    num_examples: 1281
  - name: optimum
    num_bytes: 22286
    num_examples: 1013
  - name: peft
    num_bytes: 11528
    num_examples: 524
  - name: pytorch_image_models
    num_bytes: 30668
    num_examples: 1394
  - name: safetensors
    num_bytes: 12738
    num_examples: 579
  - name: tokenizers
    num_bytes: 30668
    num_examples: 1394
  - name: transformers
    num_bytes: 31350
    num_examples: 1425
  - name: sentence_transformers
    num_bytes: 5654
    num_examples: 257
  download_size: 171360
  dataset_size: 294294
configs:
- config_name: default
  data_files:
  - split: accelerate
    path: data/accelerate-*
  - split: datasets
    path: data/datasets-*
  - split: diffusers
    path: data/diffusers-*
  - split: evaluate
    path: data/evaluate-*
  - split: gradio
    path: data/gradio-*
  - split: huggingface_hub
    path: data/huggingface_hub-*
  - split: optimum
    path: data/optimum-*
  - split: peft
    path: data/peft-*
  - split: pytorch_image_models
    path: data/pytorch_image_models-*
  - split: safetensors
    path: data/safetensors-*
  - split: tokenizers
    path: data/tokenizers-*
  - split: transformers
    path: data/transformers-*
  - split: sentence_transformers
    path: data/sentence_transformers-*
---
# Dataset Card for "pip"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
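
Per the metadata above, every split holds rows of (`day`, `num_downloads`), with one split per Hugging Face library (e.g. `transformers`, `datasets`, `peft`), suggesting daily pip download counts. A minimal sketch of loading one split with the `datasets` library; the repo ID `open-source-metrics/pip` is an assumption — substitute this dataset's actual namespace and name.

```python
from datasets import load_dataset

# Repo ID is an assumption -- replace with this dataset's actual <namespace>/<name>.
ds = load_dataset("open-source-metrics/pip", split="transformers")

# Features per the card metadata: day (string), num_downloads (int64).
print(ds.features)
print(ds.num_rows)  # 1425 rows for the transformers split, per the split metadata
print(ds[0])        # e.g. {'day': '2023-01-01', 'num_downloads': 123456} (illustrative values)
```

Because each split shares the same two-column schema, the same call works for any split name listed under `configs` above, e.g. `split="peft"` or `split="safetensors"`.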