---
dataset_info:
features:
- name: day
dtype: string
- name: num_downloads
dtype: int64
splits:
- name: accelerate
num_bytes: 27302
num_examples: 1241
- name: datasets
num_bytes: 27302
num_examples: 1241
- name: diffusers
num_bytes: 16676
num_examples: 758
- name: evaluate
num_bytes: 19272
num_examples: 876
- name: gradio
num_bytes: 30668
num_examples: 1394
- name: huggingface_hub
num_bytes: 28182
num_examples: 1281
- name: optimum
num_bytes: 22286
num_examples: 1013
- name: peft
num_bytes: 11528
num_examples: 524
- name: pytorch_image_models
num_bytes: 30668
num_examples: 1394
- name: safetensors
num_bytes: 12738
num_examples: 579
- name: tokenizers
num_bytes: 30668
num_examples: 1394
- name: transformers
num_bytes: 31350
num_examples: 1425
- name: sentence_transformers
num_bytes: 5654
num_examples: 257
download_size: 171360
dataset_size: 294294
configs:
- config_name: default
data_files:
- split: accelerate
path: data/accelerate-*
- split: datasets
path: data/datasets-*
- split: diffusers
path: data/diffusers-*
- split: evaluate
path: data/evaluate-*
- split: gradio
path: data/gradio-*
- split: huggingface_hub
path: data/huggingface_hub-*
- split: optimum
path: data/optimum-*
- split: peft
path: data/peft-*
- split: pytorch_image_models
path: data/pytorch_image_models-*
- split: safetensors
path: data/safetensors-*
- split: tokenizers
path: data/tokenizers-*
- split: transformers
path: data/transformers-*
- split: sentence_transformers
path: data/sentence_transformers-*
---
# Dataset Card for "pip"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
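
The template body above is empty, so as a minimal sketch: each split declared in the metadata corresponds to one tracked library, and each row carries a `day` (string) and a `num_downloads` (int64) field. The `<namespace>` placeholder below is an assumption, since the owning account is not stated in this card.

```python
from datasets import load_dataset

# Load the daily pip download counts for one library (split).
# "<namespace>" is a placeholder: the dataset's actual owner on the
# Hub is not given in this card and must be substituted.
ds = load_dataset("<namespace>/pip", split="transformers")

# Each example has the two features declared in dataset_info:
# {"day": "...", "num_downloads": ...}
print(ds[0])
```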