bandhit commited on
Commit
33cbc90
1 Parent(s): c080ac7

Added more comments

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -12,7 +12,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStream
12
  DESCRIPTION = "# Typhoon 7B via 4-bits Quantization"
13
 
14
  if not torch.cuda.is_available():
15
- DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU, please clone this space and run on your local device.</p>"
16
 
17
  MAX_MAX_NEW_TOKENS = 2048
18
  DEFAULT_MAX_NEW_TOKENS = 128
 
12
  DESCRIPTION = "# Typhoon 7B via 4-bits Quantization"
13
 
14
  if not torch.cuda.is_available():
15
+ DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU, please clone this space and run on your local device, GPU Ampere architecture with 8 GB VRAM is required.</p>"
16
 
17
  MAX_MAX_NEW_TOKENS = 2048
18
  DEFAULT_MAX_NEW_TOKENS = 128