ariG23498 HF Staff committed on
Commit e62dd09 · verified · 1 Parent(s): abe9590

Upload utter-project_EuroLLM-1.7B_1.txt with huggingface_hub

Files changed (1)
  1. utter-project_EuroLLM-1.7B_1.txt +56 -0
utter-project_EuroLLM-1.7B_1.txt ADDED
@@ -0,0 +1,56 @@
+ ```CODE:
+ # Load model directly
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("utter-project/EuroLLM-1.7B")
+ model = AutoModelForCausalLM.from_pretrained("utter-project/EuroLLM-1.7B")
+ ```
+
+ ERROR:
+ Traceback (most recent call last):
+   File "/tmp/.cache/uv/environments-v2/ba0678a5bc755c9c/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2343, in _from_pretrained
+     tokenizer = cls(*init_inputs, **init_kwargs)
+   File "/tmp/.cache/uv/environments-v2/ba0678a5bc755c9c/lib/python3.13/site-packages/transformers/models/llama/tokenization_llama_fast.py", line 154, in __init__
+     super().__init__(
+     ~~~~~~~~~~~~~~~~^
+         vocab_file=vocab_file,
+         ^^^^^^^^^^^^^^^^^^^^^^
+     ...<10 lines>...
+         **kwargs,
+         ^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/ba0678a5bc755c9c/lib/python3.13/site-packages/transformers/tokenization_utils_fast.py", line 108, in __init__
+     raise ValueError(
+     ...<2 lines>...
+     )
+ ValueError: Cannot instantiate this tokenizer from a slow version. If it's based on sentencepiece, make sure you have sentencepiece installed.
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+   File "/tmp/utter-project_EuroLLM-1.7B_1VzFfd1.py", line 19, in <module>
+     tokenizer = AutoTokenizer.from_pretrained("utter-project/EuroLLM-1.7B")
+   File "/tmp/.cache/uv/environments-v2/ba0678a5bc755c9c/lib/python3.13/site-packages/transformers/models/auto/tokenization_auto.py", line 1140, in from_pretrained
+     return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
+     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/tmp/.cache/uv/environments-v2/ba0678a5bc755c9c/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2097, in from_pretrained
+     return cls._from_pretrained(
+     ~~~~~~~~~~~~~~~~~~~~^
+         resolved_vocab_files,
+         ^^^^^^^^^^^^^^^^^^^^^
+     ...<9 lines>...
+         **kwargs,
+         ^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/ba0678a5bc755c9c/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2344, in _from_pretrained
+     except import_protobuf_decode_error():
+     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^
+   File "/tmp/.cache/uv/environments-v2/ba0678a5bc755c9c/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 87, in import_protobuf_decode_error
+     raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))
+ ImportError:
+ requires the protobuf library but it was not found in your environment. Check out the instructions on the
+ installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
+ that match your environment. Please note that you may need to restart your runtime after installation.
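
For context on the failure chain logged above: EuroLLM's fast tokenizer has to be converted from the repo's slow sentencepiece model on first load, and that conversion needs both the sentencepiece and protobuf packages, which the two exceptions name in turn. A minimal sketch of the likely fix, assuming a plain pip-managed environment (the install step is an assumption, not part of the log):

```python
# Assumed fix: install the two dependencies the traceback names before loading.
#   pip install sentencepiece protobuf
from transformers import AutoTokenizer, AutoModelForCausalLM

# With sentencepiece and protobuf available, the fast tokenizer can be
# converted from the slow sentencepiece tokenizer on first load.
tokenizer = AutoTokenizer.from_pretrained("utter-project/EuroLLM-1.7B")
model = AutoModelForCausalLM.from_pretrained("utter-project/EuroLLM-1.7B")
```

Passing use_fast=False to AutoTokenizer.from_pretrained is a possible alternative that skips the protobuf-backed conversion, though the slow tokenizer still requires sentencepiece.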