Skip to content

Commit ce81ba1

Browse files
committed
check windows on precommit
1 parent 93be036 commit ce81ba1

File tree

1 file changed

+6
-2
lines changed

1 file changed

+6
-2
lines changed

.github/workflows/causal_lm_cpp.yml

+6-2
Original file line number | Diff line number | Diff line change
@@ -194,14 +194,18 @@ jobs:
194 194
shell: cmd
195 195
run: |
196 196
call w_openvino_toolkit_windows_2024.1.0.15008.f4afc983258_x86_64\setupvars.bat
197-
198-
.\build\Release\beam_search_causal_lm.exe .\TinyLlama-1.1B-Chat-v1.0\ "69" > .\pred.txt
197+
.\build\text_generation\causal_lm\cpp\Release\beam_search_causal_lm.exe .\TinyLlama-1.1B-Chat-v1.0\ "69" > .\pred.txt
198+
echo "sample outputs"
199+
type .\pred.txt
200+
echo "huggingface outputs"
201+
199 202
echo import transformers > ref.py
200 203
echo predictions = open('pred.txt', 'r').read() >> ref.py
201 204
echo tokenizer = transformers.LlamaTokenizer.from_pretrained('TinyLlama/TinyLlama-1.1B-Chat-v1.0') >> ref.py
202 205
echo tokenized = tokenizer('69', return_tensors='pt') >> ref.py
203 206
echo for beam in transformers.LlamaForCausalLM.from_pretrained('TinyLlama/TinyLlama-1.1B-Chat-v1.0').generate(**tokenized, num_beam_groups=3, num_beams=15, num_return_sequences=15, diversity_penalty=1.0, max_new_tokens=20, early_stopping=False, length_penalty=1.0, no_repeat_ngram_size=9**9, do_sample=False): >> ref.py
204 207
echo ref = ': ' + tokenizer.decode(beam[tokenized['input_ids'].numel():], skip_special_tokens=True) >> ref.py
208+
echo print(ref) >> ref.py
205 209
echo idx = predictions.find(ref) >> ref.py
206 210
echo if -1 == idx: >> ref.py
207 211
echo raise RuntimeError(f'Missing "{ref=}" from predictions') >> ref.py

0 commit comments

Comments (0)