#Requires -Version 5.0

<#
.SYNOPSIS
Counts the tokens of a prompt for a specific model.

.DESCRIPTION
This script counts the tokens of a prompt, read from a text file or passed directly, for a specific GGUF model by invoking the tokenize tool of llama.cpp.

.PARAMETER model
Specifies the path to the GGUF model file.

.PARAMETER file
Specifies the path to the prompt text file.

.PARAMETER prompt
Specifies the prompt text directly.

.PARAMETER debug
Logs the result of the tokenization.

.EXAMPLE
.\count_tokens.ps1 -model "C:\models\openchat-3.5-0106.Q5_K_M.gguf" -file "C:\prompts\chat_with_llm.txt"

.EXAMPLE
.\count_tokens.ps1 -model "C:\models\openchat-3.5-0106.Q5_K_M.gguf" -prompt "Hello world!"

.EXAMPLE
.\count_tokens.ps1 -model "C:\models\openchat-3.5-0106.Q5_K_M.gguf" -prompt "Hello world!" -debug
#>

Param (

    [Parameter(
        HelpMessage="The path to the GGUF model file.",
        Mandatory=$true
    )]
    [String]
    $model,

    [Parameter(
        HelpMessage="The path to the prompt text file."
    )]
    [String]
    $file,

    [Parameter(
        HelpMessage="The prompt input."
    )]
    [String]
    $prompt
)
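
# Note: -file and -prompt are mutually exclusive. This is enforced manually
# below rather than declaratively via parameter sets.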

if ((!$file -and !$prompt) -or ($file -and $prompt)) {
    throw "Exactly one prompt source is required: Specify either the -file or the -prompt parameter."
}
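
# The [Parameter()] attributes above make this an advanced script, so the
# common -Debug and -Verbose switches are available and can be read from
# the bound parameters.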

$debug = $PSCmdlet.MyInvocation.BoundParameters["Debug"].IsPresent -eq $true
$verbose = $PSCmdlet.MyInvocation.BoundParameters["Verbose"].IsPresent -eq $true

# We are resolving the absolute path to the llama.cpp project directory.
$llamaCppPath = Resolve-Path -Path "${PSScriptRoot}\..\vendor\llama.cpp"
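
# We are resolving the absolute paths to the model file and, if given, the prompt file.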
$modelPath = Resolve-Path -Path "${model}"

if ($file) {
    $filePath = Resolve-Path -Path "${file}"
}
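
# Note: The doubled backticks (``) inside the double-quoted strings below become
# literal backticks, which Invoke-Expression then interprets as line continuations.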

if ($debug) {

    # For debugging purposes we are logging the default output of the tokenization.
    Invoke-Expression "${llamaCppPath}\build\bin\Release\tokenize.exe ``
    $(if ($modelPath) {"--model '${modelPath}'"}) ``
    $(if ($filePath) {"--file '${filePath}'"} else {"--prompt '${prompt}'"})"
}

# We are only interested in the numerical token IDs array format like [1, 2, 3].
$tokensPythonArrayString = Invoke-Expression "${llamaCppPath}\build\bin\Release\tokenize.exe ``
    --log-disable ``
    --ids ``
    $(if ($modelPath) {"--model '${modelPath}'"}) ``
    $(if ($filePath) {"--file '${filePath}'"} else {"--prompt '${prompt}'"})"

# We are converting the Python array string into a PowerShell array of integers.
$tokens = "${tokensPythonArrayString}".Trim('[', ']') -split ',' | ForEach-Object { [int]$_ }
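
# For example, the string "[1, 2, 3]" becomes the array @(1, 2, 3).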

Write-Host $tokens.Length
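
# The script writes only the token count to the console. Hypothetical example
# (the actual count depends on the model's tokenizer):
#
#   PS> .\count_tokens.ps1 -model "C:\models\openchat-3.5-0106.Q5_K_M.gguf" -prompt "Hello world!"
#   4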