Add non-working partial yzma implementation

Chandler Swift 2025-12-17 20:17:59 -06:00
parent a542f436f3
commit c62d126bdc
3 changed files with 61 additions and 0 deletions

go.mod (+8)

@@ -1,3 +1,11 @@
module git.chandlerswift.com/chandlerswift/svs-services-server

go 1.25.4

require github.com/hybridgroup/yzma v1.3.0

require (
	github.com/ebitengine/purego v0.9.1 // indirect
	github.com/jupiterrider/ffi v0.5.1 // indirect
	golang.org/x/sys v0.38.0 // indirect
)

go.sum (+8)

@@ -0,0 +1,8 @@
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/hybridgroup/yzma v1.3.0 h1:5dw9qEcFEGEJq+tA12Ooa6D/e0PROqv7Ix6VfSR9MQI=
github.com/hybridgroup/yzma v1.3.0/go.mod h1:UUYw+DLlrgtBYm+B+9XD3boB1ZcDpfbAnYHKW3VKKZ4=
github.com/jupiterrider/ffi v0.5.1 h1:l7ANXU+Ex33LilVa283HNaf/sTzCrrht7D05k6T6nlc=
github.com/jupiterrider/ffi v0.5.1/go.mod h1:x7xdNKo8h0AmLuXfswDUBxUsd2OqUP4ekC8sCnsmbvo=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=

main.go (+45)

@@ -2,9 +2,12 @@ package main
import (
	"embed"
	"fmt"
	"html/template"
	"net/http"
	"time"

	"github.com/hybridgroup/yzma/pkg/llama"
)

//go:embed templates/index.html
@@ -99,5 +102,47 @@ func main() {
	// Serve embedded headshots file
	http.Handle("/headshots/", http.FileServer(http.FS(headshots)))
	// TODO: derive from something?
	libPath := "/nix/store/jml3vhvay9yy94qj8bmmhbf2dhx6q2n1-llama-cpp-7356/lib"
	modelFile := "./SmolLM-135M.Q2_K.gguf"
	prompt := "Are you ready to go?"
	responseLength := int32(128)

	// Load the llama.cpp shared library and initialize the backend.
	llama.Load(libPath)
	llama.LogSet(llama.LogSilent())
	llama.Init()

	model, err := llama.ModelLoadFromFile(modelFile, llama.ModelDefaultParams())
	if err != nil {
		panic(err)
	}
	lctx, err := llama.InitFromModel(model, llama.ContextDefaultParams())
	if err != nil {
		panic(err)
	}
	vocab := llama.ModelGetVocab(model)

	// get tokens from the prompt
	tokens := llama.Tokenize(vocab, prompt, true, false)
	batch := llama.BatchGetOne(tokens)

	// Greedy sampling: always take the most likely next token.
	sampler := llama.SamplerChainInit(llama.SamplerChainDefaultParams())
	llama.SamplerChainAdd(sampler, llama.SamplerInitGreedy())

	// Decode the batch, sample one token, print it, then feed the
	// sampled token back in as the next single-token batch.
	for pos := int32(0); pos < responseLength; pos += batch.NTokens {
		llama.Decode(lctx, batch)
		token := llama.SamplerSample(sampler, lctx, -1)
		if llama.VocabIsEOG(vocab, token) {
			fmt.Println()
			break
		}
		buf := make([]byte, 36)
		n := llama.TokenToPiece(vocab, token, buf, 0, true)
		fmt.Print(string(buf[:n]))
		batch = llama.BatchGetOne([]llama.Token{token})
	}
	fmt.Println()
	http.ListenAndServe(":8080", nil)
}
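
Since the commit message flags this as non-working and partial, one direction for finishing it: a minimal sketch (not part of this commit) that wraps the loop above in a closure and exposes it through the HTTP server the program already runs, instead of generating once at startup. The /generate route, the closure name, and the prompt query parameter are assumptions; only the yzma calls already used in this diff appear. A real version would also need to serialize concurrent requests and reset the context's KV cache between generations, neither of which is shown. This slots into main() before ListenAndServe:

	// Hypothetical helper: run one greedy completion for a prompt,
	// reusing the lctx, vocab, and sampler initialized above.
	generate := func(prompt string, responseLength int32) string {
		var out []byte
		tokens := llama.Tokenize(vocab, prompt, true, false)
		batch := llama.BatchGetOne(tokens)
		for pos := int32(0); pos < responseLength; pos += batch.NTokens {
			llama.Decode(lctx, batch)
			token := llama.SamplerSample(sampler, lctx, -1)
			if llama.VocabIsEOG(vocab, token) {
				break
			}
			buf := make([]byte, 36)
			n := llama.TokenToPiece(vocab, token, buf, 0, true)
			out = append(out, buf[:n]...)
			batch = llama.BatchGetOne([]llama.Token{token})
		}
		return string(out)
	}

	// Assumed route: GET /generate?prompt=... returns the completion.
	http.HandleFunc("/generate", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, generate(r.URL.Query().Get("prompt"), 128))
	})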