simplify safetensors reading

Michael Yang
2024-05-20 09:47:01 -07:00
parent 3591bbe56f
commit 171eb040fc
6 changed files with 49 additions and 81 deletions

View File

@@ -119,7 +119,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
 		t.Offset = uint64(offset)
-		if _, err := rs.Seek(int64(t.size()), io.SeekCurrent); err != nil {
+		if _, err := rs.Seek(int64(t.Size()), io.SeekCurrent); err != nil {
 			return err
 		}
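The only change in this hunk is the rename from the unexported size() to the exported Size(), presumably so the tensor byte count can be used from outside the package (e.g. by the safetensors reading path the commit title refers to). Below is a minimal, self-contained sketch of the skip-over-payload pattern these call sites rely on; the tensor struct and its fields are stand-ins, not the repository's types:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// tensor is a stand-in with just enough state to illustrate the seek.
type tensor struct {
	name   string
	nbytes uint64
}

// Size reports the tensor payload size in bytes.
func (t tensor) Size() uint64 { return t.nbytes }

// skipTensors advances rs past every tensor payload without reading it.
func skipTensors(rs io.ReadSeeker, tensors []tensor) error {
	for _, t := range tensors {
		if _, err := rs.Seek(int64(t.Size()), io.SeekCurrent); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	rs := bytes.NewReader(make([]byte, 64))
	fmt.Println(skipTensors(rs, []tensor{{"a", 16}, {"b", 48}})) // <nil>
}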

View File

@@ -106,7 +106,7 @@ type Layer map[string]*Tensor
 func (l Layer) size() (size uint64) {
 	for _, t := range l {
-		size += t.size()
+		size += t.Size()
 	}
 	return size
@@ -185,7 +185,7 @@ func (t Tensor) parameters() uint64 {
 	return count
 }
-func (t Tensor) size() uint64 {
+func (t Tensor) Size() uint64 {
 	return t.parameters() * t.typeSize() / t.blockSize()
 }
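Size() turns an element count into bytes via parameters × typeSize ÷ blockSize, which covers both plain and block-quantized types. A hedged illustration using GGML's Q4_0 layout (32 elements per 18-byte block: a 2-byte fp16 scale plus 16 bytes of packed 4-bit values); the helper below is illustrative only, not the package's code:

package main

import "fmt"

// Q4_0 block layout: 32 elements share one 18-byte block.
const (
	q40BlockSize = 32 // elements per block
	q40TypeSize  = 18 // bytes per block
)

// tensorBytes mirrors the parameters*typeSize/blockSize arithmetic above.
func tensorBytes(parameters, typeSize, blockSize uint64) uint64 {
	return parameters * typeSize / blockSize
}

func main() {
	// A 4096x4096 weight quantized as Q4_0: 16,777,216 elements -> 9 MiB of data.
	fmt.Println(tensorBytes(4096*4096, q40TypeSize, q40BlockSize)) // 9437184
}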
@@ -288,7 +288,7 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
 		// mixtral 8x22b
 		ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
 		partialOffload = max(
-			3*ffnGateExpsWeight.size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
+			3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
			4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch),
 		)
 	} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
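For the mixtral 8x22b case, partialOffload is the larger of two rough memory estimates: one that includes the expert FFN gate weights and one based on attention buffers. A sketch of the shape of that comparison, with made-up dimensions that are not read from any real model:

package main

import "fmt"

func main() {
	// Hypothetical dimensions, chosen only to exercise the formula.
	var (
		batch     uint64 = 512
		context   uint64 = 4096
		heads     uint64 = 48
		headsKV   uint64 = 8
		embedding uint64 = 6144
		ff        uint64 = 16384
		gateBytes uint64 = 1 << 30 // stand-in for ffnGateExpsWeight.Size()
	)

	// Same max() comparison as the hunk above (builtin max needs Go 1.21+).
	partialOffload := max(
		3*gateBytes+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
		4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch),
	)
	fmt.Println(partialOffload)
}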

View File

@@ -241,11 +241,11 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
 	}
 	for _, tensor := range llm.tensors {
-		if _, err := rs.Seek(int64(tensor.size()), io.SeekCurrent); err != nil {
+		if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil {
 			return err
 		}
-		padding := llm.padding(int64(tensor.size()), int64(alignment))
+		padding := llm.padding(int64(tensor.Size()), int64(alignment))
 		if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
 			return err
 		}
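Between tensors, GGUF data is padded out to the file's alignment, so the decoder seeks past that padding as well. A standalone sketch of the usual round-up-to-alignment computation; the actual padding helper in the package may differ in signature:

package main

import "fmt"

// paddingTo returns how many bytes are needed to round n up to a multiple of align.
func paddingTo(n, align int64) int64 {
	return (align - n%align) % align
}

func main() {
	fmt.Println(paddingTo(18, 32))      // 14: an 18-byte payload needs 14 bytes of padding
	fmt.Println(paddingTo(9437184, 32)) // 0: already a multiple of 32
}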