Go: Excessive memory usage, memory leak

I am very careful with memory, as I have to write programs that need to cope with massive datasets.

Currently my application quickly reaches 32GB of memory, starts swapping, and then gets killed by the system.

I do not understand how this can happen, since every variable is collectable (local to a function and quickly released) except TokensStruct and TokensCount in the Trainer struct. TokensCount is just a uint. TokensStruct is a 1,000,000-row slice of [5]uint32 plus a string, so that means 20 bytes + string per record, which we could call a maximum of 50 bytes per record. 50 * 1,000,000 = 50MB of memory required. So this script should not use much more than 50MB + overhead + temporary collectable variables in the functions (maybe another 50MB max). The maximum potential size of TokensStruct is 5,000,000 rows, as that is the size of dictionary, but even then it would be only 250MB of memory. dictionary is a map and apparently uses around 600MB of memory, as that is how much the app uses right after startup, but this is not an issue because dictionary is only loaded once and never written to again.
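
For reference, a quick back-of-the-envelope check of the fixed part of one record (just a sketch; the string's character data and any allocator overhead come on top of this):

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    var counts [5]uint32
    var s string
    fmt.Println(unsafe.Sizeof(counts)) // 20 bytes for the [5]uint32
    fmt.Println(unsafe.Sizeof(s))      // 16 bytes for the string header on 64-bit; the characters are extra
}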

Instead it uses 32GB of memory and then dies. Judging by the speed at which it does this, I expect it would happily reach 1TB if it could. Memory usage grows linearly with the size of the files being loaded, which suggests that it never frees any memory at all: everything that enters the app is allocated more memory, and that memory is never released.

I tried calling runtime.GC() in case the garbage collector wasn't running often enough, but it made no difference.

Since the memory usage increases linearly, this would imply a memory leak in getTokens() or LoadZip(). I don't see how that could be, since they are both functions that do one task and then return. Or the tokens variable in Start() could be the cause of the leak. Basically it looks like every file that is loaded and parsed is never released from memory, as that is the only way memory could fill up linearly and keep rising toward 32GB and beyond.

Absolute nightmare! What's wrong with Go? Any way to fix this?

package main

import (
    "bytes"
    "code.google.com/p/go.text/transform"
    "code.google.com/p/go.text/unicode/norm"
    "compress/zlib"
    "encoding/gob"
    "fmt"
    "github.com/AlasdairF/BinSearch"
    "io/ioutil"
    "os"
    "regexp"
    "runtime"
    "strings"
    "unicode"
    "unicode/utf8"
)

type TokensStruct struct {
    binsearch.Key_string
    Value [][5]uint32
}

type Trainer struct {
    Tokens      TokensStruct
    TokensCount uint
}

func checkErr(err error) {
    if err == nil {
        return
    }
    fmt.Println(`Some Error:`, err)
    panic(err)
}

// Local helper function for normalization of UTF8 strings.
func isMn(r rune) bool {
    return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
}

// This map is used by removeAccentsBytesDashes to transliterate characters that have no combining-mark decomposition.
var transliterations = map[rune]string{'Æ': "E", 'Ð': "D", 'Ł': "L", 'Ø': "OE", 'Þ': "Th", 'ß': "ss", 'æ': "e", 'ð': "d", 'ł': "l", 'ø': "oe", 'þ': "th", 'Œ': "OE", 'œ': "oe"}

// removeAccentsBytesDashes converts accented UTF8 characters into their non-accented equivalents and replaces dashes with spaces, from a []byte.
func removeAccentsBytesDashes(b []byte) ([]byte, error) {
    mnBuf := make([]byte, len(b))
    t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
    n, _, err := t.Transform(mnBuf, b, true)
    if err != nil {
        return nil, err
    }
    mnBuf = mnBuf[:n]
    tlBuf := bytes.NewBuffer(make([]byte, 0, len(mnBuf)*2))
    for i, w := 0, 0; i < len(mnBuf); i += w {
        r, width := utf8.DecodeRune(mnBuf[i:])
        if r == '-' {
            tlBuf.WriteByte(' ')
        } else {
            if d, ok := transliterations[r]; ok {
                tlBuf.WriteString(d)
            } else {
                tlBuf.WriteRune(r)
            }
        }
        w = width
    }
    return tlBuf.Bytes(), nil
}

func LoadZip(filename string) ([]byte, error) {
    // Open file for reading
    fi, err := os.Open(filename)
    if err != nil {
        return nil, err
    }
    defer fi.Close()
    // Attach zlib reader (the file is zlib-compressed, despite the .zip name)
    fz, err := zlib.NewReader(fi)
    if err != nil {
        return nil, err
    }
    defer fz.Close()
    // Read the whole decompressed stream into memory
    data, err := ioutil.ReadAll(fz)
    if err != nil {
        return nil, err
    }
    return norm.NFC.Bytes(data), nil // return normalized
}

func getTokens(pibn string) []string {
    var data []byte
    var err error
    data, err = LoadZip(`/storedir/` + pibn + `/text.zip`)
    checkErr(err)
    data, err = removeAccentsBytesDashes(data)
    checkErr(err)
    data = bytes.ToLower(data)
    data = reg2.ReplaceAll(data, []byte("$2")) // strip contraction prefixes, keeping the following word
    data = reg.ReplaceAllLiteral(data, nil)    // remove everything that is not a-z, 0-9 or whitespace
    tokens := strings.Fields(string(data))
    return tokens
}

func (t *Trainer) Start() {
    data, err := ioutil.ReadFile(`list.txt`)
    checkErr(err)
    pibns := bytes.Fields(data)
    for i, pibn := range pibns {
        tokens := getTokens(string(pibn))
        t.addTokens(tokens)
        if i%100 == 0 {
            runtime.GC() // I added this just to try to stop the memory craziness, but it makes no difference
        }
    }
}

func (t *Trainer) addTokens(tokens []string) {
    for _, tok := range tokens {
        if _, ok := dictionary[tok]; ok {
            if indx, ok2 := t.Tokens.Find(tok); ok2 {
                ar := t.Tokens.Value[indx]
                ar[0]++
                t.Tokens.Value[indx] = ar
                t.TokensCount++
            } else {
                t.Tokens.AddKeyAt(tok, indx)
                t.Tokens.Value = append(t.Tokens.Value, [5]uint32{0, 0, 0, 0, 0})
                copy(t.Tokens.Value[indx+1:], t.Tokens.Value[indx:])
                t.Tokens.Value[indx] = [5]uint32{1, 0, 0, 0, 0}
                t.TokensCount++
            }
        }
    }
    return
}

func LoadDictionary() {
    dictionary = make(map[string]bool)
    data, err := ioutil.ReadFile(`dictionary`)
    checkErr(err)
    words := bytes.Fields(data)
    for _, word := range words {
        strword := string(word)
        dictionary[strword] = false
    }
}

var reg = regexp.MustCompile(`[^a-z0-9\s]`)
var reg2 = regexp.MustCompile(`\b(c|l|all|dall|dell|nell|sull|coll|pell|gl|agl|dagl|degl|negl|sugl|un|m|t|s|v|d|qu|n|j)'([a-z])`) //contractions
var dictionary map[string]bool

func main() {
    trainer := new(Trainer)
    LoadDictionary()
    trainer.Start()
}
asked Nov 01 '22 by Alasdair


1 Answer

If you're tokenizing from a large string, make sure to avoid memory pinning. From the comments above, it sounds like the tokens are substrings of a large string.
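
Here is a tiny, self-contained demonstration of the effect (a sketch only, assuming a 64-bit machine): keeping a single small token alive keeps the entire backing array of the big string reachable, so the GC can't free it.

package main

import (
    "fmt"
    "runtime"
    "strings"
)

func firstToken() string {
    big := strings.Repeat("word ", 10000000) // ~50MB string
    return strings.Fields(big)[0]            // the token is a substring sharing big's backing array
}

func main() {
    tok := firstToken()
    runtime.GC()
    var m runtime.MemStats
    runtime.ReadMemStats(&m)
    // HeapAlloc stays around 50MB even though we only kept a 4-byte token,
    // because that token pins the whole backing array.
    fmt.Println(tok, m.HeapAlloc)
}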

You may need to add a little extra to your getTokens() function to guarantee the tokens aren't pinning the whole file in memory.

func getTokens(...) {
    // near the end of the function, once tokens has been built:
    for i, t := range tokens {
        // string([]byte(t)) copies the token into its own small allocation,
        // so it no longer references the backing array of the whole file.
        tokens[i] = string([]byte(t))
    }
}
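
This forces each token into its own small allocation instead of sharing the backing array of the whole file. (On newer Go releases, strings.Clone(t) does the same copy a little more explicitly, but string([]byte(t)) works everywhere.)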

By the way, reading the whole file into memory using ioutil.ReadFile all at once looks dubious. Are you sure you can't use bufio.Scanner?
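
For instance, here's a minimal sketch of what Start() could look like, reusing checkErr, getTokens and addTokens from your code and assuming "bufio" and "os" are imported:

func (t *Trainer) Start() {
    fi, err := os.Open(`list.txt`)
    checkErr(err)
    defer fi.Close()
    scanner := bufio.NewScanner(fi)
    scanner.Split(bufio.ScanWords) // same effect as bytes.Fields, but streaming
    for scanner.Scan() {
        tokens := getTokens(scanner.Text()) // Text() returns a fresh copy of each identifier
        t.addTokens(tokens)
    }
    checkErr(scanner.Err())
}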

Looking at the code more closely: if you are truly concerned about memory, take advantage of io.Reader. Try to avoid pulling the contents of a whole file into memory at once. Use io.Reader and the transform package "with the grain": the whole point of the transform package you're using is to construct flexible Readers that can stream through data, and the way you're using it now works against that intent.

For example, here's a simplification of what you're doing:

package main

import (
    "bufio"
    "bytes"
    "fmt"
    "unicode/utf8"

    "code.google.com/p/go.text/transform"
)

type AccentsTransformer map[rune]string

func (a AccentsTransformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
    for nSrc < len(src) {
        // If we're at the edge, note this and return.
        if !atEOF && !utf8.FullRune(src[nSrc:]) {
            err = transform.ErrShortSrc
            return
        }
        r, width := utf8.DecodeRune(src[nSrc:])
        if r == utf8.RuneError && width == 1 {
            err = fmt.Errorf("Decoding error")
            return
        }
        if d, ok := a[r]; ok {
            if nDst+len(d) > len(dst) {
                err = transform.ErrShortDst
                return
            }
            copy(dst[nDst:], d)
            nSrc += width
            nDst += len(d)
            continue
        }

        if nDst+width > len(dst) {
            err = transform.ErrShortDst
            return
        }
        copy(dst[nDst:], src[nSrc:nSrc+width])
        nDst += width
        nSrc += width
    }
    return
}

func main() {
    transliterations := AccentsTransformer{'Æ': "E", 'Ø': "OE"}
    testString := "cØØl beÆns"
    b := transform.NewReader(bytes.NewBufferString(testString), transliterations)
    scanner := bufio.NewScanner(b)
    scanner.Split(bufio.ScanWords)
    for scanner.Scan() {
        fmt.Println("token:", scanner.Text())
    }
}

It then becomes really easy to chain transformers together. So, for example, if we wanted to remove all hyphens from the input stream, it's just a matter of using transform.Chain appropriately:

func main() {
    transliterations := AccentsTransformer{'Æ': "E", 'Ø': "OE"}
    removeHyphens := transform.RemoveFunc(func(r rune) bool {
        return r == '-'
    })
    allTransforms := transform.Chain(transliterations, removeHyphens)

    testString := "cØØl beÆns - the next generation"
    b := transform.NewReader(bytes.NewBufferString(testString), allTransforms)
    scanner := bufio.NewScanner(b)
    scanner.Split(bufio.ScanWords)
    for scanner.Scan() {
        fmt.Println("token:", scanner.Text())
    }
}

I have not exhaustively tested the code above, so please don't just copy-and-paste it without sufficient tests. :P I just cooked it up fast. But this kind of approach --- avoiding whole-file reading --- will scale better because it will read the file in chunks.
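
To tie it back to your code, here's an equally untested sketch of what a streaming getTokens could look like, chaining the zlib reader into a transform.NewReader and tokenizing with bufio.Scanner. It reuses isMn and the /storedir layout from your code and assumes "bufio" is added to the imports; the lowercasing and regex cleanup would have to move to a per-token step.

func getTokensStreaming(pibn string) ([]string, error) {
    fi, err := os.Open(`/storedir/` + pibn + `/text.zip`)
    if err != nil {
        return nil, err
    }
    defer fi.Close()
    fz, err := zlib.NewReader(fi)
    if err != nil {
        return nil, err
    }
    defer fz.Close()

    // Normalize and strip combining marks on the fly instead of on a whole-file buffer.
    chain := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
    scanner := bufio.NewScanner(transform.NewReader(fz, chain))
    scanner.Split(bufio.ScanWords)

    var tokens []string
    for scanner.Scan() {
        // scanner.Text() allocates a fresh string, so nothing pins the file contents.
        tokens = append(tokens, strings.ToLower(scanner.Text()))
    }
    return tokens, scanner.Err()
}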

answered Nov 13 '22 by dyoo