By Ohm Patel
Any computer system in today's world generates a very large amount of logs or data every day. As the system grows, it is not feasible to store this debugging data in a database, since it is immutable and is only going to be used for analytics and fault-resolution purposes. So organisations tend to store it in files, which live on local disk storage.
We are going to extract logs from a 16 GB .txt or .log file containing millions of lines, using Golang.
Let's Code…!
Let's open the file first. We will use the standard Go os.File for any file IO.
f, err := os.Open(fileName)
if err != nil {
	fmt.Println("cannot read the file", err)
	return
}
defer f.Close() // do not forget to close the file, after checking the error
Once the file is opened, we have the following two options to proceed:
- Read the file line by line. This helps to reduce the strain on memory, but costs more time in IO (a minimal sketch of this approach follows below).
- Read the whole file into memory at once and process it. This consumes a lot more memory, but cuts the IO time down considerably.
Since the file is huge, i.e. 16 GB, we cannot load the whole file into memory. But the first option is not feasible for us either, since we want to process the file within seconds.
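For reference, here is a minimal sketch of what option 1 would look like, reading line by line with bufio.Scanner; it is shown only for comparison and is not the approach taken here:

scanner := bufio.NewScanner(f)
for scanner.Scan() {
	line := scanner.Text() // one log line at a time, low memory footprint
	_ = line               // parse / filter the line here
}
if err := scanner.Err(); err != nil {
	fmt.Println("error while scanning the file", err)
}

A single goroutine doing all the IO and all the parsing this way is far too slow for a 16 GB file.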
But guess what, there is a third option. Voila…! Instead of loading the whole file into memory, we will load it in chunks, using bufio.NewReader(), which is available in Go.
r := bufio.NewReader(f)

for {
	buf := make([]byte, 4*1024) // the chunk size
	n, err := r.Read(buf)       // load a chunk into the buffer
	buf = buf[:n]

	if n == 0 {
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println(err)
			break
		}
		return err
	}
}
Once we have a chunk, we will fork a thread, i.e. a goroutine, to process each chunk concurrently with the other chunks. The code above then changes to:
// sync.Pools to reuse the memory and decrease the pressure on
// the Garbage Collector
linesPool := sync.Pool{New: func() interface{} {
	lines := make([]byte, 500*1024)
	return lines
}}

stringPool := sync.Pool{New: func() interface{} {
	lines := ""
	return lines
}}

slicePool := sync.Pool{New: func() interface{} {
	lines := make([]string, 100)
	return lines
}}

r := bufio.NewReader(f)

var wg sync.WaitGroup // wait group to keep track of all the spawned goroutines

for {
	buf := linesPool.Get().([]byte)

	n, err := r.Read(buf)
	buf = buf[:n]

	if n == 0 {
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Println(err)
			break
		}
		return err
	}

	// read up to the next newline, so the chunk always ends on a complete log line
	nextUntillNewline, err := r.ReadBytes('\n')
	if err != io.EOF {
		buf = append(buf, nextUntillNewline...)
	}

	wg.Add(1)
	go func() {
		// process each chunk concurrently
		// start -> log start time, end -> log end time
		ProcessChunk(buf, &linesPool, &stringPool, &slicePool, start, end)
		wg.Done()
	}()
}

wg.Wait()
The code above introduces two new optimizations:
- sync.Pool is a powerful pool of instances that can be reused to reduce the pressure on the garbage collector. We reuse the memory allocated to the various slices, which lowers memory consumption and makes the job considerably faster.
- Goroutines, which process the buffer chunks in parallel and considerably increase the processing speed.
Also note the r.ReadBytes('\n') call: since a fixed-size chunk can end in the middle of a log line, the read is extended up to the next newline, so that no line is split across two goroutines.
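To make the sync.Pool usage concrete, here is a minimal, self-contained sketch (independent of the log extractor) of the Get/Put cycle the code above relies on:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// a pool that hands out 4 KB byte slices; New is only called when the
	// pool is empty, otherwise a previously Put slice is reused
	bufPool := sync.Pool{New: func() interface{} {
		return make([]byte, 4*1024)
	}}

	buf := bufPool.Get().([]byte) // take a buffer out of the pool
	copy(buf, "some chunk of log data")
	fmt.Println(len(buf))

	bufPool.Put(buf) // put it back so the next Get can reuse it
}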
Now let's implement the ProcessChunk function, which will process the log lines, which are of this format:
2020-01-31T20:12:38.1234Z, Some Field, Other Field, And so on, Till new line,...\n
We will extract the logs that fall between the two timestamps supplied on the command line.
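As a quick aside, this is how the timestamp prefix of such a line maps onto a Go layout string. It is a minimal sketch of the same time.Parse call used inside ProcessChunk below, with the sample value taken from the line format above:

// Go layouts are written against the reference time Jan 2 15:04:05 2006
layout := "2006-01-02T15:04:05.0000Z"
logCreationTime, err := time.Parse(layout, "2020-01-31T20:12:38.1234Z")
if err != nil {
	fmt.Println("could not parse the timestamp", err)
} else {
	fmt.Println(logCreationTime.UTC()) // 2020-01-31 20:12:38.1234 +0000 UTC
}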
func ProcessChunk(chunk []byte, linesPool *sync.Pool, stringPool *sync.Pool, slicePool *sync.Pool, start time.Time, end time.Time) {

	// another wait group to process every chunk further
	var wg2 sync.WaitGroup

	logs := stringPool.Get().(string)
	logs = string(chunk)
	linesPool.Put(chunk) // put the chunk back into the pool

	// split the string by "\n", so that we have a slice of log lines
	logsSlice := strings.Split(logs, "\n")
	stringPool.Put(logs) // put the string back into the pool

	chunkSize := 100 // process a bunch of 100 log lines per goroutine
	n := len(logsSlice)
	noOfThread := n / chunkSize
	if n%chunkSize != 0 { // handle the leftover lines
		noOfThread++
	}

	// traverse the chunk, one bunch of lines per goroutine
	for i := 0; i < noOfThread; i++ {
		wg2.Add(1)
		// process each bunch in a separate goroutine,
		// passing the start and end indexes of the bunch
		go func(s int, e int) {
			defer wg2.Done()
			for i := s; i < e; i++ {
				text := logsSlice[i]
				if len(text) == 0 {
					continue
				}
				logParts := strings.SplitN(text, ",", 2)
				logCreationTimeString := logParts[0]
				logCreationTime, err := time.Parse("2006-01-02T15:04:05.0000Z", logCreationTimeString)
				if err != nil {
					fmt.Printf("\n Could not parse the time :%s for log : %v", logCreationTimeString, text)
					return
				}
				// check if the log's timestamp is in between our desired period
				if logCreationTime.After(start) && logCreationTime.Before(end) {
					fmt.Println(text)
				}
			}
		}(i*chunkSize, int(math.Min(float64((i+1)*chunkSize), float64(len(logsSlice)))))
	}

	wg2.Wait() // wait for all the bunches of this chunk to finish
	logsSlice = nil
}
The code above was benchmarked against a 16 GB log file.
The time taken to extract the logs was around 25 seconds.
Below is the complete working code.
func main() {
s := time.Now()
args := os.Args[1:]
if len(args) != 6 { // expected format: LogExtractor.exe -f "From Time" -t "To Time" -i "Log file path"
fmt.Println("Please give proper command line arguments")
return
}
startTimeArg := args[1]
finishTimeArg := args[3]
fileName := args[5]
file, err := os.Open(fileName)
if err != nil {
fmt.Println("cannot able to read the file", err)
return
}
defer file.Close() //close after checking err
queryStartTime, err := time.Parse("2006-01-02T15:04:05.0000Z", startTimeArg)
if err != nil {
fmt.Println("Could not able to parse the start time", startTimeArg)
return
}
queryFinishTime, err := time.Parse("2006-01-02T15:04:05.0000Z", finishTimeArg)
if err != nil {
fmt.Println("Could not able to parse the finish time", finishTimeArg)
return
}
filestat, err := file.Stat()
if err != nil {
fmt.Println("Could not able to get the file stat")
return
}
fileSize := filestat.Size()
offset := fileSize - 1
lastLineSize := 0
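// scan the file backwards from the last byte until a newline is found,
// to locate the start of the last log line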
for {
b := make([]byte, 1)
n, err := file.ReadAt(b, offset)
if err != nil {
fmt.Println("Error reading file ", err)
break
}
char := string(b[0])
if char == "\n" {
break
}
offset--
lastLineSize += n
}
lastLine := make([]byte, lastLineSize)
_, err = file.ReadAt(lastLine, offset+1)
if err != nil {
fmt.Println("Could not able to read last line with offset", offset, "and lastline size", lastLineSize)
return
}
logSlice := strings.SplitN(string(lastLine), ",", 2)
logCreationTimeString := logSlice[0]
lastLogCreationTime, err := time.Parse("2006-01-02T15:04:05.0000Z", logCreationTimeString)
if err != nil {
fmt.Println("can not able to parse time : ", err)
}
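// process the file only if the timestamp of its last log line falls within the queried window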
if lastLogCreationTime.After(queryStartTime) && lastLogCreationTime.Before(queryFinishTime) {
Process(file, queryStartTime, queryFinishTime)
}
fmt.Println("\nTime taken - ", time.Since(s))
}
func Process(f *os.File, start time.Time, end time.Time) error {
linesPool := sync.Pool{New: func() interface{} {
lines := make([]byte, 250*1024)
return lines
}}
stringPool := sync.Pool{New: func() interface{} {
lines := ""
return lines
}}
r := bufio.NewReader(f)
var wg sync.WaitGroup
for {
buf := linesPool.Get().([]byte)
n, err := r.Read(buf)
buf = buf[:n]
if n == 0 {
if err == io.EOF {
break
}
if err != nil {
fmt.Println(err)
break
}
return err
}
nextUntillNewline, err := r.ReadBytes('\n')
if err != io.EOF {
buf = append(buf, nextUntillNewline...)
}
wg.Add(1)
go func() {
ProcessChunk(buf, &linesPool, &stringPool, start, end)
wg.Done()
}()
}
wg.Wait()
return nil
}
func ProcessChunk(chunk []byte, linesPool *sync.Pool, stringPool *sync.Pool, start time.Time, end time.Time) {
var wg2 sync.WaitGroup
logs := stringPool.Get().(string)
logs = string(chunk)
linesPool.Put(chunk)
logsSlice := strings.Split(logs, "\n")
stringPool.Put(logs)
chunkSize := 300
n := len(logsSlice)
noOfThread := n / chunkSize
if n%chunkSize != 0 {
noOfThread++
}
for i := 0; i < (noOfThread); i++ {
wg2.Add(1)
go func(s int, e int) {
defer wg2.Done() // to avoid deadlocks
for i := s; i < e; i++ {
text := logsSlice[i]
if len(text) == 0 {
continue
}
logSlice := strings.SplitN(text, ",", 2)
logCreationTimeString := logSlice[0]
logCreationTime, err := time.Parse("2006-01-02T15:04:05.0000Z", logCreationTimeString)
if err != nil {
fmt.Printf("\n Could not able to parse the time :%s for log : %v", logCreationTimeString, text)
return
}
if logCreationTime.After(start) && logCreationTime.Before(end) {
//fmt.Println(text)
}
}
}(i*chunkSize, int(math.Min(float64((i+1)*chunkSize), float64(len(logsSlice)))))
}
wg2.Wait()
logsSlice = nil
}
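To run the extractor, pass the flags in the order main() expects. For example (a hypothetical invocation; the timestamps and the file path are placeholders):
LogExtractor.exe -f "2020-01-31T20:12:38.1234Z" -t "2020-01-31T21:12:38.1234Z" -i "app.log"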
You can reach me at ohm.patel1997@gmail.com.
Any queries and suggestions for improvement are most welcome. 😉
You can also comment below; further questions and appreciation are always welcome. 😁🙈