[receiver/filelog] fix record counting with header #35870
base: main
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: receiver/filelog
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: fix record counting with header
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35869]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
@@ -70,7 +70,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
 	// SectionReader can only read a fixed window (from previous offset to EOF).
 	info, err := r.file.Stat()
 	if err != nil {
-		r.set.Logger.Error("Failed to stat", zap.Error(err))
+		r.set.Logger.Error("failed to stat", zap.Error(err))
 		return
 	}
 	currentEOF := info.Size()
@@ -80,7 +80,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
 	gzipReader, err := gzip.NewReader(io.NewSectionReader(r.file, r.Offset, currentEOF))
 	if err != nil {
 		if !errors.Is(err, io.EOF) {
-			r.set.Logger.Error("Failed to create gzip reader", zap.Error(err))
+			r.set.Logger.Error("failed to create gzip reader", zap.Error(err))
 		}
 		return
 	} else {
@@ -96,7 +96,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
 	}

 	if _, err := r.file.Seek(r.Offset, 0); err != nil {
-		r.set.Logger.Error("Failed to seek", zap.Error(err))
+		r.set.Logger.Error("failed to seek", zap.Error(err))
 		return
 	}
@@ -106,9 +106,89 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
 		}
 	}()

+	doneReadingFile := r.readHeader(ctx)
+	if doneReadingFile {
+		return
+	}
+
+	// Reset position in file to r.Offset after the header scanner might have moved it past a content token.
+	if _, err := r.file.Seek(r.Offset, 0); err != nil {
+		r.set.Logger.Error("failed to seek post-header", zap.Error(err))
+		return
+	}
+
+	r.readContents(ctx)
+}
+
+func (r *Reader) readHeader(ctx context.Context) (doneReadingFile bool) {
+	if r.headerReader == nil {
+		r.set.Logger.Debug("no need to read header", zap.Bool("header_finalized", r.HeaderFinalized))
+		return false
+	}
+
 	s := scanner.New(r, r.maxLogSize, r.initialBufferSize, r.Offset, r.splitFunc)

-	// Iterate over the tokenized file, emitting entries as we go
+	// Read the tokens from the file until no more header tokens are found or the end of file is reached.
+	for {
+		select {
+		case <-ctx.Done():
+			return true
+		default:
+		}
+
+		ok := s.Scan()
+		if !ok {
+			if err := s.Error(); err != nil {
+				r.set.Logger.Error("failed during header scan", zap.Error(err))
+			} else {
+				r.set.Logger.Debug("end of file reached", zap.Bool("delete_at_eof", r.deleteAtEOF))
+				if r.deleteAtEOF {
+					r.delete()
+				}
+			}
+			// Either end of file was reached, or file cannot be scanned.
+			return true
+		}
+
+		token, err := r.decoder.Decode(s.Bytes())
+		if err != nil {
+			r.set.Logger.Error("decode header: %w", zap.Error(err))
+			r.Offset = s.Pos() // move past the bad token or we may be stuck
+			continue
+		}
+
+		err = r.headerReader.Process(ctx, token, r.FileAttributes)
+		if err != nil {
+			if errors.Is(err, header.ErrEndOfHeader) {
+				// End of header reached.
+				break
+			}
+			r.set.Logger.Error("process header: %w", zap.Error(err))
+		}
+
+		r.Offset = s.Pos()
+	}
+
+	// Clean up the header machinery
+	if err := r.headerReader.Stop(); err != nil {
+		r.set.Logger.Error("failed to stop header pipeline during finalization", zap.Error(err))
+	}
+	r.headerReader = nil
+	r.HeaderFinalized = true
+	r.initialBufferSize = scanner.DefaultBufferSize
+
+	// Switch to the normal split and process functions.
+	r.splitFunc = r.lineSplitFunc
+	r.processFunc = r.emitFunc
+
+	return false
+}
+
+func (r *Reader) readContents(ctx context.Context) {
+	// Create the scanner to read the contents of the file.
+	s := scanner.New(r, r.maxLogSize, r.initialBufferSize, r.Offset, r.splitFunc)
+
+	// Iterate over the contents of the file.
 	for {
 		select {
 		case <-ctx.Done():

Review comment (on the scanner.New call in readContents): Note that I recreate the scanner again with r.initialBufferSize. As far as I understand, the "initial buffer size" is only used in tests, and I'm not sure why those tests are actually needed.
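For background on that comment: the stanza scanner package appears to wrap Go's bufio.Scanner, where the "initial buffer size" is just the starting allocation that the scanner grows on demand up to the maximum token size. Below is a minimal standalone sketch of that sizing behavior, assuming the bufio.Scanner semantics; initialSize and maxSize are illustrative values, not the package's actual constants.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Illustrative sizes, not the scanner package's real defaults.
	const initialSize = 16    // starting buffer allocation
	const maxSize = 64 * 1024 // largest token Scan will accept

	input := "short\n" + strings.Repeat("x", 1000) + "\n"
	s := bufio.NewScanner(strings.NewReader(input))

	// Buffer sets both the starting buffer and the growth cap. The
	// scanner reallocates as tokens outgrow the initial buffer; only
	// tokens larger than maxSize fail (with bufio.ErrTooLong).
	s.Buffer(make([]byte, initialSize), maxSize)

	for s.Scan() {
		fmt.Println("token length:", len(s.Bytes())) // 5, then 1000
	}
	if err := s.Err(); err != nil {
		fmt.Println("scan error:", err)
	}
}

If that is all the initial size controls, it only changes how many reallocations happen before a large token fits, which would be consistent with the reviewer's observation that it matters mainly to tests.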
@@ -119,7 +199,7 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
 		ok := s.Scan()
 		if !ok {
 			if err := s.Error(); err != nil {
-				r.set.Logger.Error("Failed during scan", zap.Error(err))
+				r.set.Logger.Error("failed during scan", zap.Error(err))
 			} else if r.deleteAtEOF {
 				r.delete()
 			}
@@ -139,36 +219,11 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
 		err = r.processFunc(ctx, token, r.FileAttributes)
 		if err == nil {
 			r.Offset = s.Pos() // successful emit, update offset
 			continue
 		}

-		if !errors.Is(err, header.ErrEndOfHeader) {
+		if err != nil {
 			r.set.Logger.Error("process: %w", zap.Error(err))
 			r.Offset = s.Pos() // move past the bad token or we may be stuck
 			continue
 		}
-
-		// Clean up the header machinery
-		if err = r.headerReader.Stop(); err != nil {
-			r.set.Logger.Error("Failed to stop header pipeline during finalization", zap.Error(err))
-		}
-		r.headerReader = nil
-		r.HeaderFinalized = true
-
-		// Switch to the normal split and process functions.
-		r.splitFunc = r.lineSplitFunc
-		r.processFunc = r.emitFunc
-
-		// Recreate the scanner with the normal split func.
-		// Do not use the updated offset from the old scanner, as the most recent token
-		// could be split differently with the new splitter.
-		if _, err = r.file.Seek(r.Offset, 0); err != nil {
-			r.set.Logger.Error("Failed to seek post-header", zap.Error(err))
-			return
-		}
-		s = scanner.New(r, r.maxLogSize, scanner.DefaultBufferSize, r.Offset, r.splitFunc)
-		r.Offset = s.Pos()
 	}
 }
Review comment (on the removed header-handling block): This "juggling" of function references could probably be avoided altogether after the refactoring.
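One shape such a refactoring could take (a hypothetical sketch, not code from this PR or the repository; all names are illustrative): pass each phase's split and process functions into a shared scan loop instead of mutating splitFunc/processFunc fields on the Reader.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// processFunc stands in for the reader's header-process and emit callbacks.
type processFunc func(token []byte) error

// scanSection scans input starting at offset with an explicitly provided
// split/process pair and returns the new offset. Because each phase gets
// its functions as arguments, nothing on a shared reader struct has to be
// swapped mid-read.
func scanSection(input string, offset int, split bufio.SplitFunc, process processFunc) int {
	s := bufio.NewScanner(strings.NewReader(input[offset:]))
	s.Split(split)
	for s.Scan() {
		if err := process(s.Bytes()); err != nil {
			break // e.g. an end-of-header sentinel; the token stays unconsumed
		}
		offset += len(s.Bytes()) + 1 // +1 for the newline ScanLines strips (assumes "\n" endings)
	}
	return offset
}

func main() {
	input := "# header line\nbody line one\nbody line two\n"

	// Header phase: hands in its own split and process functions.
	offset := scanSection(input, 0, bufio.ScanLines, func(token []byte) error {
		if !strings.HasPrefix(string(token), "#") {
			return fmt.Errorf("end of header")
		}
		fmt.Printf("header token: %q\n", token)
		return nil
	})

	// Content phase resumes from the offset the header phase reached,
	// mirroring the PR's seek back to r.Offset between phases.
	scanSection(input, offset, bufio.ScanLines, func(token []byte) error {
		fmt.Printf("content token: %q\n", token)
		return nil
	})
}

With that shape, finishing the header phase is just returning from one call and making the next call with the content pair, so no half-swapped state is ever observable.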