Skip to content
32 changes: 32 additions & 0 deletions changelog/fragments/1767789965-upload-limit.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Kind can be one of:
# - breaking-change: a change to previously-documented behavior
# - deprecation: functionality that is being removed in a later release
# - bug-fix: fixes a problem in a previous version
# - enhancement: extends functionality but does not break or fix existing behavior
# - feature: new functionality
# - known-issue: problems that we are aware of in a given version
# - security: impacts on the security of a product or a user’s deployment.
# - upgrade: important information for someone upgrading from a prior version
# - other: does not fit into any of the other categories
kind: security

# Change summary; an 80ish-character description of the change.
summary: Enforce maximum limits on UploadBegin and UploadComplete request body sizes

# Long description; in case the summary is not enough to describe the change
# this field accommodates a description without length limits.
# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
#description:

# Affected component; usually one of "elastic-agent", "fleet-server", "filebeat", "metricbeat", "auditbeat", "all", etc.
component: fleet-server

# PR URL; optional; the PR number that added the changeset.
# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added.
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
# Please provide it if you are adding a fragment for a different PR.
#pr: https://github.com/owner/repo/1234

# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
# If not present is automatically filled by the tooling with the issue linked to the PR number.
#issue: https://github.com/owner/repo/1234
9 changes: 9 additions & 0 deletions internal/pkg/api/error.go
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,15 @@ func NewHTTPErrResp(err error) HTTPErrResp {
zerolog.InfoLevel,
},
},
{
target: uploader.ErrPayloadSizeTooLarge,
meta: HTTPErrResp{
StatusCode: http.StatusRequestEntityTooLarge,
Error: "ErrPayloadSizeTooLarge",
Message: "the request body exceeds the maximum allowed size",
Level: zerolog.InfoLevel,
},
},
}

for _, e := range errTable {
Expand Down
22 changes: 22 additions & 0 deletions internal/pkg/api/handleUpload.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ var (

// FIXME Should we use the structs in openapi.gen.go instead of the generic ones? Will need to rework the uploader if we do
type UploadT struct {
cfg *config.Server
bulker bulk.Bulk
chunkClient *elasticsearch.Client
cache cache.Cache
Expand All @@ -53,6 +54,7 @@ type UploadT struct {

func NewUploadT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elasticsearch.Client, cache cache.Cache) *UploadT {
return &UploadT{
cfg: cfg,
chunkClient: chunkClient,
bulker: bulker,
cache: cache,
Expand All @@ -71,6 +73,11 @@ func (ut *UploadT) validateUploadBeginRequest(ctx context.Context, reader io.Rea
if errors.Is(err, io.EOF) {
return nil, "", fmt.Errorf("%w: %w", ErrFileInfoBodyRequired, err)
}

var maxBytesErr *http.MaxBytesError
if errors.As(err, &maxBytesErr) {
return nil, "", uploader.ErrPayloadSizeTooLarge
}
return nil, "", &BadRequestErr{msg: "unable to decode upload begin request", nextErr: err}
}

Expand All @@ -83,6 +90,11 @@ func (ut *UploadT) validateUploadBeginRequest(ctx context.Context, reader io.Rea
}

func (ut *UploadT) handleUploadBegin(_ zerolog.Logger, w http.ResponseWriter, r *http.Request) error {
// ensure body is not excessively large to prevent memory exhaustion DoS attacks
if ut.cfg.Limits.UploadStartLimit.MaxBody > 0 {
r.Body = http.MaxBytesReader(w, r.Body, ut.cfg.Limits.UploadStartLimit.MaxBody)
}

// decode early to match agentID in the payload
payload, agentID, err := ut.validateUploadBeginRequest(r.Context(), r.Body)
if err != nil {
Expand Down Expand Up @@ -179,6 +191,11 @@ func (ut *UploadT) validateUploadCompleteRequest(r *http.Request, id string) (st

var req UploadCompleteRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
var maxBytesErr *http.MaxBytesError
if errors.As(err, &maxBytesErr) {
return "", uploader.ErrPayloadSizeTooLarge
}

return "", &BadRequestErr{msg: "unable to decode upload complete request"}
}

Expand All @@ -190,6 +207,11 @@ func (ut *UploadT) validateUploadCompleteRequest(r *http.Request, id string) (st
}

func (ut *UploadT) handleUploadComplete(_ zerolog.Logger, w http.ResponseWriter, r *http.Request, uplID string) error {
// ensure body is not excessively large to prevent memory exhaustion DoS attacks
if ut.cfg.Limits.UploadEndLimit.MaxBody > 0 {
r.Body = http.MaxBytesReader(w, r.Body, ut.cfg.Limits.UploadEndLimit.MaxBody)
}

hash, err := ut.validateUploadCompleteRequest(r, uplID)
if err != nil {
return err
Expand Down
82 changes: 82 additions & 0 deletions internal/pkg/api/handleUpload_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"testing"
"time"

"github.com/docker/go-units"
"github.com/elastic/fleet-server/v7/internal/pkg/apikey"
"github.com/elastic/fleet-server/v7/internal/pkg/bulk"
"github.com/elastic/fleet-server/v7/internal/pkg/cache"
Expand Down Expand Up @@ -107,6 +108,10 @@ func TestUploadBeginValidation(t *testing.T) {
"src": "agent"
}`,
},
{
"UploadBegin request payload that is too large is rejected", http.StatusRequestEntityTooLarge, "the request body exceeds the maximum allowed size",
generateLargePayload(2 * units.KB),
},
{"file name is required", http.StatusBadRequest, "file.name is required",
`{
"file": {
Expand Down Expand Up @@ -1025,6 +1030,61 @@ func TestUploadCompleteBadRequests(t *testing.T) {
assert.Equal(t, rec.Body.String(), "{\"statusCode\":400,\"error\":\"BadRequest\",\"message\":\"Bad request: unable to decode upload complete request\"}")
}

// TestUploadCompletePayloadSize verifies that an UploadComplete request whose
// body exceeds the configured UploadEndLimit.MaxBody (1 KB in the mock config)
// is rejected with 413 Request Entity Too Large before being decoded.
func TestUploadCompletePayloadSize(t *testing.T) {
	mockUploadID := "abc123"

	hr, _, fakebulk, _ := prepareUploaderMock(t)
	mockInfo := file.Info{
		DocID:     "bar.foo",
		ID:        mockUploadID,
		ChunkSize: file.MaxChunkSize,
		Total:     file.MaxChunkSize * 3,
		Count:     3,
		Start:     time.Now().Add(-time.Minute),
		Status:    file.StatusProgress,
		Source:    "agent",
		AgentID:   "foo",
		ActionID:  "bar",
	}

	mockUploadedFile(fakebulk, mockInfo, []file.ChunkInfo{
		{
			Last: false,
			BID:  mockInfo.DocID,
			Size: int(file.MaxChunkSize),
			Pos:  0,
			SHA2: "0c4a81b85a6b7ff00bde6c32e1e8be33b4b793b3b7b5cb03db93f77f7c9374d1", // sample value
		},
		{
			// Only the final chunk may carry Last: true; the middle chunk
			// was previously (incorrectly) flagged as last as well.
			Last: false,
			BID:  mockInfo.DocID,
			Size: int(file.MaxChunkSize),
			Pos:  1,
			SHA2: "0c4a81b85a6b7ff00bde6c32e1e8be33b4b793b3b7b5cb03db93f77f7c9374d1", // sample value
		},
		{
			Last: true,
			BID:  mockInfo.DocID,
			Size: int(file.MaxChunkSize),
			Pos:  2,
			SHA2: "0c4a81b85a6b7ff00bde6c32e1e8be33b4b793b3b7b5cb03db93f77f7c9374d1", // sample value
		},
	})

	// A transithash padded to 2 KB guarantees the body trips the 1 KB
	// MaxBytesReader limit before JSON decoding can succeed.
	longHash := strings.Repeat("a", 2*units.KB)
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(
		http.MethodPost,
		"/api/fleet/uploads/"+mockUploadID,
		strings.NewReader(`{"transithash": {"sha256": "`+longHash+`"}}`),
	)

	hr.ServeHTTP(rec, req)

	assert.Equal(t, http.StatusRequestEntityTooLarge, rec.Code)
	// testify's Equal signature is (t, expected, actual) — expected first,
	// so failure output labels the two values correctly.
	assert.Equal(t, `{"statusCode":413,"error":"ErrPayloadSizeTooLarge","message":"the request body exceeds the maximum allowed size"}`, rec.Body.String())
}

/*
Helpers and mocks
*/
Expand Down Expand Up @@ -1065,9 +1125,15 @@ func configureUploaderMock(t *testing.T, fileSize *uint64) (http.Handler, apiSer
c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
require.NoError(t, err)

cfg := config.Server{Limits: config.ServerLimits{
UploadStartLimit: config.Limit{MaxBody: 1 * units.KB},
UploadEndLimit: config.Limit{MaxBody: 1 * units.KB},
}}

// create an apiServer with an UploadT that will handle the incoming requests
si := apiServer{
ut: &UploadT{
cfg: &cfg,
bulker: fakebulk,
chunkClient: es,
cache: c,
Expand Down Expand Up @@ -1234,3 +1300,19 @@ func size_ptr(x int) *uint64 {
y := uint64(x) //nolint:gosec // disable G115
return &y
}

func generateLargePayload(paddingSize int) string {
payload := `{
"file": {
"size": 1,
"name": "foo.png",
"mime_type": "image/png"
},
"agent_id": "foo",
"action_id": "123",
"src": "agent",
"pad": "%s"
}`
padding := strings.Repeat("a", paddingSize)
return fmt.Sprintf(payload, padding)
}
2 changes: 2 additions & 0 deletions internal/pkg/file/uploader/upload.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ var (
ErrFileSizeRequired = errors.New("file.size is required")
ErrInvalidFileSize = errors.New("invalid filesize")
ErrFieldRequired = errors.New("field required")

ErrPayloadSizeTooLarge = errors.New("payload size too large")
)

type Uploader struct {
Expand Down