diff --git a/SHELL_FEATURES.md b/SHELL_FEATURES.md index 2eb5d070..6f56ebf6 100644 --- a/SHELL_FEATURES.md +++ b/SHELL_FEATURES.md @@ -14,7 +14,7 @@ Blocked features are rejected before execution with exit code 2. - ✅ `false` — return exit code 1 - ✅ `grep [-EFGivclLnHhoqsxw] [-e PATTERN] [-m NUM] [-A NUM] [-B NUM] [-C NUM] PATTERN [FILE]...` — print lines that match patterns; uses RE2 regex engine (linear-time, no backtracking) - ✅ `head [-n N|-c N] [-q|-v] [FILE]...` — output the first part of files (default: first 10 lines); `-z`/`--zero-terminated` and `--follow` are rejected -- ✅ `ls [-1aAdFhlpRrSt] [FILE]...` — list directory contents +- ✅ `ls [-1aAdFhlpRrSt] [--offset N] [--limit N] [FILE]...` — list directory contents; `--offset`/`--limit` are non-standard pagination flags (single-directory only, silently ignored with `-R` or multiple arguments, capped at 1,000 entries per call); offset operates on filesystem order (not sorted order) for O(n) memory - ✅ `printf FORMAT [ARGUMENT]...` — format and print data to stdout; supports `%s`, `%b`, `%c`, `%d`, `%i`, `%o`, `%u`, `%x`, `%X`, `%e`, `%E`, `%f`, `%F`, `%g`, `%G`, `%%`; format reuse for excess arguments; `%n` rejected (security risk); `-v` rejected - ✅ `strings [-a] [-n MIN] [-t o|d|x] [-o] [-f] [-s SEP] [FILE]...` — print printable character sequences in files (default min length 4); offsets via `-t`/`-o`; filename prefix via `-f`; custom separator via `-s` - ✅ `tail [-n N|-c N] [-q|-v] [-z] [FILE]...` — output the last part of files (default: last 10 lines); supports `+N` offset mode; `-f`/`--follow` is rejected diff --git a/interp/allowed_paths.go b/interp/allowed_paths.go index 44270a1c..a9cef4dc 100644 --- a/interp/allowed_paths.go +++ b/interp/allowed_paths.go @@ -7,6 +7,7 @@ package interp import ( "context" + "errors" "fmt" "io" "io/fs" @@ -192,6 +193,98 @@ func (s *pathSandbox) readDir(ctx context.Context, path string) ([]fs.DirEntry, return entries, nil } +// readDirLimited reads directory 
entries, skipping the first offset entries +// and returning up to maxRead entries sorted by name within the read window. +// Returns (entries, truncated, error). When truncated is true, the directory +// contained more entries beyond the returned set. +// +// The offset skips raw directory entries during reading (before sorting). +// This means offset does NOT correspond to positions in a sorted listing — +// pages may overlap or miss entries. This is an acceptable tradeoff to achieve +// O(n) memory regardless of offset value, where n = min(maxRead, entries). +func (s *pathSandbox) readDirLimited(ctx context.Context, path string, offset, maxRead int) ([]fs.DirEntry, bool, error) { + absPath := toAbs(path, HandlerCtx(ctx).Dir) + root, relPath, ok := s.resolve(absPath) + if !ok { + return nil, false, &os.PathError{Op: "readdir", Path: path, Err: os.ErrPermission} + } + f, err := root.Open(relPath) + if err != nil { + return nil, false, portablePathError(err) + } + defer f.Close() + + // Defense-in-depth: clamp non-positive values. + if offset < 0 { + offset = 0 + } + if maxRead <= 0 { + return nil, false, nil + } + + const batchSize = 256 + entries, truncated, lastErr := collectDirEntries(func(n int) ([]fs.DirEntry, error) { + return f.ReadDir(n) + }, batchSize, offset, maxRead) + + if lastErr != nil { + return entries, truncated, portablePathError(lastErr) + } + return entries, truncated, nil +} + +// collectDirEntries reads directory entries in batches using readBatch, +// skipping the first offset entries and collecting up to maxRead entries. +// Returns (entries, truncated, lastErr). Entries are sorted by name. +// +// NOTE: We intentionally truncate before reading all entries. For directories +// larger than maxRead, the returned entries are sorted within the read window +// but may not be the globally-smallest names. 
Reading all entries to get +// globally-correct sorting would defeat the DoS protection — a directory with +// millions of files would OOM or stall. The truncation warning communicates +// that output is incomplete. +func collectDirEntries(readBatch func(n int) ([]fs.DirEntry, error), batchSize, offset, maxRead int) ([]fs.DirEntry, bool, error) { + entries := make([]fs.DirEntry, 0, maxRead) + truncated := false + skipped := 0 + var lastErr error + + for { + batch, err := readBatch(batchSize) + for _, e := range batch { + if skipped < offset { + skipped++ + continue + } + entries = append(entries, e) + } + // Capture non-EOF errors before checking truncation, since + // ReadDir can return partial entries alongside an error. + if err != nil && !errors.Is(err, io.EOF) { + lastErr = err + } + if len(entries) > maxRead { + truncated = true + break + } + if err != nil { + break + } + } + + // Sort collected entries by name. + slices.SortFunc(entries, func(a, b fs.DirEntry) int { + return strings.Compare(a.Name(), b.Name()) + }) + + // Trim to exactly maxRead if we overshot. + if truncated && len(entries) > maxRead { + entries = entries[:maxRead] + } + + return entries, truncated, lastErr +} + // stat implements the restricted stat policy. It uses os.Root.Stat for // metadata-only access — no file descriptor is opened, so it works on // unreadable files and does not block on special files (e.g. FIFOs). diff --git a/interp/allowed_paths_internal_test.go b/interp/allowed_paths_internal_test.go index 5fbb9d90..fe34cf51 100644 --- a/interp/allowed_paths_internal_test.go +++ b/interp/allowed_paths_internal_test.go @@ -9,18 +9,41 @@ import ( "bytes" "context" "errors" + "fmt" + "io" + "io/fs" "os" "os/exec" "path/filepath" "runtime" "strings" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "mvdan.cc/sh/v3/syntax" ) +// fakeDirEntry is a minimal fs.DirEntry for testing collectDirEntries. 
+type fakeDirEntry struct { + name string +} + +func (f fakeDirEntry) Name() string { return f.name } +func (f fakeDirEntry) IsDir() bool { return false } +func (f fakeDirEntry) Type() fs.FileMode { return 0 } +func (f fakeDirEntry) Info() (fs.FileInfo, error) { return fakeFileInfo{name: f.name}, nil } + +type fakeFileInfo struct{ name string } + +func (f fakeFileInfo) Name() string { return f.name } +func (f fakeFileInfo) Size() int64 { return 0 } +func (f fakeFileInfo) Mode() fs.FileMode { return 0644 } +func (f fakeFileInfo) ModTime() time.Time { return time.Time{} } +func (f fakeFileInfo) IsDir() bool { return false } +func (f fakeFileInfo) Sys() any { return nil } + func systemExecAllowedPaths(t *testing.T) []string { t.Helper() if runtime.GOOS == "windows" { @@ -197,3 +220,218 @@ func TestPathSandboxOpenRejectsWriteFlags(t *testing.T) { require.NoError(t, err) f.Close() } + +func TestReadDirLimited(t *testing.T) { + dir := t.TempDir() + + // Create 10 files. + for i := range 10 { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + sb, err := newPathSandbox([]string{dir}) + require.NoError(t, err) + defer sb.Close() + + ctx := context.WithValue(context.Background(), handlerCtxKey{}, HandlerContext{Dir: dir}) + + t.Run("maxRead below count returns truncated with first N entries", func(t *testing.T) { + entries, truncated, err := sb.readDirLimited(ctx, ".", 0, 5) + require.NoError(t, err) + assert.True(t, truncated) + assert.Len(t, entries, 5) + // Entries are sorted within the read window. 
+ for i := 1; i < len(entries); i++ { + assert.True(t, entries[i-1].Name() < entries[i].Name(), "entries should be sorted") + } + }) + + t.Run("maxRead above count returns all entries not truncated", func(t *testing.T) { + entries, truncated, err := sb.readDirLimited(ctx, ".", 0, 20) + require.NoError(t, err) + assert.False(t, truncated) + assert.Len(t, entries, 10) + }) + + t.Run("empty directory", func(t *testing.T) { + emptyDir := filepath.Join(dir, "empty") + require.NoError(t, os.Mkdir(emptyDir, 0755)) + + entries, truncated, err := sb.readDirLimited(ctx, "empty", 0, 10) + require.NoError(t, err) + assert.False(t, truncated) + assert.Empty(t, entries) + }) + + t.Run("path outside sandbox returns permission error", func(t *testing.T) { + outsideDir := t.TempDir() + _, _, err := sb.readDirLimited(ctx, outsideDir, 0, 10) + require.Error(t, err) + assert.ErrorIs(t, err, os.ErrPermission) + }) + + t.Run("io.EOF is not returned as error", func(t *testing.T) { + // Use a fresh directory to avoid interference from other subtests. + eofDir := filepath.Join(dir, "eoftest") + require.NoError(t, os.Mkdir(eofDir, 0755)) + for i := range 5 { + require.NoError(t, os.WriteFile(filepath.Join(eofDir, fmt.Sprintf("g%02d", i)), nil, 0644)) + } + entries, truncated, err := sb.readDirLimited(ctx, "eoftest", 0, 1000) + require.NoError(t, err, "io.EOF should not be returned as error") + assert.False(t, truncated) + assert.Len(t, entries, 5) + }) + + t.Run("non-positive maxRead returns empty", func(t *testing.T) { + entries, truncated, err := sb.readDirLimited(ctx, ".", 0, 0) + require.NoError(t, err) + assert.False(t, truncated) + assert.Empty(t, entries) + + entries, truncated, err = sb.readDirLimited(ctx, ".", 0, -5) + require.NoError(t, err) + assert.False(t, truncated) + assert.Empty(t, entries) + }) + + t.Run("offset skips entries", func(t *testing.T) { + // Use a fresh directory to avoid interference from other subtests. 
+ offsetDir := filepath.Join(dir, "offsettest") + require.NoError(t, os.Mkdir(offsetDir, 0755)) + for i := range 10 { + require.NoError(t, os.WriteFile(filepath.Join(offsetDir, fmt.Sprintf("h%02d", i)), nil, 0644)) + } + + // Read all 10 entries with no offset for reference. + all, _, err := sb.readDirLimited(ctx, "offsettest", 0, 100) + require.NoError(t, err) + assert.Len(t, all, 10) + + // Skip first 5 entries, read up to 100. + entries, truncated, err := sb.readDirLimited(ctx, "offsettest", 5, 100) + require.NoError(t, err) + assert.False(t, truncated) + assert.Len(t, entries, 5, "should return remaining 5 entries after skipping 5") + }) + + t.Run("offset beyond count returns empty", func(t *testing.T) { + entries, truncated, err := sb.readDirLimited(ctx, "offsettest", 100, 10) + require.NoError(t, err) + assert.False(t, truncated) + assert.Empty(t, entries) + }) + + t.Run("offset plus maxRead with truncation", func(t *testing.T) { + // Skip 3, read 3 out of 10 => should get 3 entries, truncated. + entries, truncated, err := sb.readDirLimited(ctx, "offsettest", 3, 3) + require.NoError(t, err) + assert.True(t, truncated, "should be truncated since 10 - 3 > 3") + assert.Len(t, entries, 3) + // Entries should be sorted within the window. 
+ for i := 1; i < len(entries); i++ { + assert.True(t, entries[i-1].Name() < entries[i].Name(), "entries should be sorted") + } + }) + + t.Run("negative offset clamped to zero", func(t *testing.T) { + entries, truncated, err := sb.readDirLimited(ctx, "offsettest", -10, 100) + require.NoError(t, err) + assert.False(t, truncated) + assert.Len(t, entries, 10, "negative offset should be treated as 0") + }) +} + +func TestCollectDirEntries(t *testing.T) { + makeEntries := func(names ...string) []fs.DirEntry { + out := make([]fs.DirEntry, len(names)) + for i, n := range names { + out[i] = fakeDirEntry{name: n} + } + return out + } + + t.Run("error in same batch as truncation is preserved", func(t *testing.T) { + ioErr := errors.New("disk I/O error") + callCount := 0 + reader := func(n int) ([]fs.DirEntry, error) { + callCount++ + // First batch: 5 entries, no error. + // Second batch: 3 entries + I/O error. + // With maxRead=6, the second batch pushes us to 8 > 6, triggering truncation. + // The error must still be preserved. 
+ if callCount == 1 { + return makeEntries("f01", "f02", "f03", "f04", "f05"), nil + } + return makeEntries("f06", "f07", "f08"), ioErr + } + + entries, truncated, err := collectDirEntries(reader, 10, 0, 6) + assert.True(t, truncated, "should be truncated") + assert.Len(t, entries, 6, "should trim to maxRead") + assert.ErrorIs(t, err, ioErr, "I/O error must be preserved even when truncation occurs") + }) + + t.Run("EOF is not returned as error", func(t *testing.T) { + callCount := 0 + reader := func(n int) ([]fs.DirEntry, error) { + callCount++ + if callCount == 1 { + return makeEntries("f01", "f02"), io.EOF + } + return nil, io.EOF + } + + entries, truncated, err := collectDirEntries(reader, 10, 0, 100) + assert.False(t, truncated) + assert.Len(t, entries, 2) + assert.NoError(t, err, "io.EOF should not be returned as error") + }) + + t.Run("offset skips entries across batches", func(t *testing.T) { + callCount := 0 + reader := func(n int) ([]fs.DirEntry, error) { + callCount++ + if callCount == 1 { + return makeEntries("f01", "f02", "f03"), nil + } + if callCount == 2 { + return makeEntries("f04", "f05"), io.EOF + } + return nil, io.EOF + } + + entries, truncated, err := collectDirEntries(reader, 10, 2, 100) + assert.False(t, truncated) + assert.NoError(t, err) + assert.Len(t, entries, 3, "should skip first 2, return remaining 3") + assert.Equal(t, "f03", entries[0].Name()) + assert.Equal(t, "f04", entries[1].Name()) + assert.Equal(t, "f05", entries[2].Name()) + }) + + t.Run("error without truncation is preserved", func(t *testing.T) { + ioErr := errors.New("permission denied") + reader := func(n int) ([]fs.DirEntry, error) { + return makeEntries("f01", "f02"), ioErr + } + + entries, truncated, err := collectDirEntries(reader, 10, 0, 100) + assert.False(t, truncated) + assert.Len(t, entries, 2) + assert.ErrorIs(t, err, ioErr) + }) + + t.Run("entries are sorted by name", func(t *testing.T) { + reader := func(n int) ([]fs.DirEntry, error) { + return 
makeEntries("cherry", "apple", "banana"), io.EOF + } + + entries, truncated, err := collectDirEntries(reader, 10, 0, 100) + assert.False(t, truncated) + assert.NoError(t, err) + assert.Equal(t, "apple", entries[0].Name()) + assert.Equal(t, "banana", entries[1].Name()) + assert.Equal(t, "cherry", entries[2].Name()) + }) +} diff --git a/interp/builtins/builtins.go b/interp/builtins/builtins.go index dc17174d..9789fd18 100644 --- a/interp/builtins/builtins.go +++ b/interp/builtins/builtins.go @@ -90,6 +90,12 @@ type CallContext struct { // Entries are returned sorted by name. ReadDir func(ctx context.Context, path string) ([]fs.DirEntry, error) + // ReadDirLimited reads directory entries, skipping the first offset entries + // and returning up to maxRead entries sorted by name within the read window. + // Returns (entries, truncated, error). When truncated is true, the directory + // contained more entries beyond the returned set. + ReadDirLimited func(ctx context.Context, path string, offset, maxRead int) ([]fs.DirEntry, bool, error) + // StatFile returns file info within the shell's path restrictions (follows symlinks). StatFile func(ctx context.Context, path string) (fs.FileInfo, error) diff --git a/interp/builtins/ls/builtin_ls_pentest_test.go b/interp/builtins/ls/builtin_ls_pentest_test.go new file mode 100644 index 00000000..f312fc30 --- /dev/null +++ b/interp/builtins/ls/builtin_ls_pentest_test.go @@ -0,0 +1,745 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2026-present Datadog, Inc. 
+ +package ls_test + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/DataDog/rshell/interp" + "github.com/DataDog/rshell/interp/builtins/ls" + "github.com/DataDog/rshell/interp/builtins/testutil" +) + +const pentestTimeout = 10 * time.Second + +func lsRun(t *testing.T, script, dir string, extraPaths ...string) (string, string, int) { + t.Helper() + paths := append([]string{dir}, extraPaths...) + return testutil.RunScript(t, script, dir, interp.AllowedPaths(paths)) +} + +func mustNotHang(t *testing.T, fn func()) { + t.Helper() + done := make(chan struct{}) + go func() { + fn() + close(done) + }() + select { + case <-done: + case <-time.After(pentestTimeout): + t.Fatal("operation did not complete within timeout") + } +} + +// --- Symlink loop and follow tests --- + +func TestLsPentestSymlinkLoopRecursive(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("symlinks not reliably supported on Windows") + } + dir := t.TempDir() + sub := filepath.Join(dir, "sub") + require.NoError(t, os.Mkdir(sub, 0755)) + require.NoError(t, os.Symlink(sub, filepath.Join(sub, "loop"))) + + mustNotHang(t, func() { + stdout, _, code := lsRun(t, "ls -R", dir) + assert.Equal(t, 0, code) + // The symlink "loop" should appear in listing but not be recursed into. + assert.Contains(t, stdout, "loop") + // If recursed, we'd see "loop" repeated in subdir headers. + // Count occurrences of "./sub" headers — should be exactly one. + count := strings.Count(stdout, "./sub:") + assert.Equal(t, 1, count, "symlink should not be recursed into; got stdout:\n%s", stdout) + }) +} + +func TestLsPentestSymlinkToDirectoryRecursive(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("symlinks not reliably supported on Windows") + } + dir := t.TempDir() + // Create a real subdir with a file. 
+ realSub := filepath.Join(dir, "real") + require.NoError(t, os.Mkdir(realSub, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(realSub, "file.txt"), []byte("hello"), 0644)) + // Create a symlink to the real subdir. + require.NoError(t, os.Symlink(realSub, filepath.Join(dir, "link"))) + + stdout, _, code := lsRun(t, "ls -R", dir) + assert.Equal(t, 0, code) + // "link" should appear in the listing. + assert.Contains(t, stdout, "link") + // "real" subdir should be recursed into. + assert.Contains(t, stdout, "./real:") + // "link" should NOT be recursed into (no "./link:" header). + assert.NotContains(t, stdout, "./link:") +} + +// --- Recursion depth --- + +func TestLsPentestDeepNesting(t *testing.T) { + dir := t.TempDir() + // Create directories 300 levels deep (exceeds maxRecursionDepth=256). + cur := dir + for i := 0; i < 300; i++ { + cur = filepath.Join(cur, "d") + require.NoError(t, os.Mkdir(cur, 0755)) + } + + mustNotHang(t, func() { + _, stderr, code := lsRun(t, "ls -R", dir) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "recursion depth limit exceeded") + }) +} + +// --- MaxDirEntries --- + +func TestLsPentestMaxDirEntries(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + // Create MaxDirEntries+1 files. + count := ls.MaxDirEntries + 1 + for i := 0; i < count; i++ { + name := fmt.Sprintf("f%06d", i) + require.NoError(t, os.WriteFile(filepath.Join(dir, name), nil, 0644)) + } + + mustNotHang(t, func() { + stdout, stderr, code := lsRun(t, "ls", dir) + assert.Equal(t, 1, code, "should exit 1 on truncation") + assert.Contains(t, stderr, "too many entries") + assert.Contains(t, stderr, "output truncated") + assert.NotEmpty(t, stdout, "should show truncated output, not suppress it") + + // Should contain the first entry but not the last. 
+ assert.Contains(t, stdout, "f000000") + lastEntry := fmt.Sprintf("f%06d", ls.MaxDirEntries) + assert.NotContains(t, stdout, lastEntry, "entry beyond limit should not appear") + + // Exactly MaxDirEntries lines of output. + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, ls.MaxDirEntries, len(lines), "should have exactly MaxDirEntries lines") + }) +} + +func TestLsPentestPagination(t *testing.T) { + dir := t.TempDir() + // Create 50 files: f00..f49 + for i := 0; i < 50; i++ { + name := fmt.Sprintf("f%02d", i) + require.NoError(t, os.WriteFile(filepath.Join(dir, name), nil, 0644)) + } + + t.Run("offset 10 limit 20 returns 20 sorted entries", func(t *testing.T) { + stdout, stderr, code := lsRun(t, "ls --offset 10 --limit 20", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 20, len(lines)) + // Entries should be sorted within the page. + for i := 1; i < len(lines); i++ { + assert.True(t, lines[i-1] <= lines[i], "entries should be sorted: %s <= %s", lines[i-1], lines[i]) + } + }) + + t.Run("offset 40 limit 20 returns remaining entries", func(t *testing.T) { + stdout, stderr, code := lsRun(t, "ls --offset 40 --limit 20", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 10, len(lines), "only 10 entries remain after offset 40 of 50") + }) + + t.Run("offset beyond count returns empty", func(t *testing.T) { + stdout, stderr, code := lsRun(t, "ls --offset 100 --limit 10", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + assert.Empty(t, stdout) + }) +} + +func TestLsPentestNegativeOffset(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 5; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // Negative offset should be clamped to 0 (behaves like no offset). 
+ stdout, stderr, code := lsRun(t, "ls --offset -1", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 5, len(lines)) + assert.Equal(t, "f00", lines[0]) +} + +func TestLsPentestDotDotDoesNotConsumeLimitSlots(t *testing.T) { + dir := t.TempDir() + // Create 20 files: f00..f19 + for i := 0; i < 20; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // ls -a --limit 20 should show ".", "..", and all 20 real files (22 lines). + stdout, stderr, code := lsRun(t, "ls -a --limit 20", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + // 20 real files + "." + ".." = 22 lines. + assert.Equal(t, 22, len(lines), "dot and dotdot should not consume limit slots; got:\n%s", stdout) + assert.Equal(t, ".", lines[0]) + assert.Equal(t, "..", lines[1]) + assert.Equal(t, "f00", lines[2]) + assert.Equal(t, "f19", lines[len(lines)-1]) +} + +// --- Large offset DoS --- + +func TestLsPentestLargeOffsetCapped(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 5; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + mustNotHang(t, func() { + stdout, stderr, code := lsRun(t, "ls --offset 999999 --limit 1", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + assert.Empty(t, stdout, "offset far beyond entry count should return empty") + }) +} + +// --- Flag injection --- + +func TestLsPentestUnknownFlag(t *testing.T) { + dir := t.TempDir() + _, stderr, code := lsRun(t, "ls --no-such-flag", dir) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "ls:") +} + +func TestLsPentestFlagViaExpansion(t *testing.T) { + dir := t.TempDir() + _, stderr, code := lsRun(t, `flag="--no-such-flag"; ls $flag`, dir) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "ls:") +} + +// --- Sandbox escape --- + +func 
TestLsPentestOutsideSandbox(t *testing.T) { + allowed := t.TempDir() + forbidden := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(forbidden, "secret.txt"), []byte("secret"), 0644)) + + forbiddenPath := strings.ReplaceAll(forbidden, `\`, `/`) + _, stderr, code := lsRun(t, "ls "+forbiddenPath, allowed) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "ls:") +} + +func TestLsPentestPathTraversal(t *testing.T) { + dir := t.TempDir() + _, stderr, code := lsRun(t, "ls ../../etc/passwd", dir) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "ls:") +} + +// --- Edge cases --- + +func TestLsPentestEmptyFilename(t *testing.T) { + dir := t.TempDir() + _, stderr, code := lsRun(t, `ls ""`, dir) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "ls:") +} + +func TestLsPentestDashPrefixedFilename(t *testing.T) { + dir := t.TempDir() + require.NoError(t, os.WriteFile(filepath.Join(dir, "-n"), []byte("content"), 0644)) + stdout, _, code := lsRun(t, "ls -- -n", dir) + assert.Equal(t, 0, code) + assert.Contains(t, stdout, "-n") +} + +func TestLsPentestRecursiveLimitIgnored(t *testing.T) { + dir := t.TempDir() + // Create 3 subdirectories, each with a file. + for _, name := range []string{"aaa", "bbb", "ccc"} { + sub := filepath.Join(dir, name) + require.NoError(t, os.Mkdir(sub, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(sub, "file.txt"), nil, 0644)) + } + + // ls -R --limit 1 should still recurse into all 3 subdirs because + // --limit is silently ignored in recursive mode. + stdout, _, code := lsRun(t, "ls -R --limit 1", dir) + assert.Equal(t, 0, code) + assert.Contains(t, stdout, "./aaa:") + assert.Contains(t, stdout, "./bbb:") + assert.Contains(t, stdout, "./ccc:") +} + +func TestLsPentestMultiDirLimitIgnored(t *testing.T) { + dir := t.TempDir() + // Create two subdirs, each with 3 files. 
+ for _, sub := range []string{"d1", "d2"} { + subDir := filepath.Join(dir, sub) + require.NoError(t, os.Mkdir(subDir, 0755)) + for i := 0; i < 3; i++ { + require.NoError(t, os.WriteFile(filepath.Join(subDir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + } + + // ls --limit 1 d1 d2 should show ALL entries in both dirs because + // --limit is silently ignored with multiple arguments. + stdout, stderr, code := lsRun(t, "ls --limit 1 d1 d2", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + // Both directories should show all 3 files. + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + // d1:\nf00\nf01\nf02\n\nd2:\nf00\nf01\nf02 => 9 lines (2 headers + 6 files + 1 blank separator) + assert.Equal(t, 9, len(lines), "should show all entries in both dirs; got:\n%s", stdout) +} + +func TestLsPentestLargeOffsetStreaming(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + // Create 5010 files to test streaming offset beyond MaxDirEntries. + for i := 0; i < 5010; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%06d", i)), nil, 0644)) + } + + mustNotHang(t, func() { + stdout, stderr, code := lsRun(t, "ls --offset 5000 --limit 10", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 10, len(lines), "should return exactly 10 entries; got:\n%s", stdout) + }) +} + +func TestLsPentestNegativeLimit(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 5; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // Negative limit should be clamped to 0 (behaves like no limit). 
+ stdout, stderr, code := lsRun(t, "ls --limit -1", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 5, len(lines), "negative limit should show all entries") +} + +func TestLsPentestRecursiveOffsetIgnored(t *testing.T) { + dir := t.TempDir() + for _, name := range []string{"aaa", "bbb", "ccc"} { + sub := filepath.Join(dir, name) + require.NoError(t, os.Mkdir(sub, 0755)) + require.NoError(t, os.WriteFile(filepath.Join(sub, "file.txt"), nil, 0644)) + } + + // ls -R --offset 100 should still show all 3 subdirs because + // --offset is silently ignored in recursive mode. + stdout, _, code := lsRun(t, "ls -R --offset 100", dir) + assert.Equal(t, 0, code) + assert.Contains(t, stdout, "./aaa:") + assert.Contains(t, stdout, "./bbb:") + assert.Contains(t, stdout, "./ccc:") +} + +func TestLsPentestMultiDirOffsetIgnored(t *testing.T) { + dir := t.TempDir() + for _, sub := range []string{"d1", "d2"} { + subDir := filepath.Join(dir, sub) + require.NoError(t, os.Mkdir(subDir, 0755)) + for i := 0; i < 3; i++ { + require.NoError(t, os.WriteFile(filepath.Join(subDir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + } + + // ls --offset 100 d1 d2 should show ALL entries because + // --offset is silently ignored with multiple arguments. + stdout, stderr, code := lsRun(t, "ls --offset 100 d1 d2", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + assert.Contains(t, stdout, "d1:") + assert.Contains(t, stdout, "d2:") + // Both dirs should show all 3 files. + assert.Equal(t, 6, strings.Count(stdout, "f0"), "should show all 6 files across both dirs") +} + +func TestLsPentestDotDotNotAffectedByOffset(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 10; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // ls -a --offset 5 --limit 5: offset skips 5 raw entries, reads 5, then + // prepends . and .. 
(which are never consumed by offset/limit). + stdout, stderr, code := lsRun(t, "ls -a --offset 5 --limit 5", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + // 5 real entries + "." + ".." = 7 lines. + assert.Equal(t, 7, len(lines), "dot/dotdot should always appear; got:\n%s", stdout) + assert.Equal(t, ".", lines[0]) + assert.Equal(t, "..", lines[1]) +} + +func TestLsPentestNoTruncationWarningWithPagination(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + count := ls.MaxDirEntries + 100 + for i := 0; i < count; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%06d", i)), nil, 0644)) + } + + // With explicit --limit, no warning should appear even though + // the directory has more entries than MaxDirEntries. + stdout, stderr, code := lsRun(t, "ls --limit 10", dir) + assert.Equal(t, 0, code, "should exit 0 with explicit pagination") + assert.Empty(t, stderr, "no truncation warning with explicit --limit") + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 10, len(lines)) + + // With explicit --offset, same behavior. + stdout, stderr, code = lsRun(t, "ls --offset 10 --limit 10", dir) + assert.Equal(t, 0, code, "should exit 0 with explicit pagination") + assert.Empty(t, stderr, "no truncation warning with explicit --offset/--limit") + lines = strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 10, len(lines)) +} + +func TestLsPentestImplicitTruncationWarning(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + count := ls.MaxDirEntries + 1 + for i := 0; i < count; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%06d", i)), nil, 0644)) + } + + // Without pagination flags, truncation should produce warning + exit 1. 
+ _, stderr, code := lsRun(t, "ls", dir) + assert.Equal(t, 1, code, "implicit truncation must exit 1") + assert.Contains(t, stderr, "too many entries") + assert.Contains(t, stderr, "output truncated") +} + +func TestLsPentestRecursivePerDirCap(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + // Create a subdir with MaxDirEntries+1 files. + sub := filepath.Join(dir, "big") + require.NoError(t, os.Mkdir(sub, 0755)) + for i := 0; i < ls.MaxDirEntries+1; i++ { + require.NoError(t, os.WriteFile(filepath.Join(sub, fmt.Sprintf("f%06d", i)), nil, 0644)) + } + + mustNotHang(t, func() { + _, stderr, code := lsRun(t, "ls -R", dir) + assert.Equal(t, 1, code, "recursive should still exit 1 on per-dir truncation") + assert.Contains(t, stderr, "too many entries") + }) +} + +func TestLsPentestDotfilesDoNotBypassCap(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + // Create MaxDirEntries dotfiles + 10 regular files. + // Without raw cap, reading all dotfiles to find visible ones would be unbounded. + for i := 0; i < ls.MaxDirEntries; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf(".hidden%06d", i)), nil, 0644)) + } + for i := 0; i < 10; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("visible%02d", i)), nil, 0644)) + } + + mustNotHang(t, func() { + // ls (without -a) sees only visible files that fall within the raw cap window. + // The key invariant: ls must not read beyond MaxDirEntries raw entries. + stdout, stderr, code := lsRun(t, "ls", dir) + // Depending on filesystem order, we may or may not see all visible files. + // The important thing is that it completes and doesn't OOM. + _ = stdout + // With MaxDirEntries dotfiles + 10 visible files = 1010 total > MaxDirEntries, + // so truncation warning should appear. 
+ assert.Equal(t, 1, code) + assert.Contains(t, stderr, "too many entries") + }) +} + +func TestLsPentestLimitOnly(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 20; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + stdout, stderr, code := lsRun(t, "ls --limit 5", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 5, len(lines), "limit-only should cap output") + // Entries should be sorted. + for i := 1; i < len(lines); i++ { + assert.True(t, lines[i-1] <= lines[i], "entries should be sorted: %s <= %s", lines[i-1], lines[i]) + } +} + +func TestLsPentestOffsetOnly(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 20; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // offset-only should skip entries and return the rest (up to MaxDirEntries). + stdout, stderr, code := lsRun(t, "ls --offset 15", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 5, len(lines), "offset 15 of 20 should return 5 entries") +} + +func TestLsPentestLimitCappedAtMax(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 10; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // --limit 999999 should behave like --limit MaxDirEntries. + stdout, stderr, code := lsRun(t, "ls --limit 999999", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 10, len(lines), "all 10 entries should appear") +} + +func TestLsPentestAlmostAllWithOffset(t *testing.T) { + dir := t.TempDir() + // Create some dotfiles and regular files. 
+ for _, name := range []string{".hidden1", ".hidden2", "visible1", "visible2", "visible3"} { + require.NoError(t, os.WriteFile(filepath.Join(dir, name), nil, 0644)) + } + + // -A shows dotfiles (not . and ..) — offset applies to raw entries. + stdout, stderr, code := lsRun(t, "ls -A --offset 2 --limit 10", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + // Offset skips 2 raw filesystem entries; remaining 3 are returned. + assert.Equal(t, 3, len(lines), "should show 3 entries after offset 2 of 5") + // . and .. should NOT appear with -A. + assert.NotContains(t, stdout, "\n.\n") + assert.NotContains(t, stdout, "\n..\n") +} + +func TestLsPentestPaginationWithSortFlags(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 10; i++ { + name := fmt.Sprintf("f%02d", i) + require.NoError(t, os.WriteFile(filepath.Join(dir, name), []byte(strings.Repeat("x", (i+1)*100)), 0644)) + } + + t.Run("limit with reverse sort", func(t *testing.T) { + stdout, stderr, code := lsRun(t, "ls -r --limit 5", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 5, len(lines)) + // Entries should be in reverse sorted order within the page. 
+ for i := 1; i < len(lines); i++ { + assert.True(t, lines[i-1] >= lines[i], "entries should be reverse sorted: %s >= %s", lines[i-1], lines[i]) + } + }) + + t.Run("limit with size sort", func(t *testing.T) { + stdout, stderr, code := lsRun(t, "ls -S --limit 3", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 3, len(lines)) + }) +} + +func TestLsPentestRecursiveBothPaginationFlagsIgnored(t *testing.T) { + dir := t.TempDir() + for _, name := range []string{"aaa", "bbb"} { + sub := filepath.Join(dir, name) + require.NoError(t, os.Mkdir(sub, 0755)) + for i := 0; i < 5; i++ { + require.NoError(t, os.WriteFile(filepath.Join(sub, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + } + + // ls -R --offset 100 --limit 1 should show everything. + stdout, _, code := lsRun(t, "ls -R --offset 100 --limit 1", dir) + assert.Equal(t, 0, code) + assert.Contains(t, stdout, "./aaa:") + assert.Contains(t, stdout, "./bbb:") + // Each subdir should have all 5 files. 
+ assert.Equal(t, 10, strings.Count(stdout, "f0"), "all entries should appear in recursive mode") +} + +func TestLsPentestMultiDirBothPaginationFlagsIgnored(t *testing.T) { + dir := t.TempDir() + for _, sub := range []string{"d1", "d2"} { + subDir := filepath.Join(dir, sub) + require.NoError(t, os.Mkdir(subDir, 0755)) + for i := 0; i < 5; i++ { + require.NoError(t, os.WriteFile(filepath.Join(subDir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + } + + stdout, stderr, code := lsRun(t, "ls --offset 100 --limit 1 d1 d2", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + assert.Contains(t, stdout, "d1:") + assert.Contains(t, stdout, "d2:") + assert.Equal(t, 10, strings.Count(stdout, "f0"), "all entries should appear with multi-dir") +} + +func TestLsPentestTruncatedOutputShowsPartialResults(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + count := ls.MaxDirEntries + 50 + for i := 0; i < count; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%06d", i)), nil, 0644)) + } + + stdout, stderr, code := lsRun(t, "ls", dir) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "output truncated") + // Must have exactly MaxDirEntries lines (partial output shown before warning). + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, ls.MaxDirEntries, len(lines), "should show exactly MaxDirEntries entries before warning") + assert.NotEmpty(t, stdout, "must print partial results before warning") +} + +func TestLsPentestReadDirLimitedBoundsActualRead(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + // Create 2x MaxDirEntries files. 
+ count := ls.MaxDirEntries * 2 + for i := 0; i < count; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%06d", i)), nil, 0644)) + } + + mustNotHang(t, func() { + stdout, stderr, code := lsRun(t, "ls", dir) + assert.Equal(t, 1, code) + assert.Contains(t, stderr, "too many entries") + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + // Must show exactly MaxDirEntries, not more. + assert.Equal(t, ls.MaxDirEntries, len(lines)) + }) +} + +func TestLsPentestZeroOffsetZeroLimitNoPagination(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 10; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // Explicit zero values should behave identically to omitting the flags. + stdout, stderr, code := lsRun(t, "ls --offset 0 --limit 0", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 10, len(lines), "offset=0 limit=0 should show all entries") +} + +func TestLsPentestDirectoryFlagWithPagination(t *testing.T) { + dir := t.TempDir() + sub := filepath.Join(dir, "sub") + require.NoError(t, os.Mkdir(sub, 0755)) + for i := 0; i < 5; i++ { + require.NoError(t, os.WriteFile(filepath.Join(sub, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // ls -d with --limit shouldn't crash and should list the directory itself. + stdout, stderr, code := lsRun(t, "ls -d --limit 1 sub", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + assert.Contains(t, stdout, "sub") +} + +func TestLsPentestPaginationWithAllFlag(t *testing.T) { + dir := t.TempDir() + for i := 0; i < 10; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%02d", i)), nil, 0644)) + } + + // -a --limit 3: should show ".", "..", and 3 real entries = 5 lines. 
+ stdout, stderr, code := lsRun(t, "ls -a --limit 3", dir) + assert.Equal(t, 0, code) + assert.Empty(t, stderr) + lines := strings.Split(strings.TrimRight(stdout, "\n"), "\n") + assert.Equal(t, 5, len(lines), "3 real entries + . + .. = 5; got:\n%s", stdout) + assert.Equal(t, ".", lines[0]) + assert.Equal(t, "..", lines[1]) +} + +func TestLsPentestPaginationExitZeroOnHugeDir(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + dir := t.TempDir() + count := ls.MaxDirEntries + 500 + for i := 0; i < count; i++ { + require.NoError(t, os.WriteFile(filepath.Join(dir, fmt.Sprintf("f%06d", i)), nil, 0644)) + } + + // With pagination active, exit code should be 0 even on oversized dir. + _, stderr, code := lsRun(t, "ls --offset 0 --limit 100", dir) + assert.Equal(t, 0, code, "pagination should suppress truncation failure") + assert.Empty(t, stderr, "no warning with explicit pagination") +} + +func TestLsPentestSpecialCharFilenames(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("special characters in filenames may not be supported on Windows") + } + dir := t.TempDir() + names := []string{"file with spaces", "café", "日本語"} + for _, name := range names { + require.NoError(t, os.WriteFile(filepath.Join(dir, name), nil, 0644)) + } + stdout, _, code := lsRun(t, "ls", dir) + assert.Equal(t, 0, code) + for _, name := range names { + assert.Contains(t, stdout, name) + } +} diff --git a/interp/builtins/ls/ls.go b/interp/builtins/ls/ls.go index 68d7d725..f6826dd5 100644 --- a/interp/builtins/ls/ls.go +++ b/interp/builtins/ls/ls.go @@ -54,10 +54,29 @@ // -h, --human-readable // With -l, print sizes in human-readable format (e.g. 1K, 234M). // +// --offset N (non-standard) +// Skip the first N raw directory entries before collecting results. +// NOTE: offset operates on filesystem order, not sorted order — +// entries are sorted only within each page. For predictable +// pagination of large directories, use consistent offset/limit +// pairs. 
Applies to single-directory listings only; silently +// ignored with -R or multiple arguments. +// +// --limit N (non-standard) +// Show at most N entries, capped at MaxDirEntries (1,000) per call. +// Applies to single-directory listings only; silently ignored with -R +// or multiple arguments. +// +// When a directory exceeds MaxDirEntries (1,000) and no explicit --offset/ +// --limit is given, ls prints the first 1,000 entries, emits a warning to +// stderr, and exits 1. Use --offset/--limit to paginate through larger +// directories. +// // Exit codes: // // 0 All entries listed successfully. // 1 At least one error occurred (missing file, permission denied, etc.). +// 1 Directory truncated (exceeded MaxDirEntries without --offset/--limit). // 1 Invalid usage (unrecognised flag, etc.). package ls @@ -75,6 +94,9 @@ import ( // maxRecursionDepth limits how deep -R will recurse to prevent stack overflow. const maxRecursionDepth = 256 +// MaxDirEntries is the maximum number of entries ls will process per directory. +const MaxDirEntries = 1_000 + // errFailed is a sentinel used to signal that at least one entry had an error. var errFailed = errors.New("ls: one or more errors occurred") @@ -100,6 +122,9 @@ func registerFlags(fs *builtins.FlagSet) builtins.HandlerFunc { // -1 is the default in non-terminal (always true here), accepted for compat. _ = fs.Bool("one", false, "list one file per line") fs.Lookup("one").Shorthand = "1" + // Pagination flags (long-only, non-standard). 
+ offset := fs.Int("offset", 0, "skip first N entries (pagination)") + limit := fs.Int("limit", 0, "show at most N entries (capped at MaxDirEntries)") return func(ctx context.Context, callCtx *builtins.CallContext, args []string) builtins.Result { now := callCtx.Now() @@ -145,6 +170,8 @@ func registerFlags(fs *builtins.FlagSet) builtins.HandlerFunc { recursive: *recursive, longFmt: *longFmt, humanReadable: *humanReadable, + offset: *offset, + limit: *limit, } paths := args @@ -155,6 +182,12 @@ func registerFlags(fs *builtins.FlagSet) builtins.HandlerFunc { failed := false multipleArgs := len(paths) > 1 + // Pagination applies only to single-directory listings. + if multipleArgs { + opts.offset = 0 + opts.limit = 0 + } + // Separate files and dirs (when not -d). // Use Lstat so that symlink operands retain ModeSymlink (GNU ls default). // The link target is only stat'd when needed (e.g. to decide dir vs file @@ -237,6 +270,8 @@ type options struct { recursive bool longFmt bool humanReadable bool + offset int + limit int } type pathArg struct { @@ -250,7 +285,36 @@ func listDir(ctx context.Context, callCtx *builtins.CallContext, dir string, opt return errFailed } - entries, err := callCtx.ReadDir(ctx, dir) + // Clamp negative values to zero so they become no-ops. + if opts.offset < 0 { + opts.offset = 0 + } + if opts.limit < 0 { + opts.limit = 0 + } + + // Pagination applies only to single-directory, non-recursive listings. + // Silently ignore --offset/--limit when used with -R. + paginationActive := !opts.recursive && (opts.offset > 0 || opts.limit > 0) + + // Determine effective read limit. + effectiveLimit := MaxDirEntries + if opts.limit > 0 && !opts.recursive { + effectiveLimit = min(opts.limit, MaxDirEntries) + } + + // NOTE: MaxDirEntries caps raw directory reads (before hidden-file filtering) + // intentionally. 
Applying the cap after filtering would allow DoS via + // directories with many dotfiles — the shell would have to read unbounded + // entries to find visible ones. + // + // When pagination is active, offset is passed to readDir so skipping happens + // at the read level (O(n) memory regardless of offset). Otherwise offset=0. + readOffset := 0 + if paginationActive { + readOffset = opts.offset + } + entries, truncated, err := readDir(ctx, callCtx, dir, readOffset, effectiveLimit) if err != nil { callCtx.Errf("ls: cannot open directory '%s': %s\n", dir, callCtx.PortableErr(err)) return err @@ -258,23 +322,14 @@ func listDir(ctx context.Context, callCtx *builtins.CallContext, dir string, opt // Get FileInfo for sorting (if needed) and for long format. type entryInfo struct { - name string - info iofs.FileInfo + name string + info iofs.FileInfo + isSymlink bool } failed := false var infoEntries []entryInfo - // Synthesize . and .. for -a (os.ReadDir never includes them). - // NOTE: ".." intentionally uses the same FileInfo as "." because the - // parent directory may be outside the sandbox and cannot be stat'd. - if opts.all { - if dotInfo, err := callCtx.StatFile(ctx, dir); err == nil { - infoEntries = append(infoEntries, entryInfo{name: ".", info: dotInfo}) - infoEntries = append(infoEntries, entryInfo{name: "..", info: dotInfo}) - } - } - for _, e := range entries { if ctx.Err() != nil { break @@ -289,12 +344,33 @@ func listDir(ctx context.Context, callCtx *builtins.CallContext, dir string, opt failed = true continue } - infoEntries = append(infoEntries, entryInfo{name: name, info: info}) + infoEntries = append(infoEntries, entryInfo{ + name: name, + info: info, + isSymlink: e.Type()&iofs.ModeSymlink != 0, + }) + } + + // Synthesize . and .. for -a (os.ReadDir never includes them). + // Added before sorting so they participate in sort modifiers (-r, -S, -t), + // matching bash behavior. 
They do not consume offset/limit slots because + // pagination is handled at the read level. + // NOTE: ".." intentionally uses the same FileInfo as "." because the + // parent directory may be outside the sandbox and cannot be stat'd. + if opts.all { + if dotInfo, err := callCtx.StatFile(ctx, dir); err == nil { + infoEntries = append(infoEntries, entryInfo{name: ".", info: dotInfo}) + infoEntries = append(infoEntries, entryInfo{name: "..", info: dotInfo}) + } } - // Sort. + // Sort all entries (including . and ..) so sort modifiers apply uniformly. sortEntries(infoEntries, opts, func(a entryInfo) iofs.FileInfo { return a.info }, func(a entryInfo) string { return a.name }) + // Offset is handled at the read level (streaming skip in readDir), + // so no post-sort slicing is needed. The effectiveLimit is already + // enforced by readDir's maxRead parameter. + // Print. for _, ei := range infoEntries { if ctx.Err() != nil { @@ -303,16 +379,28 @@ func listDir(ctx context.Context, callCtx *builtins.CallContext, dir string, opt printEntry(callCtx, ei.name, ei.info, opts, now) } + // Only warn on implicit truncation (no explicit --offset/--limit). + // Return immediately — do not recurse (-R) into subdirectories after + // hitting the MaxDirEntries cap. This intentionally diverges from bash, + // which would continue recursion. The cap exists to bound total work; + // allowing recursion after truncation would let an adversarial tree + // (e.g. 1000 subdirs × 1000 subdirs × ...) trigger unbounded traversal. + if truncated && !paginationActive { + callCtx.Errf("ls: warning: directory '%s': too many entries (exceeded %d limit), output truncated\n", dir, MaxDirEntries) + return errFailed + } + // Recurse into subdirectories if -R. - // NOTE: Symlink loops are not detected because the syscall package - // (needed for device+inode tracking) is banned by the import allowlist. - // The maxRecursionDepth limit bounds total work in this case. 
+ // Symlinks are skipped to match GNU ls default behaviour (which does + // not follow symlinks during -R traversal) and to prevent symlink-loop + // denial-of-service attacks. The maxRecursionDepth limit provides an + // additional safety bound. if opts.recursive { for _, ei := range infoEntries { if ctx.Err() != nil { break } - if !ei.info.IsDir() { + if ei.isSymlink || !ei.info.IsDir() { continue } if ei.name == "." || ei.name == ".." { @@ -332,6 +420,16 @@ func listDir(ctx context.Context, callCtx *builtins.CallContext, dir string, opt return nil } +// readDir dispatches to ReadDirLimited when available, falling back to ReadDir. +// The offset parameter skips raw directory entries at the read level (before sorting). +func readDir(ctx context.Context, callCtx *builtins.CallContext, dir string, offset, maxRead int) (entries []iofs.DirEntry, truncated bool, err error) { + if callCtx.ReadDirLimited != nil { + return callCtx.ReadDirLimited(ctx, dir, offset, maxRead) + } + entries, err = callCtx.ReadDir(ctx, dir) + return entries, false, err +} + func printEntry(callCtx *builtins.CallContext, name string, info iofs.FileInfo, opts *options, now time.Time) { if opts.longFmt { mode := formatMode(info) diff --git a/interp/builtins/printf/printf.go b/interp/builtins/printf/printf.go index c7538cee..54b6cb76 100644 --- a/interp/builtins/printf/printf.go +++ b/interp/builtins/printf/printf.go @@ -1049,7 +1049,6 @@ func parseFloatArg(s string) (floatArg, error) { return floatArg{f: val}, nil } - // processBEscapes handles backslash escapes for %b (like echo -e). // Returns the processed string, whether \c was seen (stop all output), // and any warning messages to emit to stderr. 
diff --git a/interp/builtins/strings_cmd/strings.go b/interp/builtins/strings_cmd/strings.go index bdba91f8..b884e59a 100644 --- a/interp/builtins/strings_cmd/strings.go +++ b/interp/builtins/strings_cmd/strings.go @@ -91,7 +91,7 @@ const ( // radixFlagVal implements pflag.Value for the -t / --radix flag. // Validation happens in Set so pflag reports errors during parsing, which also -// correctly rejects empty values (e.g. --radix= or -t ''). +// correctly rejects empty values (e.g. --radix= or -t ''). type radixFlagVal struct{ target *radixFormat } func (r *radixFlagVal) String() string { diff --git a/interp/builtins/tail/tail.go b/interp/builtins/tail/tail.go index b7f227f6..35d231e3 100644 --- a/interp/builtins/tail/tail.go +++ b/interp/builtins/tail/tail.go @@ -120,15 +120,15 @@ type countMode struct { // returns a bound handler whose flag variables are captured by closure. The // framework calls Parse and passes positional arguments to the handler. func registerFlags(fs *builtins.FlagSet) builtins.HandlerFunc { - help := fs.BoolP("help", "h", false, "print usage and exit") + help := fs.BoolP("help", "h", false, "print usage and exit") zeroTerminated := fs.BoolP("zero-terminated", "z", false, "use NUL as line delimiter") // quietFlag, silentFlag, and verboseFlag share a sequence counter so that // after parsing we can tell which appeared last on the command line and // apply last-flag-wins semantics (e.g. "-q -v" should show headers). var headerSeq int - quietFlag := newHeaderFlag(&headerSeq) - silentFlag := newHeaderFlag(&headerSeq) + quietFlag := newHeaderFlag(&headerSeq) + silentFlag := newHeaderFlag(&headerSeq) verboseFlag := newHeaderFlag(&headerSeq) fs.VarP(quietFlag, "quiet", "q", "never print file name headers") fs.Var(silentFlag, "silent", "alias for --quiet") @@ -162,10 +162,10 @@ func registerFlags(fs *builtins.FlagSet) builtins.HandlerFunc { // Bytes mode wins if -c/--bytes was parsed after -n/--lines.
useBytesMode := bytesFlag.pos > linesFlag.pos - countStr := linesFlag.val + countStr := linesFlag.val modeLabel := "lines" if useBytesMode { - countStr = bytesFlag.val + countStr = bytesFlag.val modeLabel = "bytes" } diff --git a/interp/builtins/wc/wc.go b/interp/builtins/wc/wc.go index 1b71418b..7fbf50ee 100644 --- a/interp/builtins/wc/wc.go +++ b/interp/builtins/wc/wc.go @@ -259,8 +259,8 @@ func countReader(ctx context.Context, r io.Reader) (counts, error) { } c.chars += int64(utf8.RuneCount(chunk)) // carryN bytes are subtracted here and will be re-added via - // n += carryN at the top of the next iteration. - c.bytes -= int64(carryN) + // n += carryN at the top of the next iteration. + c.bytes -= int64(carryN) for i := 0; i < len(chunk); { r, size := utf8.DecodeRune(chunk[i:]) @@ -353,25 +353,25 @@ func runeWidth(r rune) int { // codepoints per UAX #11, matching the ranges used by wcwidth(3). var eastAsianWide = &unicode.RangeTable{ R16: []unicode.Range16{ - {0x1100, 0x115F, 1}, // Hangul Jamo initials - {0x2329, 0x232A, 1}, // CJK angle brackets - {0x2E80, 0x303E, 1}, // CJK Radicals Supplement .. CJK Symbols - {0x3040, 0x33BF, 1}, // Hiragana .. CJK Compatibility - {0x33C0, 0x33FF, 1}, // CJK Compatibility (cont.) - {0x3400, 0x4DBF, 1}, // CJK Unified Ideographs Extension A - {0x4E00, 0xA4CF, 1}, // CJK Unified Ideographs .. Yi - {0xAC00, 0xD7A3, 1}, // Hangul Syllables - {0xF900, 0xFAFF, 1}, // CJK Compatibility Ideographs - {0xFE10, 0xFE19, 1}, // Vertical Forms - {0xFE30, 0xFE6F, 1}, // CJK Compatibility Forms + Small Form Variants - {0xFF01, 0xFF60, 1}, // Fullwidth Forms - {0xFFE0, 0xFFE6, 1}, // Fullwidth Signs + {0x1100, 0x115F, 1}, // Hangul Jamo initials + {0x2329, 0x232A, 1}, // CJK angle brackets + {0x2E80, 0x303E, 1}, // CJK Radicals Supplement .. CJK Symbols + {0x3040, 0x33BF, 1}, // Hiragana .. CJK Compatibility + {0x33C0, 0x33FF, 1}, // CJK Compatibility (cont.) 
+ {0x3400, 0x4DBF, 1}, // CJK Unified Ideographs Extension A + {0x4E00, 0xA4CF, 1}, // CJK Unified Ideographs .. Yi + {0xAC00, 0xD7A3, 1}, // Hangul Syllables + {0xF900, 0xFAFF, 1}, // CJK Compatibility Ideographs + {0xFE10, 0xFE19, 1}, // Vertical Forms + {0xFE30, 0xFE6F, 1}, // CJK Compatibility Forms + Small Form Variants + {0xFF01, 0xFF60, 1}, // Fullwidth Forms + {0xFFE0, 0xFFE6, 1}, // Fullwidth Signs }, R32: []unicode.Range32{ - {0x1F300, 0x1F64F, 1}, // Misc Symbols/Pictographs + Emoticons - {0x1F900, 0x1F9FF, 1}, // Supplemental Symbols and Pictographs - {0x20000, 0x2FFFD, 1}, // CJK Extension B..F - {0x30000, 0x3FFFD, 1}, // CJK Extension G+ + {0x1F300, 0x1F64F, 1}, // Misc Symbols/Pictographs + Emoticons + {0x1F900, 0x1F9FF, 1}, // Supplemental Symbols and Pictographs + {0x20000, 0x2FFFD, 1}, // CJK Extension B..F + {0x30000, 0x3FFFD, 1}, // CJK Extension G+ }, } diff --git a/interp/builtins/wc/wc_test.go b/interp/builtins/wc/wc_test.go index 4707b0dd..dd2e3d20 100644 --- a/interp/builtins/wc/wc_test.go +++ b/interp/builtins/wc/wc_test.go @@ -451,4 +451,3 @@ func TestWcChunkBoundaryMultibyte(t *testing.T) { // max line length: 32766 + 2 (emoji display width) = 32768 assert.Equal(t, "32768 32768 file.txt\n", stdout) } - diff --git a/interp/register_builtins.go b/interp/register_builtins.go index a86488a6..1a360bbc 100644 --- a/interp/register_builtins.go +++ b/interp/register_builtins.go @@ -11,8 +11,8 @@ import ( "github.com/DataDog/rshell/interp/builtins" breakcmd "github.com/DataDog/rshell/interp/builtins/break" "github.com/DataDog/rshell/interp/builtins/cat" - "github.com/DataDog/rshell/interp/builtins/cut" continuecmd "github.com/DataDog/rshell/interp/builtins/continue" + "github.com/DataDog/rshell/interp/builtins/cut" "github.com/DataDog/rshell/interp/builtins/echo" "github.com/DataDog/rshell/interp/builtins/exit" falsecmd "github.com/DataDog/rshell/interp/builtins/false" diff --git a/interp/runner_exec.go b/interp/runner_exec.go index 
6ea8e557..cf0b6a1e 100644 --- a/interp/runner_exec.go +++ b/interp/runner_exec.go @@ -245,6 +245,9 @@ func (r *Runner) call(ctx context.Context, pos syntax.Pos, args []string) { ReadDir: func(ctx context.Context, path string) ([]fs.DirEntry, error) { return r.sandbox.readDir(r.handlerCtx(ctx, todoPos), path) }, + ReadDirLimited: func(ctx context.Context, path string, offset, maxRead int) ([]fs.DirEntry, bool, error) { + return r.sandbox.readDirLimited(r.handlerCtx(ctx, todoPos), path, offset, maxRead) + }, StatFile: func(ctx context.Context, path string) (fs.FileInfo, error) { return r.sandbox.stat(r.handlerCtx(ctx, todoPos), path) }, diff --git a/tests/allowed_symbols_test.go b/tests/allowed_symbols_test.go index 0b272b2f..eab10e9c 100644 --- a/tests/allowed_symbols_test.go +++ b/tests/allowed_symbols_test.go @@ -46,6 +46,8 @@ var builtinAllowedSymbols = []string{ "errors.New", // fmt.Sprintf — string formatting; pure function, no I/O. "fmt.Sprintf", + // io/fs.DirEntry — interface type for directory entries; no side effects. + "io/fs.DirEntry", // io/fs.FileInfo — interface type for file information; no side effects. "io/fs.FileInfo", // io/fs.ModeDir — file mode bit constant for directories; pure constant. diff --git a/tests/compliance_test.go b/tests/compliance_test.go index 037a0008..e2e62683 100644 --- a/tests/compliance_test.go +++ b/tests/compliance_test.go @@ -156,11 +156,11 @@ func TestComplianceLicense3rdPartyFormat(t *testing.T) { // Known SPDX identifiers (extend as needed). 
knownSPDX := map[string]bool{ "MIT": true, - "Apache-2.0": true, - "BSD-2-Clause": true, - "BSD-3-Clause": true, - "ISC": true, - "MPL-2.0": true, + "Apache-2.0": true, + "BSD-2-Clause": true, + "BSD-3-Clause": true, + "ISC": true, + "MPL-2.0": true, "MIT AND Apache-2.0": true, } diff --git a/tests/scenarios/cmd/ls/flags/all_reverse.yaml b/tests/scenarios/cmd/ls/flags/all_reverse.yaml new file mode 100644 index 00000000..2cf04cd9 --- /dev/null +++ b/tests/scenarios/cmd/ls/flags/all_reverse.yaml @@ -0,0 +1,25 @@ +description: ls -ar reverses all entries including . and .. +setup: + files: + - path: f1 + content: "" + chmod: 0644 + - path: f2 + content: "" + chmod: 0644 + - path: f3 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls -ar +expect: + stdout: |+ + f3 + f2 + f1 + .. + . + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/flags/recursive_symlink_not_followed.yaml b/tests/scenarios/cmd/ls/flags/recursive_symlink_not_followed.yaml new file mode 100644 index 00000000..b9dabab2 --- /dev/null +++ b/tests/scenarios/cmd/ls/flags/recursive_symlink_not_followed.yaml @@ -0,0 +1,22 @@ +description: ls -R does not follow symlinks to directories (matches GNU ls default). +setup: + files: + - path: real/file.txt + content: "hello" + chmod: 0644 + - path: link + symlink: real +input: + allowed_paths: ["$DIR"] + script: |+ + ls -R +expect: + stdout: |+ + .: + link + real + + ./real: + file.txt + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/directory_flag_with_limit.yaml b/tests/scenarios/cmd/ls/pagination/directory_flag_with_limit.yaml new file mode 100644 index 00000000..e866eb4e --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/directory_flag_with_limit.yaml @@ -0,0 +1,19 @@ +description: ls -d --limit lists directories themselves without entering them. 
+skip_assert_against_bash: true +setup: + files: + - path: sub/f01 + content: "" + chmod: 0644 + - path: sub/f02 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls -d --limit 1 sub +expect: + stdout: |+ + sub + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/dot_dotdot_not_consumed_by_limit.yaml b/tests/scenarios/cmd/ls/pagination/dot_dotdot_not_consumed_by_limit.yaml new file mode 100644 index 00000000..1515490e --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/dot_dotdot_not_consumed_by_limit.yaml @@ -0,0 +1,26 @@ +description: ls -a --limit does not count . and .. against the limit. +skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls -a --limit 3 +expect: + stdout: |+ + . + .. + f01 + f02 + f03 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/dot_dotdot_not_consumed_by_offset.yaml b/tests/scenarios/cmd/ls/pagination/dot_dotdot_not_consumed_by_offset.yaml new file mode 100644 index 00000000..d71a84e6 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/dot_dotdot_not_consumed_by_offset.yaml @@ -0,0 +1,28 @@ +description: ls -a --offset does not skip . and .. (they are synthesized after pagination). 
+skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 + - path: f04 + content: "" + chmod: 0644 + - path: f05 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls -a --offset 3 --limit 2 | wc -l | tr -d ' ' +expect: + stdout: |+ + 4 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/limit_only.yaml b/tests/scenarios/cmd/ls/pagination/limit_only.yaml new file mode 100644 index 00000000..d581db04 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/limit_only.yaml @@ -0,0 +1,30 @@ +description: ls --limit caps the number of entries returned. +skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 + - path: f04 + content: "" + chmod: 0644 + - path: f05 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --limit 3 +expect: + stdout: |+ + f01 + f02 + f03 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/limit_with_reverse.yaml b/tests/scenarios/cmd/ls/pagination/limit_with_reverse.yaml new file mode 100644 index 00000000..e7382c63 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/limit_with_reverse.yaml @@ -0,0 +1,28 @@ +description: ls -r --limit returns the correct number of entries in reverse sort order. 
+skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 + - path: f04 + content: "" + chmod: 0644 + - path: f05 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls -r --limit 3 | wc -l | tr -d ' ' +expect: + stdout: |+ + 3 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/multi_dir_ignores_limit.yaml b/tests/scenarios/cmd/ls/pagination/multi_dir_ignores_limit.yaml new file mode 100644 index 00000000..0315339f --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/multi_dir_ignores_limit.yaml @@ -0,0 +1,31 @@ +description: ls with multiple directory args silently ignores --limit. +skip_assert_against_bash: true +setup: + files: + - path: d1/f01 + content: "" + chmod: 0644 + - path: d1/f02 + content: "" + chmod: 0644 + - path: d2/f01 + content: "" + chmod: 0644 + - path: d2/f02 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --limit 1 d1 d2 +expect: + stdout: |+ + d1: + f01 + f02 + + d2: + f01 + f02 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/multi_dir_ignores_offset.yaml b/tests/scenarios/cmd/ls/pagination/multi_dir_ignores_offset.yaml new file mode 100644 index 00000000..f46d3ae4 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/multi_dir_ignores_offset.yaml @@ -0,0 +1,31 @@ +description: ls with multiple directory args silently ignores --offset. 
+skip_assert_against_bash: true +setup: + files: + - path: d1/f01 + content: "" + chmod: 0644 + - path: d1/f02 + content: "" + chmod: 0644 + - path: d2/f01 + content: "" + chmod: 0644 + - path: d2/f02 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --offset 100 d1 d2 +expect: + stdout: |+ + d1: + f01 + f02 + + d2: + f01 + f02 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/negative_limit_clamped.yaml b/tests/scenarios/cmd/ls/pagination/negative_limit_clamped.yaml new file mode 100644 index 00000000..5ee4023b --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/negative_limit_clamped.yaml @@ -0,0 +1,24 @@ +description: Negative --limit is clamped to zero (shows all entries). +skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --limit -1 +expect: + stdout: |+ + f01 + f02 + f03 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/negative_offset_clamped.yaml b/tests/scenarios/cmd/ls/pagination/negative_offset_clamped.yaml new file mode 100644 index 00000000..267ad534 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/negative_offset_clamped.yaml @@ -0,0 +1,24 @@ +description: Negative --offset is clamped to zero. 
+skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --offset -1 +expect: + stdout: |+ + f01 + f02 + f03 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/offset_and_limit.yaml b/tests/scenarios/cmd/ls/pagination/offset_and_limit.yaml new file mode 100644 index 00000000..141641de --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/offset_and_limit.yaml @@ -0,0 +1,37 @@ +description: ls --offset and --limit together paginate results. +skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 + - path: f04 + content: "" + chmod: 0644 + - path: f05 + content: "" + chmod: 0644 + - path: f06 + content: "" + chmod: 0644 + - path: f07 + content: "" + chmod: 0644 + - path: f08 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --offset 2 --limit 3 | wc -l | tr -d ' ' +expect: + stdout: |+ + 3 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/offset_beyond_count.yaml b/tests/scenarios/cmd/ls/pagination/offset_beyond_count.yaml new file mode 100644 index 00000000..d91c4a3e --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/offset_beyond_count.yaml @@ -0,0 +1,18 @@ +description: ls --offset beyond entry count returns empty output. 
+skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --offset 100 --limit 10 +expect: + stdout: "" + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/offset_only.yaml b/tests/scenarios/cmd/ls/pagination/offset_only.yaml new file mode 100644 index 00000000..ffbeb394 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/offset_only.yaml @@ -0,0 +1,28 @@ +description: ls --offset skips entries and returns the rest. +skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 + - path: f04 + content: "" + chmod: 0644 + - path: f05 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --offset 3 | wc -l | tr -d ' ' +expect: + stdout: |+ + 2 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/recursive_ignores_limit.yaml b/tests/scenarios/cmd/ls/pagination/recursive_ignores_limit.yaml new file mode 100644 index 00000000..f8b4bfa6 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/recursive_ignores_limit.yaml @@ -0,0 +1,27 @@ +description: ls -R silently ignores --limit. +skip_assert_against_bash: true +setup: + files: + - path: aaa/file.txt + content: "" + chmod: 0644 + - path: bbb/file.txt + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls -R --limit 1 +expect: + stdout: |+ + .: + aaa + bbb + + ./aaa: + file.txt + + ./bbb: + file.txt + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/recursive_ignores_offset.yaml b/tests/scenarios/cmd/ls/pagination/recursive_ignores_offset.yaml new file mode 100644 index 00000000..171a8e06 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/recursive_ignores_offset.yaml @@ -0,0 +1,27 @@ +description: ls -R silently ignores --offset. 
+skip_assert_against_bash: true +setup: + files: + - path: aaa/file.txt + content: "" + chmod: 0644 + - path: bbb/file.txt + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls -R --offset 100 +expect: + stdout: |+ + .: + aaa + bbb + + ./aaa: + file.txt + + ./bbb: + file.txt + stderr: "" + exit_code: 0 diff --git a/tests/scenarios/cmd/ls/pagination/zero_offset_zero_limit.yaml b/tests/scenarios/cmd/ls/pagination/zero_offset_zero_limit.yaml new file mode 100644 index 00000000..23f1e4d2 --- /dev/null +++ b/tests/scenarios/cmd/ls/pagination/zero_offset_zero_limit.yaml @@ -0,0 +1,24 @@ +description: --offset 0 --limit 0 behaves like no pagination. +skip_assert_against_bash: true +setup: + files: + - path: f01 + content: "" + chmod: 0644 + - path: f02 + content: "" + chmod: 0644 + - path: f03 + content: "" + chmod: 0644 +input: + allowed_paths: ["$DIR"] + script: |+ + ls --offset 0 --limit 0 +expect: + stdout: |+ + f01 + f02 + f03 + stderr: "" + exit_code: 0 diff --git a/tests/scenarios_test.go b/tests/scenarios_test.go index e3240c32..58652141 100644 --- a/tests/scenarios_test.go +++ b/tests/scenarios_test.go @@ -31,7 +31,7 @@ const dockerBashImage = "debian:bookworm-slim" // scenario represents a single test scenario. type scenario struct { Description string `yaml:"description"` - SkipAssertAgainstBash bool `yaml:"skip_assert_against_bash"` // true = skip bash comparison + SkipAssertAgainstBash bool `yaml:"skip_assert_against_bash"` // true = skip bash comparison Setup setup `yaml:"setup"` Input input `yaml:"input"` Expect expected `yaml:"expect"`