
Source file src/runtime/mpagecache_test.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"math/rand"
	. "runtime"
	"runtime/internal/sys"
	"testing"
)

func checkPageCache(t *testing.T, got, want PageCache) {
	if got.Base() != want.Base() {
		t.Errorf("bad pageCache base: got 0x%x, want 0x%x", got.Base(), want.Base())
	}
	if got.Cache() != want.Cache() {
		19  		t.Errorf("bad pageCache bits: got %016x, want %016x", got.Base(), want.Base())
	}
	if got.Scav() != want.Scav() {
		t.Errorf("bad pageCache scav: got %016x, want %016x", got.Scav(), want.Scav())
	}
}

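// TestPageCacheAlloc exercises pageCache.alloc against a variety of cache and
// scavenge bit patterns, checking the base address and the number of
// scavenged bytes returned for each allocation request.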
func TestPageCacheAlloc(t *testing.T) {
	base := PageBase(BaseChunkIdx, 0)
	type hit struct {
		npages uintptr
		base   uintptr
		scav   uintptr
	}
	tests := map[string]struct {
		cache PageCache
		hits  []hit
	}{
		"Empty": {
			cache: NewPageCache(base, 0, 0),
			hits: []hit{
				{1, 0, 0},
				{2, 0, 0},
				{3, 0, 0},
				{4, 0, 0},
				{5, 0, 0},
				{11, 0, 0},
				{12, 0, 0},
				{16, 0, 0},
				{27, 0, 0},
				{32, 0, 0},
				{43, 0, 0},
				{57, 0, 0},
				{64, 0, 0},
				{121, 0, 0},
			},
		},
		"Lo1": {
			cache: NewPageCache(base, 0x1, 0x1),
			hits: []hit{
				{1, base, PageSize},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		"Hi1": {
			cache: NewPageCache(base, 0x1<<63, 0x1),
			hits: []hit{
				{1, base + 63*PageSize, 0},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		"Swiss1": {
			cache: NewPageCache(base, 0x20005555, 0x5505),
			hits: []hit{
				{2, 0, 0},
				{1, base, PageSize},
				{1, base + 2*PageSize, PageSize},
				{1, base + 4*PageSize, 0},
				{1, base + 6*PageSize, 0},
				{1, base + 8*PageSize, PageSize},
				{1, base + 10*PageSize, PageSize},
				{1, base + 12*PageSize, PageSize},
				{1, base + 14*PageSize, PageSize},
				{1, base + 29*PageSize, 0},
				{1, 0, 0},
				{10, 0, 0},
			},
		},
		"Lo2": {
			cache: NewPageCache(base, 0x3, 0x2<<62),
			hits: []hit{
				{2, base, 0},
				{2, 0, 0},
				{1, 0, 0},
			},
		},
		"Hi2": {
			cache: NewPageCache(base, 0x3<<62, 0x3<<62),
			hits: []hit{
				{2, base + 62*PageSize, 2 * PageSize},
				{2, 0, 0},
				{1, 0, 0},
			},
		},
		"Swiss2": {
			cache: NewPageCache(base, 0x3333<<31, 0x3030<<31),
			hits: []hit{
				{2, base + 31*PageSize, 0},
				{2, base + 35*PageSize, 2 * PageSize},
				{2, base + 39*PageSize, 0},
				{2, base + 43*PageSize, 2 * PageSize},
				{2, 0, 0},
			},
		},
		"Hi53": {
			cache: NewPageCache(base, ((uint64(1)<<53)-1)<<10, ((uint64(1)<<16)-1)<<10),
			hits: []hit{
				{53, base + 10*PageSize, 16 * PageSize},
				{53, 0, 0},
				{1, 0, 0},
			},
		},
		"Full53": {
			cache: NewPageCache(base, ^uint64(0), ((uint64(1)<<16)-1)<<10),
			hits: []hit{
				{53, base, 16 * PageSize},
				{53, 0, 0},
				{1, base + 53*PageSize, 0},
			},
		},
		"Full64": {
			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
			hits: []hit{
				{64, base, 64 * PageSize},
				{64, 0, 0},
				{1, 0, 0},
			},
		},
		"FullMixed": {
			cache: NewPageCache(base, ^uint64(0), ^uint64(0)),
			hits: []hit{
				{5, base, 5 * PageSize},
				{7, base + 5*PageSize, 7 * PageSize},
				{1, base + 12*PageSize, 1 * PageSize},
				{23, base + 13*PageSize, 23 * PageSize},
				{63, 0, 0},
				{3, base + 36*PageSize, 3 * PageSize},
				{3, base + 39*PageSize, 3 * PageSize},
				{3, base + 42*PageSize, 3 * PageSize},
				{12, base + 45*PageSize, 12 * PageSize},
				{11, 0, 0},
				{4, base + 57*PageSize, 4 * PageSize},
				{4, 0, 0},
				{6, 0, 0},
				{36, 0, 0},
				{2, base + 61*PageSize, 2 * PageSize},
				{3, 0, 0},
				{1, base + 63*PageSize, 1 * PageSize},
				{4, 0, 0},
				{2, 0, 0},
				{62, 0, 0},
				{1, 0, 0},
			},
		},
	}
	for name, test := range tests {
		test := test
		t.Run(name, func(t *testing.T) {
			c := test.cache
			for i, h := range test.hits {
				b, s := c.Alloc(h.npages)
				if b != h.base {
					t.Fatalf("bad alloc base #%d: got 0x%x, want 0x%x", i, b, h.base)
				}
				if s != h.scav {
					t.Fatalf("bad alloc scav #%d: got %d, want %d", i, s, h.scav)
				}
			}
		})
	}
}

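// TestPageCacheFlush checks that flushing a pageCache back into a pageAlloc
// frees the pages still held in the cache and records their scavenged state.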
func TestPageCacheFlush(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	bits64ToBitRanges := func(bits uint64, base uint) []BitRange {
		var ranges []BitRange
		start, size := uint(0), uint(0)
		for i := 0; i < 64; i++ {
			if bits&(1<<i) != 0 {
				if size == 0 {
					start = uint(i) + base
				}
				size++
			} else {
				if size != 0 {
					ranges = append(ranges, BitRange{start, size})
					size = 0
				}
			}
		}
		if size != 0 {
			ranges = append(ranges, BitRange{start, size})
		}
		return ranges
	}
	runTest := func(t *testing.T, base uint, cache, scav uint64) {
		// Set up the before state.
		beforeAlloc := map[ChunkIdx][]BitRange{
			BaseChunkIdx: {{base, 64}},
		}
		beforeScav := map[ChunkIdx][]BitRange{
			BaseChunkIdx: {},
		}
		b := NewPageAlloc(beforeAlloc, beforeScav)
		defer FreePageAlloc(b)

		// Create and flush the cache.
		c := NewPageCache(PageBase(BaseChunkIdx, base), cache, scav)
		c.Flush(b)
		if !c.Empty() {
			t.Errorf("pageCache flush did not clear cache")
		}

		// Set up the expected after state.
		afterAlloc := map[ChunkIdx][]BitRange{
			BaseChunkIdx: bits64ToBitRanges(^cache, base),
		}
		afterScav := map[ChunkIdx][]BitRange{
			BaseChunkIdx: bits64ToBitRanges(scav, base),
		}
		want := NewPageAlloc(afterAlloc, afterScav)
		defer FreePageAlloc(want)

		// Check to see if it worked.
		checkPageAlloc(t, want, b)
	}

	// Empty.
	runTest(t, 0, 0, 0)

	// Full.
	runTest(t, 0, ^uint64(0), ^uint64(0))

	// Random.
	for i := 0; i < 100; i++ {
		// Generate random valid base within a chunk.
		base := uint(rand.Intn(PallocChunkPages/64)) * 64

		// Generate random cache.
		cache := rand.Uint64()
		scav := rand.Uint64() & cache

		// Run the test.
		runTest(t, base, cache, scav)
	}
}

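// TestPageAllocAllocToCache checks that pageAlloc.allocToCache hands back a
// pageCache with the expected base, cache bits, and scavenge bits, and that
// the allocator's own bitmaps are updated accordingly.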
func TestPageAllocAllocToCache(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	type test struct {
		before map[ChunkIdx][]BitRange
		scav   map[ChunkIdx][]BitRange
		hits   []PageCache // expected base addresses and patterns
		after  map[ChunkIdx][]BitRange
	}
	tests := map[string]test{
		"AllFree": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{1, 1}, {64, 64}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx, 0), ^uint64(0), 0x2),
				NewPageCache(PageBase(BaseChunkIdx, 64), ^uint64(0), ^uint64(0)),
				NewPageCache(PageBase(BaseChunkIdx, 128), ^uint64(0), 0),
				NewPageCache(PageBase(BaseChunkIdx, 192), ^uint64(0), 0),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 256}},
			},
		},
		"ManyArena": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages - 64}},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx+2, PallocChunkPages-64), ^uint64(0), 0),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
		},
		"NotContiguous": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{0, 0}},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{31, 67}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx+0xff, 0), ^uint64(0), ((uint64(1)<<33)-1)<<31),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx:        {{0, PallocChunkPages}},
				BaseChunkIdx + 0xff: {{0, 64}},
			},
		},
		"First": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 32}, {33, 31}, {96, 32}},
			},
			scav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{1, 4}, {31, 5}, {66, 2}},
			},
			hits: []PageCache{
				NewPageCache(PageBase(BaseChunkIdx, 0), 1<<32, 1<<32),
				NewPageCache(PageBase(BaseChunkIdx, 64), (uint64(1)<<32)-1, 0x3<<2),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, 128}},
			},
		},
		"Fail": {
			before: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
			hits: []PageCache{
				NewPageCache(0, 0, 0),
				NewPageCache(0, 0, 0),
				NewPageCache(0, 0, 0),
			},
			after: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
		},
	}
	// Disable these tests on iOS since we have a small address space.
	// See #46860.
	if PageAlloc64Bit != 0 && sys.GoosIos == 0 {
		const chunkIdxBigJump = 0x100000 // chunk index offset which translates to O(TiB)

		// This test is similar to the one with the same name for
		// pageAlloc.alloc and serves the same purpose.
		// See mpagealloc_test.go for details.
		sumsPerPhysPage := ChunkIdx(PhysPageSize / PallocSumBytes)
		baseChunkIdx := BaseChunkIdx &^ (sumsPerPhysPage - 1)
		tests["DiscontiguousMappedSumBoundary"] = test{
			before: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages - 1}},
				baseChunkIdx + chunkIdxBigJump:     {{1, PallocChunkPages - 1}},
			},
			scav: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {},
				baseChunkIdx + chunkIdxBigJump:     {},
			},
			hits: []PageCache{
				NewPageCache(PageBase(baseChunkIdx+sumsPerPhysPage-1, PallocChunkPages-64), 1<<63, 0),
				NewPageCache(PageBase(baseChunkIdx+chunkIdxBigJump, 0), 1, 0),
				NewPageCache(0, 0, 0),
			},
			after: map[ChunkIdx][]BitRange{
				baseChunkIdx + sumsPerPhysPage - 1: {{0, PallocChunkPages}},
				baseChunkIdx + chunkIdxBigJump:     {{0, PallocChunkPages}},
			},
		}
	}
	for name, v := range tests {
		v := v
		t.Run(name, func(t *testing.T) {
			b := NewPageAlloc(v.before, v.scav)
			defer FreePageAlloc(b)

			for _, expect := range v.hits {
				checkPageCache(t, b.AllocToCache(), expect)
				if t.Failed() {
					return
				}
			}
			want := NewPageAlloc(v.after, v.scav)
			defer FreePageAlloc(want)

			checkPageAlloc(t, want, b)
		})
	}
}
