The primary improvement is early clearing of an analyzed package's TypeInfo,
facts, etc. when whole-program analyzers (`unused`) are enabled: the data is
now cleared as soon as it becomes unused, so the GC can collect it early.
Previously this clearing was performed for all analyzers except `unused`.
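
A minimal sketch of the idea, assuming a reference-counted loader; pkgData,
refs and release are illustrative names, not golangci-lint's actual API:

// Hypothetical sketch: each package's heavy data is reference-counted and
// nilled out as soon as the last analyzer (including a whole-program one)
// is done with it, so the GC can reclaim it before the run finishes.
package loader

import (
	"go/types"
	"sync"
)

type pkgData struct {
	mu        sync.Mutex
	refs      int         // analyzers still using this package's data
	TypesInfo *types.Info // large; worth releasing as soon as possible
	Facts     []byte      // serialized analysis facts, released alongside TypesInfo
}

// release is called whenever an analyzer finishes with the package.
// Once the last reference is dropped, the heavy fields are nilled out,
// letting the garbage collector reclaim them early.
func (p *pkgData) release() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.refs--
	if p.refs == 0 {
		p.TypesInfo = nil
		p.Facts = nil
	}
}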
Update staticcheck from v0.0.1-2019.2.3 to v0.0.1-2020.1.4
Also in this commit:
  * speed up loading packages from export data (2.5s -> 2.1s for std)
    by not taking a mutex around export data, which became safe after
    x/tools#07722704da13
  * make the order in which linters are executed stable (see the sketch
    after this list)
  * update renameio and robustio
  * use robustio in caching (see the retry sketch below)
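
A rough sketch of the "stable order" bullet, using an illustrative Linter
type rather than golangci-lint's real one: sorting by name before running
makes every invocation process linters in the same, reproducible order.

// Hypothetical sketch: deterministic linter execution order via sort.Slice.
package runner

import "sort"

type Linter struct {
	Name string
	Run  func() error
}

// runAll sorts the linters by name so each run analyzes them in the same
// order, keeping logs and timing reports reproducible.
func runAll(linters []Linter) error {
	sort.Slice(linters, func(i, j int) bool {
		return linters[i].Name < linters[j].Name
	})
	for _, lnt := range linters {
		if err := lnt.Run(); err != nil {
			return err
		}
	}
	return nil
}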
Relates: #987, #994, #995, #1011
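
The "use robustio in caching" bullet amounts to retrying cache I/O when the
failure is one robustio classifies as ephemeral (e.g. Windows "Access is
denied" under heavy rename load). A hedged sketch; saveToCache and the retry
count are made up, only robustio.IsEphemeralError is the real helper:

// Hypothetical sketch: retry a cache write a few times on ephemeral errors.
package cache

import (
	"time"

	"github.com/golangci/golangci-lint/internal/robustio"
)

func saveToCache(write func() error) error {
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		if err = write(); err == nil || !robustio.IsEphemeralError(err) {
			return err
		}
		time.Sleep(10 * time.Millisecond) // brief backoff before retrying
	}
	return err
}

The updated concurrency test for the internal renameio package, shown below,
relies on the same robustio.IsEphemeralError classification: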
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !plan9

package renameio

import (
	"encoding/binary"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"sync/atomic"
	"syscall"
	"testing"
	"time"

	"github.com/golangci/golangci-lint/internal/robustio"
)

func TestConcurrentReadsAndWrites(t *testing.T) {
	dir, err := ioutil.TempDir("", "renameio")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	path := filepath.Join(dir, "blob.bin")

	const chunkWords = 8 << 10
	buf := make([]byte, 2*chunkWords*8)
	for i := uint64(0); i < 2*chunkWords; i++ {
		binary.LittleEndian.PutUint64(buf[i*8:], i)
	}

	var attempts int64 = 128
	if !testing.Short() {
		attempts *= 16
	}
	const parallel = 32

	var sem = make(chan bool, parallel)

	var (
		writeSuccesses, readSuccesses int64 // atomic
		writeErrnoSeen, readErrnoSeen sync.Map
	)

	for n := attempts; n > 0; n-- {
		sem <- true
		go func() {
			defer func() { <-sem }()

			time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
			offset := rand.Intn(chunkWords)
			chunk := buf[offset*8 : (offset+chunkWords)*8]
			if err := WriteFile(path, chunk, 0666); err == nil {
				atomic.AddInt64(&writeSuccesses, 1)
			} else if robustio.IsEphemeralError(err) {
				var (
					dup bool
				)
				if errno, ok := err.(syscall.Errno); ok {
					_, dup = writeErrnoSeen.LoadOrStore(errno, true)
				}
				if !dup {
					t.Logf("ephemeral error: %v", err)
				}
			} else {
				t.Errorf("unexpected error: %v", err)
			}

			time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
			data, err := ReadFile(path)
			if err == nil {
				atomic.AddInt64(&readSuccesses, 1)
			} else if robustio.IsEphemeralError(err) {
				var (
					dup bool
				)
				if errno, ok := err.(syscall.Errno); ok {
					_, dup = readErrnoSeen.LoadOrStore(errno, true)
				}
				if !dup {
					t.Logf("ephemeral error: %v", err)
				}
				return
			} else {
				t.Errorf("unexpected error: %v", err)
				return
			}

			if len(data) != 8*chunkWords {
				t.Errorf("read %d bytes, but each write is a %d-byte file", len(data), 8*chunkWords)
				return
			}

			u := binary.LittleEndian.Uint64(data)
			for i := 1; i < chunkWords; i++ {
				next := binary.LittleEndian.Uint64(data[i*8:])
				if next != u+1 {
					t.Errorf("wrote sequential integers, but read integer out of sequence at offset %d", i)
					return
				}
				u = next
			}
		}()
	}

	for n := parallel; n > 0; n-- {
		sem <- true
	}

	var minWriteSuccesses int64 = attempts
	if runtime.GOOS == "windows" {
		// Windows produces frequent "Access is denied" errors under heavy rename load.
		// As long as those are the only errors and *some* of the writes succeed, we're happy.
		minWriteSuccesses = attempts / 4
	}

	if writeSuccesses < minWriteSuccesses {
		t.Errorf("%d (of %d) writes succeeded; want ≥ %d", writeSuccesses, attempts, minWriteSuccesses)
	} else {
		t.Logf("%d (of %d) writes succeeded (ok: ≥ %d)", writeSuccesses, attempts, minWriteSuccesses)
	}

	var minReadSuccesses int64 = attempts

	switch runtime.GOOS {
	case "windows":
		// Windows produces frequent "Access is denied" errors under heavy rename load.
		// As long as those are the only errors and *some* of the reads succeed, we're happy.
		minReadSuccesses = attempts / 4

	case "darwin":
		// The filesystem on macOS 10.14 occasionally fails with "no such file or
		// directory" errors. See https://golang.org/issue/33041. The flake rate is
		// fairly low, so ensure that at least 75% of attempts succeed.
		minReadSuccesses = attempts - (attempts / 4)
	}

	if readSuccesses < minReadSuccesses {
		t.Errorf("%d (of %d) reads succeeded; want ≥ %d", readSuccesses, attempts, minReadSuccesses)
	} else {
		t.Logf("%d (of %d) reads succeeded (ok: ≥ %d)", readSuccesses, attempts, minReadSuccesses)
	}
}