@@ -25,22 +25,18 @@ import (
 	"runtime"
 	"runtime/debug"
 	"sort"
-	"strings"
 	"sync"
 	"sync/atomic"
 	"time"

 	"cuelang.org/go/internal/golangorgx/gopls/cache/metadata"
 	"cuelang.org/go/internal/golangorgx/gopls/file"
 	"cuelang.org/go/internal/golangorgx/gopls/filecache"
-	"cuelang.org/go/internal/golangorgx/gopls/progress"
 	"cuelang.org/go/internal/golangorgx/gopls/protocol"
-	"cuelang.org/go/internal/golangorgx/gopls/settings"
 	"cuelang.org/go/internal/golangorgx/gopls/util/astutil"
 	"cuelang.org/go/internal/golangorgx/gopls/util/bug"
 	"cuelang.org/go/internal/golangorgx/gopls/util/frob"
 	"cuelang.org/go/internal/golangorgx/tools/event"
-	"cuelang.org/go/internal/golangorgx/tools/event/tag"
 	"cuelang.org/go/internal/golangorgx/tools/facts"
 	"cuelang.org/go/internal/golangorgx/tools/gcimporter"
 	"cuelang.org/go/internal/golangorgx/tools/typesinternal"
@@ -166,314 +162,6 @@ import (
 // feature.
 const AnalysisProgressTitle = "Analyzing Dependencies"

-// Analyze applies a set of analyzers to the package denoted by id,
-// and returns their diagnostics for that package.
-//
-// The analyzers list must be duplicate free; order does not matter.
-//
-// Notifications of progress may be sent to the optional reporter.
-func (s *Snapshot) Analyze(ctx context.Context, pkgs map[PackageID]*metadata.Package, analyzers []*settings.Analyzer, reporter *progress.Tracker) ([]*Diagnostic, error) {
-	start := time.Now() // for progress reporting
-
-	var tagStr string // sorted comma-separated list of PackageIDs
-	{
-		// TODO(adonovan): replace with a generic map[S]any -> string
-		// function in the tag package, and use maps.Keys + slices.Sort.
-		keys := make([]string, 0, len(pkgs))
-		for id := range pkgs {
-			keys = append(keys, string(id))
-		}
-		sort.Strings(keys)
-		tagStr = strings.Join(keys, ",")
-	}
-	ctx, done := event.Start(ctx, "snapshot.Analyze", tag.Package.Of(tagStr))
-	defer done()
-
-	// Filter and sort enabled root analyzers.
-	// A disabled analyzer may still be run if required by another.
-	toSrc := make(map[*analysis.Analyzer]*settings.Analyzer)
-	var enabled []*analysis.Analyzer // enabled subset + transitive requirements
-	for _, a := range analyzers {
-		if a.IsEnabled(s.Options()) {
-			toSrc[a.Analyzer] = a
-			enabled = append(enabled, a.Analyzer)
-		}
-	}
-	sort.Slice(enabled, func(i, j int) bool {
-		return enabled[i].Name < enabled[j].Name
-	})
-	analyzers = nil // prevent accidental use
-
-	enabled = requiredAnalyzers(enabled)
-
-	// Perform basic sanity checks.
-	// (Ideally we would do this only once.)
-	if err := analysis.Validate(enabled); err != nil {
-		return nil, fmt.Errorf("invalid analyzer configuration: %v", err)
-	}
-
-	stableNames := make(map[*analysis.Analyzer]string)
-
-	var facty []*analysis.Analyzer // facty subset of enabled + transitive requirements
-	for _, a := range enabled {
-		// TODO(adonovan): reject duplicate stable names (very unlikely).
-		stableNames[a] = stableName(a)
-
-		// Register fact types of all required analyzers.
-		if len(a.FactTypes) > 0 {
-			facty = append(facty, a)
-			for _, f := range a.FactTypes {
-				gob.Register(f) // <2us
-			}
-		}
-	}
-	facty = requiredAnalyzers(facty)
-
-	// File set for this batch (entire graph) of analysis.
-	fset := token.NewFileSet()
-
-	// Starting from the root packages and following DepsByPkgPath,
-	// build the DAG of packages we're going to analyze.
-	//
-	// Root nodes will run the enabled set of analyzers,
-	// whereas dependencies will run only the facty set.
-	// Because (by construction) enabled is a superset of facty,
-	// we can analyze each node with exactly one set of analyzers.
-	nodes := make(map[PackageID]*analysisNode)
-	var leaves []*analysisNode // nodes with no unfinished successors
-	var makeNode func(from *analysisNode, id PackageID) (*analysisNode, error)
-	makeNode = func(from *analysisNode, id PackageID) (*analysisNode, error) {
-		an, ok := nodes[id]
-		if !ok {
-			mp := s.Metadata(id)
-			if mp == nil {
-				return nil, bug.Errorf("no metadata for %s", id)
-			}
-
-			// -- preorder --
-
-			an = &analysisNode{
-				fset:        fset,
-				mp:          mp,
-				analyzers:   facty, // all nodes run at least the facty analyzers
-				allDeps:     make(map[PackagePath]*analysisNode),
-				exportDeps:  make(map[PackagePath]*analysisNode),
-				stableNames: stableNames,
-			}
-			nodes[id] = an
-
-			// -- recursion --
-
-			// Build subgraphs for dependencies.
-			an.succs = make(map[PackageID]*analysisNode, len(mp.DepsByPkgPath))
-			for _, depID := range mp.DepsByPkgPath {
-				dep, err := makeNode(an, depID)
-				if err != nil {
-					return nil, err
-				}
-				an.succs[depID] = dep
-
-				// Compute the union of all dependencies.
-				// (This step has quadratic complexity.)
-				for pkgPath, node := range dep.allDeps {
-					an.allDeps[pkgPath] = node
-				}
-			}
-
-			// -- postorder --
-
-			an.allDeps[mp.PkgPath] = an // add self entry (reflexive transitive closure)
-
-			// Add leaf nodes (no successors) directly to queue.
-			if len(an.succs) == 0 {
-				leaves = append(leaves, an)
-			}
-
-			// Load the contents of each compiled Go file through
-			// the snapshot's cache. (These are all cache hits as
-			// files are pre-loaded following packages.Load)
-			an.files = make([]file.Handle, len(mp.CompiledGoFiles))
-			for i, uri := range mp.CompiledGoFiles {
-				fh, err := s.ReadFile(ctx, uri)
-				if err != nil {
-					return nil, err
-				}
-				an.files[i] = fh
-			}
-		}
-		// Add edge from predecessor.
-		if from != nil {
-			atomic.AddInt32(&from.unfinishedSuccs, 1) // TODO(adonovan): use generics
-			an.preds = append(an.preds, from)
-		}
-		atomic.AddInt32(&an.unfinishedPreds, 1)
-		return an, nil
-	}
-
-	// For root packages, we run the enabled set of analyzers.
-	var roots []*analysisNode
-	for id := range pkgs {
-		root, err := makeNode(nil, id)
-		if err != nil {
-			return nil, err
-		}
-		root.analyzers = enabled
-		roots = append(roots, root)
-	}
-
-	// Now that we have read all files,
-	// we no longer need the snapshot.
-	// (but options are needed for progress reporting)
-	options := s.Options()
-	s = nil
-
-	// Progress reporting. If supported, gopls reports progress on analysis
-	// passes that are taking a long time.
-	maybeReport := func(completed int64) {}
-
-	// Enable progress reporting if enabled by the user
-	// and we have a capable reporter.
-	if reporter != nil && reporter.SupportsWorkDoneProgress() && options.AnalysisProgressReporting {
-		var reportAfter = options.ReportAnalysisProgressAfter // tests may set this to 0
-		const reportEvery = 1 * time.Second
-
-		ctx, cancel := context.WithCancel(ctx)
-		defer cancel()
-
-		var (
-			reportMu   sync.Mutex
-			lastReport time.Time
-			wd         *progress.WorkDone
-		)
-		defer func() {
-			reportMu.Lock()
-			defer reportMu.Unlock()
-
-			if wd != nil {
-				wd.End(ctx, "Done.") // ensure that the progress report exits
-			}
-		}()
-		maybeReport = func(completed int64) {
-			now := time.Now()
-			if now.Sub(start) < reportAfter {
-				return
-			}
-
-			reportMu.Lock()
-			defer reportMu.Unlock()
-
-			if wd == nil {
-				wd = reporter.Start(ctx, AnalysisProgressTitle, "", nil, cancel)
-			}
-
-			if now.Sub(lastReport) > reportEvery {
-				lastReport = now
-				// Trailing space is intentional: some LSP clients strip newlines.
-				msg := fmt.Sprintf(`Indexed %d/%d packages. (Set "analysisProgressReporting" to false to disable notifications.)`,
-					completed, len(nodes))
-				pct := 100 * float64(completed) / float64(len(nodes))
-				wd.Report(ctx, msg, pct)
-			}
-		}
-	}
-
-	// Execute phase: run leaves first, adding
-	// new nodes to the queue as they become leaves.
-	var g errgroup.Group
-
-	// Analysis is CPU-bound.
-	//
-	// Note: avoid g.SetLimit here: it makes g.Go stop accepting work, which
-	// prevents workers from enqeuing, and thus finishing, and thus allowing the
-	// group to make progress: deadlock.
-	limiter := make(chan unit, runtime.GOMAXPROCS(0))
-	var completed int64
-
-	var enqueue func(*analysisNode)
-	enqueue = func(an *analysisNode) {
-		g.Go(func() error {
-			limiter <- unit{}
-			defer func() { <-limiter }()
-
-			summary, err := an.runCached(ctx)
-			if err != nil {
-				return err // cancelled, or failed to produce a package
-			}
-			maybeReport(atomic.AddInt64(&completed, 1))
-			an.summary = summary
-
-			// Notify each waiting predecessor,
-			// and enqueue it when it becomes a leaf.
-			for _, pred := range an.preds {
-				if atomic.AddInt32(&pred.unfinishedSuccs, -1) == 0 {
-					enqueue(pred)
-				}
-			}
-
-			// Notify each successor that we no longer need
-			// its action summaries, which hold Result values.
-			// After the last one, delete it, so that we
-			// free up large results such as SSA.
-			for _, succ := range an.succs {
-				succ.decrefPreds()
-			}
-			return nil
-		})
-	}
-	for _, leaf := range leaves {
-		enqueue(leaf)
-	}
-	if err := g.Wait(); err != nil {
-		return nil, err // cancelled, or failed to produce a package
-	}
-
-	// Report diagnostics only from enabled actions that succeeded.
-	// Errors from creating or analyzing packages are ignored.
-	// Diagnostics are reported in the order of the analyzers argument.
-	//
-	// TODO(adonovan): ignoring action errors gives the caller no way
-	// to distinguish "there are no problems in this code" from
-	// "the code (or analyzers!) are so broken that we couldn't even
-	// begin the analysis you asked for".
-	// Even if current callers choose to discard the
-	// results, we should propagate the per-action errors.
-	var results []*Diagnostic
-	for _, root := range roots {
-		for _, a := range enabled {
-			// Skip analyzers that were added only to
-			// fulfil requirements of the original set.
-			srcAnalyzer, ok := toSrc[a]
-			if !ok {
-				// Although this 'skip' operation is logically sound,
-				// it is nonetheless surprising that its absence should
-				// cause #60909 since none of the analyzers currently added for
-				// requirements (e.g. ctrlflow, inspect, buildssa)
-				// is capable of reporting diagnostics.
-				if summary := root.summary.Actions[stableNames[a]]; summary != nil {
-					if n := len(summary.Diagnostics); n > 0 {
-						bug.Reportf("Internal error: got %d unexpected diagnostics from analyzer %s. This analyzer was added only to fulfil the requirements of the requested set of analyzers, and it is not expected that such analyzers report diagnostics. Please report this in issue #60909.", n, a)
-					}
-				}
-				continue
-			}
-
-			// Inv: root.summary is the successful result of run (via runCached).
-			summary, ok := root.summary.Actions[stableNames[a]]
-			if summary == nil {
-				panic(fmt.Sprintf("analyzeSummary.Actions[%q] = (nil, %t); got %v (#60551)",
-					stableNames[a], ok, root.summary.Actions))
-			}
-			if summary.Err != "" {
-				continue // action failed
-			}
-			for _, gobDiag := range summary.Diagnostics {
-				results = append(results, toSourceDiagnostic(srcAnalyzer, &gobDiag))
-			}
-		}
-	}
-	return results, nil
-}
-
 func (an *analysisNode) decrefPreds() {
 	if atomic.AddInt32(&an.unfinishedPreds, -1) == 0 {
 		an.summary.Actions = nil
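
The removed code expands both the enabled and the facty analyzer sets through requiredAnalyzers before scheduling anything. That helper is not part of this diff, so the following is only a sketch of the closure its callers rely on ("enabled subset + transitive requirements"): a duplicate-free, breadth-first walk over analysis.Analyzer.Requires, written here against the upstream golang.org/x/tools/go/analysis package with two hypothetical analyzers.

package main

import (
	"fmt"

	"golang.org/x/tools/go/analysis"
)

// requiredClosure returns the given analyzers plus everything they
// transitively require, without duplicates. (Order is breadth-first here;
// the removed code does not depend on a particular order.)
func requiredClosure(analyzers []*analysis.Analyzer) []*analysis.Analyzer {
	seen := make(map[*analysis.Analyzer]bool)
	var out []*analysis.Analyzer
	queue := append([]*analysis.Analyzer(nil), analyzers...)
	for len(queue) > 0 {
		a := queue[0]
		queue = queue[1:]
		if seen[a] {
			continue
		}
		seen[a] = true
		out = append(out, a)
		queue = append(queue, a.Requires...)
	}
	return out
}

func main() {
	// Hypothetical analyzers: b requires a, so requesting b yields both.
	a := &analysis.Analyzer{Name: "a"}
	b := &analysis.Analyzer{Name: "b", Requires: []*analysis.Analyzer{a}}
	for _, x := range requiredClosure([]*analysis.Analyzer{b}) {
		fmt.Println(x.Name)
	}
}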
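The deleted progress-reporting block throttles notifications twice: nothing is sent until options.ReportAnalysisProgressAfter has elapsed since the start of the batch, and after that at most one report goes out per reportEvery interval (one second), guarded by a mutex. A minimal, self-contained sketch of that scheme follows; throttledReporter and its fields are hypothetical names, and fmt.Printf stands in for progress.WorkDone.Report.

package main

import (
	"fmt"
	"sync"
	"time"
)

// throttledReporter is a hypothetical name; it mirrors the shape of the
// removed maybeReport closure, not the progress package itself.
type throttledReporter struct {
	mu         sync.Mutex
	start      time.Time
	grace      time.Duration // cf. options.ReportAnalysisProgressAfter
	interval   time.Duration // cf. reportEvery (1s in the removed code)
	lastReport time.Time
	started    bool
}

// maybeReport stays silent during the grace period, then emits at most one
// message per interval.
func (r *throttledReporter) maybeReport(completed, total int64) {
	now := time.Now()
	if now.Sub(r.start) < r.grace {
		return
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	if !r.started {
		r.started = true // the removed code creates the WorkDone report here
	}
	if now.Sub(r.lastReport) > r.interval {
		r.lastReport = now
		pct := 100 * float64(completed) / float64(total)
		fmt.Printf("Indexed %d/%d packages (%.0f%%)\n", completed, total, pct) // stand-in for wd.Report
	}
}

func main() {
	r := &throttledReporter{start: time.Now(), grace: 0, interval: 200 * time.Millisecond}
	for i := int64(1); i <= 10; i++ {
		r.maybeReport(i, 10)
		time.Sleep(50 * time.Millisecond)
	}
}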
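The execute phase that was removed schedules the package DAG leaf-first: when a node finishes, it atomically decrements each predecessor's unfinishedSuccs counter and enqueues any predecessor that has just become a leaf, while a buffered channel of GOMAXPROCS tokens bounds the CPU-bound work (the source comment explains why errgroup.Group.SetLimit would deadlock here: workers themselves enqueue new work via g.Go). The sketch below reproduces only that scheduling skeleton over a hypothetical three-node chain; it is not the vendored implementation.

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

// node is a hypothetical stand-in for analysisNode: preds are the packages
// that depend on this one, unfinishedSuccs counts its unprocessed dependencies.
type node struct {
	id              string
	preds           []*node
	unfinishedSuccs int32
}

func main() {
	// Hypothetical chain: c depends on b, which depends on a.
	a := &node{id: "a"}
	b := &node{id: "b", unfinishedSuccs: 1}
	c := &node{id: "c", unfinishedSuccs: 1}
	a.preds = []*node{b}
	b.preds = []*node{c}

	var g errgroup.Group

	// Buffered channel as a semaphore: workers block here, not in g.Go, so a
	// running worker can always enqueue its predecessors (avoiding the
	// g.SetLimit deadlock noted in the removed code).
	limiter := make(chan struct{}, runtime.GOMAXPROCS(0))

	var enqueue func(*node)
	enqueue = func(n *node) {
		g.Go(func() error {
			limiter <- struct{}{}
			defer func() { <-limiter }()

			fmt.Println("processing", n.id) // stand-in for an.runCached

			// A predecessor becomes a leaf once its last dependency is done.
			for _, pred := range n.preds {
				if atomic.AddInt32(&pred.unfinishedSuccs, -1) == 0 {
					enqueue(pred)
				}
			}
			return nil
		})
	}

	// Start from the leaves: nodes with no unfinished dependencies.
	enqueue(a)
	if err := g.Wait(); err != nil {
		panic(err)
	}
}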