argo-cd

Форк
0
/
state.go 
1024 строки · 41.8 Кб
1
package controller
2

3
import (
4
	"context"
5
	"encoding/json"
6
	"errors"
7
	"fmt"
8
	"reflect"
9
	"strings"
10
	goSync "sync"
11
	"time"
12

13
	v1 "k8s.io/api/core/v1"
14

15
	"github.com/argoproj/gitops-engine/pkg/diff"
16
	"github.com/argoproj/gitops-engine/pkg/health"
17
	"github.com/argoproj/gitops-engine/pkg/sync"
18
	hookutil "github.com/argoproj/gitops-engine/pkg/sync/hook"
19
	"github.com/argoproj/gitops-engine/pkg/sync/ignore"
20
	resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
21
	"github.com/argoproj/gitops-engine/pkg/sync/syncwaves"
22
	kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube"
23
	log "github.com/sirupsen/logrus"
24
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
25
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
26
	"k8s.io/apimachinery/pkg/runtime/schema"
27
	"k8s.io/apimachinery/pkg/types"
28
	"k8s.io/client-go/tools/cache"
29

30
	"github.com/argoproj/argo-cd/v2/common"
31
	statecache "github.com/argoproj/argo-cd/v2/controller/cache"
32
	"github.com/argoproj/argo-cd/v2/controller/metrics"
33
	"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
34
	appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
35
	"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
36
	"github.com/argoproj/argo-cd/v2/util/argo"
37
	argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"
38
	appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"
39
	"github.com/argoproj/argo-cd/v2/util/db"
40
	"github.com/argoproj/argo-cd/v2/util/gpg"
41
	"github.com/argoproj/argo-cd/v2/util/io"
42
	"github.com/argoproj/argo-cd/v2/util/settings"
43
	"github.com/argoproj/argo-cd/v2/util/stats"
44
)
45

46
// CompareStateRepoError is a sentinel error returned by CompareAppState when
// the target manifests could not be retrieved from the repo server. Callers
// should compare with errors.Is. (The name is kept for backward compatibility
// even though Go convention would prefer an ErrXxx prefix.)
var CompareStateRepoError = errors.New("failed to get repo objects")
49

50
type resourceInfoProviderStub struct {
51
}
52

53
func (r *resourceInfoProviderStub) IsNamespaced(_ schema.GroupKind) (bool, error) {
54
	return false, nil
55
}
56

57
type managedResource struct {
58
	Target          *unstructured.Unstructured
59
	Live            *unstructured.Unstructured
60
	Diff            diff.DiffResult
61
	Group           string
62
	Version         string
63
	Kind            string
64
	Namespace       string
65
	Name            string
66
	Hook            bool
67
	ResourceVersion string
68
}
69

70
// AppStateManager defines methods which allow to compare application spec and actual application state.
71
type AppStateManager interface {
72
	CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
73
	SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
74
	GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error)
75
}
76

77
// comparisonResult holds the state of an application after the reconciliation
78
type comparisonResult struct {
79
	syncStatus           *v1alpha1.SyncStatus
80
	healthStatus         *v1alpha1.HealthStatus
81
	resources            []v1alpha1.ResourceStatus
82
	managedResources     []managedResource
83
	reconciliationResult sync.ReconciliationResult
84
	diffConfig           argodiff.DiffConfig
85
	appSourceType        v1alpha1.ApplicationSourceType
86
	// appSourceTypes stores the SourceType for each application source under sources field
87
	appSourceTypes []v1alpha1.ApplicationSourceType
88
	// timings maps phases of comparison to the duration it took to complete (for statistical purposes)
89
	timings            map[string]time.Duration
90
	diffResultList     *diff.DiffResultList
91
	hasPostDeleteHooks bool
92
}
93

94
// GetSyncStatus returns the sync status computed during the comparison.
func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus {
	return res.syncStatus
}
97

98
// GetHealthStatus returns the aggregated health status computed during the comparison.
func (res *comparisonResult) GetHealthStatus() *v1alpha1.HealthStatus {
	return res.healthStatus
}
101

102
// appStateManager allows to compare applications to git
103
type appStateManager struct {
104
	metricsServer         *metrics.MetricsServer
105
	db                    db.ArgoDB
106
	settingsMgr           *settings.SettingsManager
107
	appclientset          appclientset.Interface
108
	projInformer          cache.SharedIndexInformer
109
	kubectl               kubeutil.Kubectl
110
	repoClientset         apiclient.Clientset
111
	liveStateCache        statecache.LiveStateCache
112
	cache                 *appstatecache.Cache
113
	namespace             string
114
	statusRefreshTimeout  time.Duration
115
	resourceTracking      argo.ResourceTracking
116
	persistResourceHealth bool
117
	repoErrorCache        goSync.Map
118
	repoErrorGracePeriod  time.Duration
119
	serverSideDiff        bool
120
}
121

122
// GetRepoObjs will generate the manifests for the given application delegating the
123
// task to the repo-server. It returns the list of generated manifests as unstructured
124
// objects. It also returns the full response from all calls to the repo server as the
125
// second argument.
126
func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {
127
	ts := stats.NewTimingStats()
128
	helmRepos, err := m.db.ListHelmRepositories(context.Background())
129
	if err != nil {
130
		return nil, nil, fmt.Errorf("failed to list Helm repositories: %w", err)
131
	}
132
	permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos)
133
	if err != nil {
134
		return nil, nil, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err)
135
	}
136

137
	ts.AddCheckpoint("repo_ms")
138
	helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(context.Background())
139
	if err != nil {
140
		return nil, nil, fmt.Errorf("failed to get Helm credentials: %w", err)
141
	}
142
	permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials)
143
	if err != nil {
144
		return nil, nil, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err)
145
	}
146

147
	enabledSourceTypes, err := m.settingsMgr.GetEnabledSourceTypes()
148
	if err != nil {
149
		return nil, nil, fmt.Errorf("failed to get enabled source types: %w", err)
150
	}
151
	ts.AddCheckpoint("plugins_ms")
152

153
	kustomizeSettings, err := m.settingsMgr.GetKustomizeSettings()
154
	if err != nil {
155
		return nil, nil, fmt.Errorf("failed to get Kustomize settings: %w", err)
156
	}
157

158
	helmOptions, err := m.settingsMgr.GetHelmSettings()
159
	if err != nil {
160
		return nil, nil, fmt.Errorf("failed to get Helm settings: %w", err)
161
	}
162

163
	ts.AddCheckpoint("build_options_ms")
164
	serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)
165
	if err != nil {
166
		return nil, nil, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)
167
	}
168
	conn, repoClient, err := m.repoClientset.NewRepoServerClient()
169
	if err != nil {
170
		return nil, nil, fmt.Errorf("failed to connect to repo server: %w", err)
171
	}
172
	defer io.Close(conn)
173

174
	manifestInfos := make([]*apiclient.ManifestResponse, 0)
175
	targetObjs := make([]*unstructured.Unstructured, 0)
176

177
	// Store the map of all sources having ref field into a map for applications with sources field
178
	refSources, err := argo.GetRefSources(context.Background(), app.Spec, m.db)
179
	if err != nil {
180
		return nil, nil, fmt.Errorf("failed to get ref sources: %v", err)
181
	}
182

183
	for i, source := range sources {
184
		if len(revisions) < len(sources) || revisions[i] == "" {
185
			revisions[i] = source.TargetRevision
186
		}
187
		ts.AddCheckpoint("helm_ms")
188
		repo, err := m.db.GetRepository(context.Background(), source.RepoURL)
189
		if err != nil {
190
			return nil, nil, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)
191
		}
192
		kustomizeOptions, err := kustomizeSettings.GetOptions(source)
193
		if err != nil {
194
			return nil, nil, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)
195
		}
196

197
		ts.AddCheckpoint("version_ms")
198
		log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i])
199
		manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{
200
			Repo:               repo,
201
			Repos:              permittedHelmRepos,
202
			Revision:           revisions[i],
203
			NoCache:            noCache,
204
			NoRevisionCache:    noRevisionCache,
205
			AppLabelKey:        appLabelKey,
206
			AppName:            app.InstanceName(m.namespace),
207
			Namespace:          app.Spec.Destination.Namespace,
208
			ApplicationSource:  &source,
209
			KustomizeOptions:   kustomizeOptions,
210
			KubeVersion:        serverVersion,
211
			ApiVersions:        argo.APIResourcesToStrings(apiResources, true),
212
			VerifySignature:    verifySignature,
213
			HelmRepoCreds:      permittedHelmCredentials,
214
			TrackingMethod:     string(argo.GetTrackingMethod(m.settingsMgr)),
215
			EnabledSourceTypes: enabledSourceTypes,
216
			HelmOptions:        helmOptions,
217
			HasMultipleSources: app.Spec.HasMultipleSources(),
218
			RefSources:         refSources,
219
			ProjectName:        proj.Name,
220
			ProjectSourceRepos: proj.Spec.SourceRepos,
221
		})
222
		if err != nil {
223
			return nil, nil, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
224
		}
225

226
		targetObj, err := unmarshalManifests(manifestInfo.Manifests)
227

228
		if err != nil {
229
			return nil, nil, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err)
230
		}
231
		targetObjs = append(targetObjs, targetObj...)
232

233
		manifestInfos = append(manifestInfos, manifestInfo)
234
	}
235

236
	ts.AddCheckpoint("unmarshal_ms")
237
	logCtx := log.WithField("application", app.QualifiedName())
238
	for k, v := range ts.Timings() {
239
		logCtx = logCtx.WithField(k, v.Milliseconds())
240
	}
241
	logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
242
	logCtx.Info("GetRepoObjs stats")
243
	return targetObjs, manifestInfos, nil
244
}
245

246
// unmarshalManifests converts a list of serialized manifest strings into
// unstructured objects, failing fast on the first manifest that cannot be
// parsed.
func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {
	// Pre-size to avoid repeated slice growth; every manifest yields one object.
	targetObjs := make([]*unstructured.Unstructured, 0, len(manifests))
	for _, manifest := range manifests {
		obj, err := v1alpha1.UnmarshalToUnstructured(manifest)
		if err != nil {
			return nil, err
		}
		targetObjs = append(targetObjs, obj)
	}
	return targetObjs, nil
}
257

258
func DeduplicateTargetObjects(
259
	namespace string,
260
	objs []*unstructured.Unstructured,
261
	infoProvider kubeutil.ResourceInfoProvider,
262
) ([]*unstructured.Unstructured, []v1alpha1.ApplicationCondition, error) {
263

264
	targetByKey := make(map[kubeutil.ResourceKey][]*unstructured.Unstructured)
265
	for i := range objs {
266
		obj := objs[i]
267
		if obj == nil {
268
			continue
269
		}
270
		isNamespaced := kubeutil.IsNamespacedOrUnknown(infoProvider, obj.GroupVersionKind().GroupKind())
271
		if !isNamespaced {
272
			obj.SetNamespace("")
273
		} else if obj.GetNamespace() == "" {
274
			obj.SetNamespace(namespace)
275
		}
276
		key := kubeutil.GetResourceKey(obj)
277
		if key.Name == "" && obj.GetGenerateName() != "" {
278
			key.Name = fmt.Sprintf("%s%d", obj.GetGenerateName(), i)
279
		}
280
		targetByKey[key] = append(targetByKey[key], obj)
281
	}
282
	conditions := make([]v1alpha1.ApplicationCondition, 0)
283
	result := make([]*unstructured.Unstructured, 0)
284
	for key, targets := range targetByKey {
285
		if len(targets) > 1 {
286
			now := metav1.Now()
287
			conditions = append(conditions, v1alpha1.ApplicationCondition{
288
				Type:               v1alpha1.ApplicationConditionRepeatedResourceWarning,
289
				Message:            fmt.Sprintf("Resource %s appeared %d times among application resources.", key.String(), len(targets)),
290
				LastTransitionTime: &now,
291
			})
292
		}
293
		result = append(result, targets[len(targets)-1])
294
	}
295

296
	return result, conditions, nil
297
}
298

299
// getComparisonSettings will return the system level settings related to the
300
// diff/normalization process.
301
func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, error) {
302
	resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
303
	if err != nil {
304
		return "", nil, nil, err
305
	}
306
	appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
307
	if err != nil {
308
		return "", nil, nil, err
309
	}
310
	resFilter, err := m.settingsMgr.GetResourcesFilter()
311
	if err != nil {
312
		return "", nil, nil, err
313
	}
314
	return appLabelKey, resourceOverrides, resFilter, nil
315
}
316

317
// verifyGnuPGSignature verifies the result of a GnuPG operation for a given git
318
// revision.
319
func verifyGnuPGSignature(revision string, project *v1alpha1.AppProject, manifestInfo *apiclient.ManifestResponse) []v1alpha1.ApplicationCondition {
320
	now := metav1.Now()
321
	conditions := make([]v1alpha1.ApplicationCondition, 0)
322
	// We need to have some data in the verification result to parse, otherwise there was no signature
323
	if manifestInfo.VerifyResult != "" {
324
		verifyResult := gpg.ParseGitCommitVerification(manifestInfo.VerifyResult)
325
		switch verifyResult.Result {
326
		case gpg.VerifyResultGood:
327
			// This is the only case we allow to sync to, but we need to make sure signing key is allowed
328
			validKey := false
329
			for _, k := range project.Spec.SignatureKeys {
330
				if gpg.KeyID(k.KeyID) == gpg.KeyID(verifyResult.KeyID) && gpg.KeyID(k.KeyID) != "" {
331
					validKey = true
332
					break
333
				}
334
			}
335
			if !validKey {
336
				msg := fmt.Sprintf("Found good signature made with %s key %s, but this key is not allowed in AppProject",
337
					verifyResult.Cipher, verifyResult.KeyID)
338
				conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
339
			}
340
		case gpg.VerifyResultInvalid:
341
			msg := fmt.Sprintf("Found signature made with %s key %s, but verification result was invalid: '%s'",
342
				verifyResult.Cipher, verifyResult.KeyID, verifyResult.Message)
343
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
344
		default:
345
			msg := fmt.Sprintf("Could not verify commit signature on revision '%s', check logs for more information.", revision)
346
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
347
		}
348
	} else {
349
		msg := fmt.Sprintf("Target revision %s in Git is not signed, but a signature is required", revision)
350
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
351
	}
352

353
	return conditions
354
}
355

356
// isManagedNamespace reports whether ns is the application's destination
// namespace AND the application manages namespace metadata via its sync
// policy's managedNamespaceMetadata field. Such namespaces need special
// bookkeeping during comparison so they are neither pruned nor flagged
// OutOfSync.
func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application) bool {
	return ns != nil && ns.GetKind() == kubeutil.NamespaceKind && ns.GetName() == app.Spec.Destination.Namespace && app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.ManagedNamespaceMetadata != nil
}
359

360
// CompareAppState compares application git state to the live app state, using the specified
361
// revision and supplied source. If revision or overrides are empty, then compares against
362
// revision and overrides in the app spec.
363
func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
364
	ts := stats.NewTimingStats()
365
	appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()
366

367
	ts.AddCheckpoint("settings_ms")
368

369
	// return unknown comparison result if basic comparison settings cannot be loaded
370
	if err != nil {
371
		if hasMultipleSources {
372
			return &comparisonResult{
373
				syncStatus: &v1alpha1.SyncStatus{
374
					ComparedTo: v1alpha1.ComparedTo{Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences},
375
					Status:     v1alpha1.SyncStatusCodeUnknown,
376
					Revisions:  revisions,
377
				},
378
				healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
379
			}, nil
380
		} else {
381
			return &comparisonResult{
382
				syncStatus: &v1alpha1.SyncStatus{
383
					ComparedTo: v1alpha1.ComparedTo{Source: sources[0], Destination: app.Spec.Destination, IgnoreDifferences: app.Spec.IgnoreDifferences},
384
					Status:     v1alpha1.SyncStatusCodeUnknown,
385
					Revision:   revisions[0],
386
				},
387
				healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},
388
			}, nil
389
		}
390
	}
391

392
	// When signature keys are defined in the project spec, we need to verify the signature on the Git revision
393
	verifySignature := false
394
	if project.Spec.SignatureKeys != nil && len(project.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled() {
395
		verifySignature = true
396
	}
397

398
	// do best effort loading live and target state to present as much information about app state as possible
399
	failedToLoadObjs := false
400
	conditions := make([]v1alpha1.ApplicationCondition, 0)
401

402
	logCtx := log.WithField("application", app.QualifiedName())
403
	logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
404

405
	var targetObjs []*unstructured.Unstructured
406
	now := metav1.Now()
407

408
	var manifestInfos []*apiclient.ManifestResponse
409
	targetNsExists := false
410

411
	if len(localManifests) == 0 {
412
		// If the length of revisions is not same as the length of sources,
413
		// we take the revisions from the sources directly for all the sources.
414
		if len(revisions) != len(sources) {
415
			revisions = make([]string, 0)
416
			for _, source := range sources {
417
				revisions = append(revisions, source.TargetRevision)
418
			}
419
		}
420

421
		targetObjs, manifestInfos, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project)
422
		if err != nil {
423
			targetObjs = make([]*unstructured.Unstructured, 0)
424
			msg := fmt.Sprintf("Failed to load target state: %s", err.Error())
425
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
426
			if firstSeen, ok := m.repoErrorCache.Load(app.Name); ok {
427
				if time.Since(firstSeen.(time.Time)) <= m.repoErrorGracePeriod && !noRevisionCache {
428
					// if first seen is less than grace period and it's not a Level 3 comparison,
429
					// ignore error and short circuit
430
					logCtx.Debugf("Ignoring repo error %v, already encountered error in grace period", err.Error())
431
					return nil, CompareStateRepoError
432
				}
433
			} else if !noRevisionCache {
434
				logCtx.Debugf("Ignoring repo error %v, new occurrence", err.Error())
435
				m.repoErrorCache.Store(app.Name, time.Now())
436
				return nil, CompareStateRepoError
437
			}
438
			failedToLoadObjs = true
439
		} else {
440
			m.repoErrorCache.Delete(app.Name)
441
		}
442
	} else {
443
		// Prevent applying local manifests for now when signature verification is enabled
444
		// This is also enforced on API level, but as a last resort, we also enforce it here
445
		if gpg.IsGPGEnabled() && verifySignature {
446
			msg := "Cannot use local manifests when signature verification is required"
447
			targetObjs = make([]*unstructured.Unstructured, 0)
448
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
449
			failedToLoadObjs = true
450
		} else {
451
			targetObjs, err = unmarshalManifests(localManifests)
452
			if err != nil {
453
				targetObjs = make([]*unstructured.Unstructured, 0)
454
				msg := fmt.Sprintf("Failed to load local manifests: %s", err.Error())
455
				conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
456
				failedToLoadObjs = true
457
			}
458
		}
459
		// empty out manifestInfoMap
460
		manifestInfos = make([]*apiclient.ManifestResponse, 0)
461
	}
462
	ts.AddCheckpoint("git_ms")
463

464
	var infoProvider kubeutil.ResourceInfoProvider
465
	infoProvider, err = m.liveStateCache.GetClusterCache(app.Spec.Destination.Server)
466
	if err != nil {
467
		infoProvider = &resourceInfoProviderStub{}
468
	}
469
	targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Namespace, targetObjs, infoProvider)
470
	if err != nil {
471
		msg := fmt.Sprintf("Failed to deduplicate target state: %s", err.Error())
472
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
473
	}
474
	conditions = append(conditions, dedupConditions...)
475
	for i := len(targetObjs) - 1; i >= 0; i-- {
476
		targetObj := targetObjs[i]
477
		gvk := targetObj.GroupVersionKind()
478
		if resFilter.IsExcludedResource(gvk.Group, gvk.Kind, app.Spec.Destination.Server) {
479
			targetObjs = append(targetObjs[:i], targetObjs[i+1:]...)
480
			conditions = append(conditions, v1alpha1.ApplicationCondition{
481
				Type:               v1alpha1.ApplicationConditionExcludedResourceWarning,
482
				Message:            fmt.Sprintf("Resource %s/%s %s is excluded in the settings", gvk.Group, gvk.Kind, targetObj.GetName()),
483
				LastTransitionTime: &now,
484
			})
485
		}
486

487
		// If we reach this path, this means that a namespace has been both defined in Git, as well in the
488
		// application's managedNamespaceMetadata. We want to ensure that this manifest is the one being used instead
489
		// of what is present in managedNamespaceMetadata.
490
		if isManagedNamespace(targetObj, app) {
491
			targetNsExists = true
492
		}
493
	}
494
	ts.AddCheckpoint("dedup_ms")
495

496
	liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)
497
	if err != nil {
498
		liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
499
		msg := fmt.Sprintf("Failed to load live state: %s", err.Error())
500
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
501
		failedToLoadObjs = true
502
	}
503

504
	logCtx.Debugf("Retrieved live manifests")
505

506
	// filter out all resources which are not permitted in the application project
507
	for k, v := range liveObjByKey {
508
		permitted, err := project.IsLiveResourcePermitted(v, app.Spec.Destination.Server, app.Spec.Destination.Name, func(project string) ([]*v1alpha1.Cluster, error) {
509
			clusters, err := m.db.GetProjectClusters(context.TODO(), project)
510
			if err != nil {
511
				return nil, fmt.Errorf("failed to get clusters for project %q: %v", project, err)
512
			}
513
			return clusters, nil
514
		})
515

516
		if err != nil {
517
			msg := fmt.Sprintf("Failed to check if live resource %q is permitted in project %q: %s", k.String(), app.Spec.Project, err.Error())
518
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
519
			failedToLoadObjs = true
520
			continue
521
		}
522

523
		if !permitted {
524
			delete(liveObjByKey, k)
525
		}
526
	}
527

528
	trackingMethod := argo.GetTrackingMethod(m.settingsMgr)
529

530
	for _, liveObj := range liveObjByKey {
531
		if liveObj != nil {
532
			appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod)
533
			if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) {
534
				fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/")
535
				conditions = append(conditions, v1alpha1.ApplicationCondition{
536
					Type:               v1alpha1.ApplicationConditionSharedResourceWarning,
537
					Message:            fmt.Sprintf("%s/%s is part of applications %s and %s", liveObj.GetKind(), liveObj.GetName(), app.QualifiedName(), fqInstanceName),
538
					LastTransitionTime: &now,
539
				})
540
			}
541

542
			// For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking
543
			// enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some
544
			// bookkeeping in order to prevent the managed namespace from being pruned.
545
			//
546
			// Live namespaces which are managed namespaces (i.e. application namespaces which are managed with
547
			// CreateNamespace=true and has non-nil managedNamespaceMetadata) will (usually) not have a corresponding
548
			// entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a
549
			// namespace which we can compare the live namespace with. For that, we'll do the same as is done in
550
			// gitops-engine, the difference here being that we create a managed namespace which is only used for comparison.
551
			//
552
			// targetNsExists == true implies that it already exists as a target, so no need to add the namespace to the
553
			// targetObjs array.
554
			if isManagedNamespace(liveObj, app) && !targetNsExists {
555
				nsSpec := &v1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}}
556
				managedNs, err := kubeutil.ToUnstructured(nsSpec)
557

558
				if err != nil {
559
					conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
560
					failedToLoadObjs = true
561
					continue
562
				}
563

564
				// No need to care about the return value here, we just want the modified managedNs
565
				_, err = syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy)(managedNs, liveObj)
566
				if err != nil {
567
					conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
568
					failedToLoadObjs = true
569
				} else {
570
					targetObjs = append(targetObjs, managedNs)
571
				}
572
			}
573
		}
574
	}
575
	hasPostDeleteHooks := false
576
	for _, obj := range targetObjs {
577
		if isPostDeleteHook(obj) {
578
			hasPostDeleteHooks = true
579
		}
580
	}
581

582
	reconciliation := sync.Reconcile(targetObjs, liveObjByKey, app.Spec.Destination.Namespace, infoProvider)
583
	ts.AddCheckpoint("live_ms")
584

585
	compareOptions, err := m.settingsMgr.GetResourceCompareOptions()
586
	if err != nil {
587
		log.Warnf("Could not get compare options from ConfigMap (assuming defaults): %v", err)
588
		compareOptions = settings.GetDefaultDiffOptions()
589
	}
590
	manifestRevisions := make([]string, 0)
591

592
	for _, manifestInfo := range manifestInfos {
593
		manifestRevisions = append(manifestRevisions, manifestInfo.Revision)
594
	}
595

596
	serverSideDiff := m.serverSideDiff ||
597
		resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=true")
598

599
	// This allows turning SSD off for a given app if it is enabled at the
600
	// controller level
601
	if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=false") {
602
		serverSideDiff = false
603
	}
604

605
	useDiffCache := useDiffCache(noCache, manifestInfos, sources, app, manifestRevisions, m.statusRefreshTimeout, serverSideDiff, logCtx)
606

607
	diffConfigBuilder := argodiff.NewDiffConfigBuilder().
608
		WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).
609
		WithTracking(appLabelKey, string(trackingMethod))
610

611
	if useDiffCache {
612
		diffConfigBuilder.WithCache(m.cache, app.InstanceName(m.namespace))
613
	} else {
614
		diffConfigBuilder.WithNoCache()
615
	}
616

617
	if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "IncludeMutationWebhook=true") {
618
		diffConfigBuilder.WithIgnoreMutationWebhook(false)
619
	}
620

621
	gvkParser, err := m.getGVKParser(app.Spec.Destination.Server)
622
	if err != nil {
623
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
624
	}
625
	diffConfigBuilder.WithGVKParser(gvkParser)
626
	diffConfigBuilder.WithManager(common.ArgoCDSSAManager)
627

628
	diffConfigBuilder.WithServerSideDiff(serverSideDiff)
629

630
	if serverSideDiff {
631
		resourceOps, cleanup, err := m.getResourceOperations(app.Spec.Destination.Server)
632
		if err != nil {
633
			log.Errorf("CompareAppState error getting resource operations: %s", err)
634
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
635
		}
636
		defer cleanup()
637
		diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(resourceOps))
638
	}
639

640
	// enable structured merge diff if application syncs with server-side apply
641
	if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.SyncOptions.HasOption("ServerSideApply=true") {
642
		diffConfigBuilder.WithStructuredMergeDiff(true)
643
	}
644

645
	// it is necessary to ignore the error at this point to avoid creating duplicated
646
	// application conditions as argo.StateDiffs will validate this diffConfig again.
647
	diffConfig, _ := diffConfigBuilder.Build()
648

649
	diffResults, err := argodiff.StateDiffs(reconciliation.Live, reconciliation.Target, diffConfig)
650
	if err != nil {
651
		diffResults = &diff.DiffResultList{}
652
		failedToLoadObjs = true
653
		msg := fmt.Sprintf("Failed to compare desired state to live state: %s", err.Error())
654
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
655
	}
656
	ts.AddCheckpoint("diff_ms")
657

658
	syncCode := v1alpha1.SyncStatusCodeSynced
659
	managedResources := make([]managedResource, len(reconciliation.Target))
660
	resourceSummaries := make([]v1alpha1.ResourceStatus, len(reconciliation.Target))
661
	for i, targetObj := range reconciliation.Target {
662
		liveObj := reconciliation.Live[i]
663
		obj := liveObj
664
		if obj == nil {
665
			obj = targetObj
666
		}
667
		if obj == nil {
668
			continue
669
		}
670
		gvk := obj.GroupVersionKind()
671

672
		isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)
673

674
		resState := v1alpha1.ResourceStatus{
675
			Namespace:       obj.GetNamespace(),
676
			Name:            obj.GetName(),
677
			Kind:            gvk.Kind,
678
			Version:         gvk.Version,
679
			Group:           gvk.Group,
680
			Hook:            isHook(obj),
681
			RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj,
682
		}
683
		if targetObj != nil {
684
			resState.SyncWave = int64(syncwaves.Wave(targetObj))
685
		}
686

687
		var diffResult diff.DiffResult
688
		if i < len(diffResults.Diffs) {
689
			diffResult = diffResults.Diffs[i]
690
		} else {
691
			diffResult = diff.DiffResult{Modified: false, NormalizedLive: []byte("{}"), PredictedLive: []byte("{}")}
692
		}
693

694
		// For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking
695
		// enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some
696
		// bookkeeping in order to ensure that it's not considered `OutOfSync` (since it does not exist in source
697
		// control).
698
		//
699
		// This is in addition to the bookkeeping we do (see `isManagedNamespace` and its references) to prevent said
700
		// namespace from being pruned.
701
		isManagedNs := isManagedNamespace(targetObj, app) && liveObj == nil
702

703
		if resState.Hook || ignore.Ignore(obj) || (targetObj != nil && hookutil.Skip(targetObj)) || !isSelfReferencedObj {
704
			// For resource hooks, skipped resources or objects that may have
705
			// been created by another controller with annotations copied from
706
			// the source object, don't store sync status, and do not affect
707
			// overall sync status
708
		} else if !isManagedNs && (diffResult.Modified || targetObj == nil || liveObj == nil) {
709
			// Set resource state to OutOfSync since one of the following is true:
710
			// * target and live resource are different
711
			// * target resource not defined and live resource is extra
712
			// * target resource present but live resource is missing
713
			resState.Status = v1alpha1.SyncStatusCodeOutOfSync
714
			// we ignore the status if the obj needs pruning AND we have the annotation
715
			needsPruning := targetObj == nil && liveObj != nil
716
			if !(needsPruning && resourceutil.HasAnnotationOption(obj, common.AnnotationCompareOptions, "IgnoreExtraneous")) {
717
				syncCode = v1alpha1.SyncStatusCodeOutOfSync
718
			}
719
		} else {
720
			resState.Status = v1alpha1.SyncStatusCodeSynced
721
		}
722
		// set unknown status to all resource that are not permitted in the app project
723
		isNamespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, gvk.GroupKind())
724
		if !project.IsGroupKindPermitted(gvk.GroupKind(), isNamespaced && err == nil) {
725
			resState.Status = v1alpha1.SyncStatusCodeUnknown
726
		}
727

728
		if isNamespaced && obj.GetNamespace() == "" {
729
			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: fmt.Sprintf("Namespace for %s %s is missing.", obj.GetName(), gvk.String()), LastTransitionTime: &now})
730
		}
731

732
		// we can't say anything about the status if we were unable to get the target objects
733
		if failedToLoadObjs {
734
			resState.Status = v1alpha1.SyncStatusCodeUnknown
735
		}
736

737
		resourceVersion := ""
738
		if liveObj != nil {
739
			resourceVersion = liveObj.GetResourceVersion()
740
		}
741
		managedResources[i] = managedResource{
742
			Name:            resState.Name,
743
			Namespace:       resState.Namespace,
744
			Group:           resState.Group,
745
			Kind:            resState.Kind,
746
			Version:         resState.Version,
747
			Live:            liveObj,
748
			Target:          targetObj,
749
			Diff:            diffResult,
750
			Hook:            resState.Hook,
751
			ResourceVersion: resourceVersion,
752
		}
753
		resourceSummaries[i] = resState
754
	}
755

756
	if failedToLoadObjs {
757
		syncCode = v1alpha1.SyncStatusCodeUnknown
758
	} else if app.HasChangedManagedNamespaceMetadata() {
759
		syncCode = v1alpha1.SyncStatusCodeOutOfSync
760
	}
761
	var revision string
762

763
	if !hasMultipleSources && len(manifestRevisions) > 0 {
764
		revision = manifestRevisions[0]
765
	}
766
	var syncStatus v1alpha1.SyncStatus
767
	if hasMultipleSources {
768
		syncStatus = v1alpha1.SyncStatus{
769
			ComparedTo: v1alpha1.ComparedTo{
770
				Destination:       app.Spec.Destination,
771
				Sources:           sources,
772
				IgnoreDifferences: app.Spec.IgnoreDifferences,
773
			},
774
			Status:    syncCode,
775
			Revisions: manifestRevisions,
776
		}
777
	} else {
778
		syncStatus = v1alpha1.SyncStatus{
779
			ComparedTo: v1alpha1.ComparedTo{
780
				Destination:       app.Spec.Destination,
781
				Source:            app.Spec.GetSource(),
782
				IgnoreDifferences: app.Spec.IgnoreDifferences,
783
			},
784
			Status:   syncCode,
785
			Revision: revision,
786
		}
787
	}
788

789
	ts.AddCheckpoint("sync_ms")
790

791
	healthStatus, err := setApplicationHealth(managedResources, resourceSummaries, resourceOverrides, app, m.persistResourceHealth)
792
	if err != nil {
793
		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: fmt.Sprintf("error setting app health: %s", err.Error()), LastTransitionTime: &now})
794
	}
795

796
	// Git has already performed the signature verification via its GPG interface, and the result is available
797
	// in the manifest info received from the repository server. We now need to form our opinion about the result
798
	// and stop processing if we do not agree about the outcome.
799
	for _, manifestInfo := range manifestInfos {
800
		if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil {
801
			conditions = append(conditions, verifyGnuPGSignature(manifestInfo.Revision, project, manifestInfo)...)
802
		}
803
	}
804

805
	compRes := comparisonResult{
806
		syncStatus:           &syncStatus,
807
		healthStatus:         healthStatus,
808
		resources:            resourceSummaries,
809
		managedResources:     managedResources,
810
		reconciliationResult: reconciliation,
811
		diffConfig:           diffConfig,
812
		diffResultList:       diffResults,
813
		hasPostDeleteHooks:   hasPostDeleteHooks,
814
	}
815

816
	if hasMultipleSources {
817
		for _, manifestInfo := range manifestInfos {
818
			compRes.appSourceTypes = append(compRes.appSourceTypes, v1alpha1.ApplicationSourceType(manifestInfo.SourceType))
819
		}
820
	} else {
821
		for _, manifestInfo := range manifestInfos {
822
			compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
823
			break
824
		}
825
	}
826

827
	app.Status.SetConditions(conditions, map[v1alpha1.ApplicationConditionType]bool{
828
		v1alpha1.ApplicationConditionComparisonError:         true,
829
		v1alpha1.ApplicationConditionSharedResourceWarning:   true,
830
		v1alpha1.ApplicationConditionRepeatedResourceWarning: true,
831
		v1alpha1.ApplicationConditionExcludedResourceWarning: true,
832
	})
833
	ts.AddCheckpoint("health_ms")
834
	compRes.timings = ts.Timings()
835
	return &compRes, nil
836
}
837

838
// useDiffCache will determine if the diff should be calculated based
839
// on the existing live state cache or not.
840
func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sources []v1alpha1.ApplicationSource, app *v1alpha1.Application, manifestRevisions []string, statusRefreshTimeout time.Duration, serverSideDiff bool, log *log.Entry) bool {
841

842
	if noCache {
843
		log.WithField("useDiffCache", "false").Debug("noCache is true")
844
		return false
845
	}
846
	refreshType, refreshRequested := app.IsRefreshRequested()
847
	if refreshRequested {
848
		log.WithField("useDiffCache", "false").Debugf("refresh type %s requested", string(refreshType))
849
		return false
850
	}
851
	// serverSideDiff should still use cache even if status is expired.
852
	// This is an attempt to avoid hitting k8s API server too frequently during
853
	// app refresh with serverSideDiff is enabled. If there are negative side
854
	// effects identified with this approach, the serverSideDiff should be removed
855
	// from this condition.
856
	if app.Status.Expired(statusRefreshTimeout) && !serverSideDiff {
857
		log.WithField("useDiffCache", "false").Debug("app.status.expired")
858
		return false
859
	}
860

861
	if len(manifestInfos) != len(sources) {
862
		log.WithField("useDiffCache", "false").Debug("manifestInfos len != sources len")
863
		return false
864
	}
865

866
	revisionChanged := !reflect.DeepEqual(app.Status.GetRevisions(), manifestRevisions)
867
	if revisionChanged {
868
		log.WithField("useDiffCache", "false").Debug("revisionChanged")
869
		return false
870
	}
871

872
	currentSpec := app.BuildComparedToStatus()
873
	specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)
874
	if specChanged {
875
		log.WithField("useDiffCache", "false").Debug("specChanged")
876
		return false
877
	}
878

879
	log.WithField("useDiffCache", "true").Debug("using diff cache")
880
	return true
881
}
882

883
func (m *appStateManager) persistRevisionHistory(
884
	app *v1alpha1.Application,
885
	revision string,
886
	source v1alpha1.ApplicationSource,
887
	revisions []string,
888
	sources []v1alpha1.ApplicationSource,
889
	hasMultipleSources bool,
890
	startedAt metav1.Time,
891
	initiatedBy v1alpha1.OperationInitiator,
892
) error {
893
	var nextID int64
894
	if len(app.Status.History) > 0 {
895
		nextID = app.Status.History.LastRevisionHistory().ID + 1
896
	}
897

898
	if hasMultipleSources {
899
		app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
900
			DeployedAt:      metav1.NewTime(time.Now().UTC()),
901
			DeployStartedAt: &startedAt,
902
			ID:              nextID,
903
			Sources:         sources,
904
			Revisions:       revisions,
905
			InitiatedBy:     initiatedBy,
906
		})
907
	} else {
908
		app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
909
			Revision:        revision,
910
			DeployedAt:      metav1.NewTime(time.Now().UTC()),
911
			DeployStartedAt: &startedAt,
912
			ID:              nextID,
913
			Source:          source,
914
			InitiatedBy:     initiatedBy,
915
		})
916
	}
917

918
	app.Status.History = app.Status.History.Trunc(app.Spec.GetRevisionHistoryLimit())
919

920
	patch, err := json.Marshal(map[string]map[string][]v1alpha1.RevisionHistory{
921
		"status": {
922
			"history": app.Status.History,
923
		},
924
	})
925
	if err != nil {
926
		return fmt.Errorf("error marshaling revision history patch: %w", err)
927
	}
928
	_, err = m.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
929
	return err
930
}
931

932
// NewAppStateManager creates new instance of AppStateManager
933
func NewAppStateManager(
934
	db db.ArgoDB,
935
	appclientset appclientset.Interface,
936
	repoClientset apiclient.Clientset,
937
	namespace string,
938
	kubectl kubeutil.Kubectl,
939
	settingsMgr *settings.SettingsManager,
940
	liveStateCache statecache.LiveStateCache,
941
	projInformer cache.SharedIndexInformer,
942
	metricsServer *metrics.MetricsServer,
943
	cache *appstatecache.Cache,
944
	statusRefreshTimeout time.Duration,
945
	resourceTracking argo.ResourceTracking,
946
	persistResourceHealth bool,
947
	repoErrorGracePeriod time.Duration,
948
	serverSideDiff bool,
949
) AppStateManager {
950
	return &appStateManager{
951
		liveStateCache:        liveStateCache,
952
		cache:                 cache,
953
		db:                    db,
954
		appclientset:          appclientset,
955
		kubectl:               kubectl,
956
		repoClientset:         repoClientset,
957
		namespace:             namespace,
958
		settingsMgr:           settingsMgr,
959
		projInformer:          projInformer,
960
		metricsServer:         metricsServer,
961
		statusRefreshTimeout:  statusRefreshTimeout,
962
		resourceTracking:      resourceTracking,
963
		persistResourceHealth: persistResourceHealth,
964
		repoErrorGracePeriod:  repoErrorGracePeriod,
965
		serverSideDiff:        serverSideDiff,
966
	}
967
}
968

969
// isSelfReferencedObj returns whether the given obj is managed by the application
970
// according to the values of the tracking id (aka app instance value) annotation.
971
// It returns true when all of the properties of the tracking id (app name, namespace,
972
// group and kind) match the properties of the live object, or if the tracking method
973
// used does not provide the required properties for matching.
974
// Reference: https://github.com/argoproj/argo-cd/issues/8683
975
func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {
976
	if live == nil {
977
		return true
978
	}
979

980
	// If tracking method doesn't contain required metadata for this check,
981
	// we are not able to determine and just assume the object to be managed.
982
	if trackingMethod == argo.TrackingMethodLabel {
983
		return true
984
	}
985

986
	// config != nil is the best-case scenario for constructing an accurate
987
	// Tracking ID. `config` is the "desired state" (from git/helm/etc.).
988
	// Using the desired state is important when there is an ApiGroup upgrade.
989
	// When upgrading, the comparison must be made with the new tracking ID.
990
	// Example:
991
	//     live resource annotation will be:
992
	//        ingress-app:extensions/Ingress:default/some-ingress
993
	//     when it should be:
994
	//        ingress-app:networking.k8s.io/Ingress:default/some-ingress
995
	// More details in: https://github.com/argoproj/argo-cd/pull/11012
996
	var aiv argo.AppInstanceValue
997
	if config != nil {
998
		aiv = argo.UnstructuredToAppInstanceValue(config, appName, "")
999
		return isSelfReferencedObj(live, aiv)
1000
	}
1001

1002
	// If config is nil then compare the live resource with the value
1003
	// of the annotation. In this case, in order to validate if obj is
1004
	// managed by this application, the values from the annotation have
1005
	// to match the properties from the live object. Cluster scoped objects
1006
	// carry the app's destination namespace in the tracking annotation,
1007
	// but are unique in GVK + name combination.
1008
	appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)
1009
	if appInstance != nil {
1010
		return isSelfReferencedObj(live, *appInstance)
1011
	}
1012
	return true
1013
}
1014

1015
// isSelfReferencedObj returns true if the given Tracking ID (`aiv`) matches
1016
// the given object. It returns false when the ID doesn't match. This sometimes
1017
// happens when a tracking label or annotation gets accidentally copied to a
1018
// different resource.
1019
func isSelfReferencedObj(obj *unstructured.Unstructured, aiv argo.AppInstanceValue) bool {
1020
	return (obj.GetNamespace() == aiv.Namespace || obj.GetNamespace() == "") &&
1021
		obj.GetName() == aiv.Name &&
1022
		obj.GetObjectKind().GroupVersionKind().Group == aiv.Group &&
1023
		obj.GetObjectKind().GroupVersionKind().Kind == aiv.Kind
1024
}
1025

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.