argo-cd
1024 lines · 41.8 KB
1package controller2
3import (4"context"5"encoding/json"6"errors"7"fmt"8"reflect"9"strings"10goSync "sync"11"time"12
13v1 "k8s.io/api/core/v1"14
15"github.com/argoproj/gitops-engine/pkg/diff"16"github.com/argoproj/gitops-engine/pkg/health"17"github.com/argoproj/gitops-engine/pkg/sync"18hookutil "github.com/argoproj/gitops-engine/pkg/sync/hook"19"github.com/argoproj/gitops-engine/pkg/sync/ignore"20resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"21"github.com/argoproj/gitops-engine/pkg/sync/syncwaves"22kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube"23log "github.com/sirupsen/logrus"24metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"25"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"26"k8s.io/apimachinery/pkg/runtime/schema"27"k8s.io/apimachinery/pkg/types"28"k8s.io/client-go/tools/cache"29
30"github.com/argoproj/argo-cd/v2/common"31statecache "github.com/argoproj/argo-cd/v2/controller/cache"32"github.com/argoproj/argo-cd/v2/controller/metrics"33"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"34appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"35"github.com/argoproj/argo-cd/v2/reposerver/apiclient"36"github.com/argoproj/argo-cd/v2/util/argo"37argodiff "github.com/argoproj/argo-cd/v2/util/argo/diff"38appstatecache "github.com/argoproj/argo-cd/v2/util/cache/appstate"39"github.com/argoproj/argo-cd/v2/util/db"40"github.com/argoproj/argo-cd/v2/util/gpg"41"github.com/argoproj/argo-cd/v2/util/io"42"github.com/argoproj/argo-cd/v2/util/settings"43"github.com/argoproj/argo-cd/v2/util/stats"44)
45
46var (47CompareStateRepoError = errors.New("failed to get repo objects")48)
49
50type resourceInfoProviderStub struct {51}
52
53func (r *resourceInfoProviderStub) IsNamespaced(_ schema.GroupKind) (bool, error) {54return false, nil55}
56
// managedResource pairs one desired (target, from git) resource with its live
// (cluster) counterpart, together with the computed diff and the identifying
// coordinates used throughout comparison.
type managedResource struct {
	// Target is the desired state rendered from the source; nil if the
	// resource only exists live.
	Target *unstructured.Unstructured
	// Live is the object currently in the cluster; nil if it has not been
	// created yet.
	Live *unstructured.Unstructured
	// Diff is the computed difference between Target and Live.
	Diff diff.DiffResult
	// Group/Version/Kind/Namespace/Name identify the resource.
	Group     string
	Version   string
	Kind      string
	Namespace string
	Name      string
	// Hook is true when the object is a sync hook rather than a managed resource.
	Hook bool
	// ResourceVersion is the live object's resourceVersion ("" when no live object).
	ResourceVersion string
}
69
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
	// CompareAppState compares the application's desired state (rendered from
	// the given sources at the given revisions) against the live cluster state.
	CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
	// SyncAppState performs a sync operation, recording progress in state
	// (implementation not visible in this file section).
	SyncAppState(app *v1alpha1.Application, state *v1alpha1.OperationState)
	// GetRepoObjs renders the manifests for all sources via the repo server and
	// returns both the parsed objects and the raw repo-server responses.
	GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error)
}
76
// comparisonResult holds the state of an application after the reconciliation
type comparisonResult struct {
	// syncStatus is the overall sync status computed for the application
	syncStatus *v1alpha1.SyncStatus
	// healthStatus is the aggregated health of the application's resources
	healthStatus *v1alpha1.HealthStatus
	// resources is the per-resource sync/health summary
	resources []v1alpha1.ResourceStatus
	// managedResources pairs each target object with its live counterpart and diff
	managedResources []managedResource
	// reconciliationResult is the raw target/live pairing from gitops-engine
	reconciliationResult sync.ReconciliationResult
	// diffConfig is the configuration that was used to produce the diffs
	diffConfig argodiff.DiffConfig
	// appSourceType is the detected source type for single-source applications
	appSourceType v1alpha1.ApplicationSourceType
	// appSourceTypes stores the SourceType for each application source under sources field
	appSourceTypes []v1alpha1.ApplicationSourceType
	// timings maps phases of comparison to the duration it took to complete (for statistical purposes)
	timings map[string]time.Duration
	// diffResultList holds the per-resource diff results
	diffResultList *diff.DiffResultList
	// hasPostDeleteHooks is true when any target object is a post-delete hook
	hasPostDeleteHooks bool
}
93
// GetSyncStatus returns the computed sync status of the comparison result.
func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus {
	return res.syncStatus
}
97
// GetHealthStatus returns the computed health status of the comparison result.
func (res *comparisonResult) GetHealthStatus() *v1alpha1.HealthStatus {
	return res.healthStatus
}
101
// appStateManager allows to compare applications to git
type appStateManager struct {
	metricsServer         *metrics.MetricsServer
	db                    db.ArgoDB
	settingsMgr           *settings.SettingsManager
	appclientset          appclientset.Interface
	projInformer          cache.SharedIndexInformer
	kubectl               kubeutil.Kubectl
	repoClientset         apiclient.Clientset
	liveStateCache        statecache.LiveStateCache
	cache                 *appstatecache.Cache
	namespace             string
	statusRefreshTimeout  time.Duration
	resourceTracking      argo.ResourceTracking
	persistResourceHealth bool
	// repoErrorCache maps application name -> time the first repo-server error
	// was observed; together with repoErrorGracePeriod it is used to
	// short-circuit repeated repo failures during comparison.
	repoErrorCache       goSync.Map
	repoErrorGracePeriod time.Duration
	// serverSideDiff enables server-side diffing at the controller level; it can
	// be overridden per application via the compare-options annotation.
	serverSideDiff bool
}
121
122// GetRepoObjs will generate the manifests for the given application delegating the
123// task to the repo-server. It returns the list of generated manifests as unstructured
124// objects. It also returns the full response from all calls to the repo server as the
125// second argument.
126func (m *appStateManager) GetRepoObjs(app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, error) {127ts := stats.NewTimingStats()128helmRepos, err := m.db.ListHelmRepositories(context.Background())129if err != nil {130return nil, nil, fmt.Errorf("failed to list Helm repositories: %w", err)131}132permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos)133if err != nil {134return nil, nil, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err)135}136
137ts.AddCheckpoint("repo_ms")138helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(context.Background())139if err != nil {140return nil, nil, fmt.Errorf("failed to get Helm credentials: %w", err)141}142permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials)143if err != nil {144return nil, nil, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err)145}146
147enabledSourceTypes, err := m.settingsMgr.GetEnabledSourceTypes()148if err != nil {149return nil, nil, fmt.Errorf("failed to get enabled source types: %w", err)150}151ts.AddCheckpoint("plugins_ms")152
153kustomizeSettings, err := m.settingsMgr.GetKustomizeSettings()154if err != nil {155return nil, nil, fmt.Errorf("failed to get Kustomize settings: %w", err)156}157
158helmOptions, err := m.settingsMgr.GetHelmSettings()159if err != nil {160return nil, nil, fmt.Errorf("failed to get Helm settings: %w", err)161}162
163ts.AddCheckpoint("build_options_ms")164serverVersion, apiResources, err := m.liveStateCache.GetVersionsInfo(app.Spec.Destination.Server)165if err != nil {166return nil, nil, fmt.Errorf("failed to get cluster version for cluster %q: %w", app.Spec.Destination.Server, err)167}168conn, repoClient, err := m.repoClientset.NewRepoServerClient()169if err != nil {170return nil, nil, fmt.Errorf("failed to connect to repo server: %w", err)171}172defer io.Close(conn)173
174manifestInfos := make([]*apiclient.ManifestResponse, 0)175targetObjs := make([]*unstructured.Unstructured, 0)176
177// Store the map of all sources having ref field into a map for applications with sources field178refSources, err := argo.GetRefSources(context.Background(), app.Spec, m.db)179if err != nil {180return nil, nil, fmt.Errorf("failed to get ref sources: %v", err)181}182
183for i, source := range sources {184if len(revisions) < len(sources) || revisions[i] == "" {185revisions[i] = source.TargetRevision186}187ts.AddCheckpoint("helm_ms")188repo, err := m.db.GetRepository(context.Background(), source.RepoURL)189if err != nil {190return nil, nil, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)191}192kustomizeOptions, err := kustomizeSettings.GetOptions(source)193if err != nil {194return nil, nil, fmt.Errorf("failed to get Kustomize options for source %d of %d: %w", i+1, len(sources), err)195}196
197ts.AddCheckpoint("version_ms")198log.Debugf("Generating Manifest for source %s revision %s", source, revisions[i])199manifestInfo, err := repoClient.GenerateManifest(context.Background(), &apiclient.ManifestRequest{200Repo: repo,201Repos: permittedHelmRepos,202Revision: revisions[i],203NoCache: noCache,204NoRevisionCache: noRevisionCache,205AppLabelKey: appLabelKey,206AppName: app.InstanceName(m.namespace),207Namespace: app.Spec.Destination.Namespace,208ApplicationSource: &source,209KustomizeOptions: kustomizeOptions,210KubeVersion: serverVersion,211ApiVersions: argo.APIResourcesToStrings(apiResources, true),212VerifySignature: verifySignature,213HelmRepoCreds: permittedHelmCredentials,214TrackingMethod: string(argo.GetTrackingMethod(m.settingsMgr)),215EnabledSourceTypes: enabledSourceTypes,216HelmOptions: helmOptions,217HasMultipleSources: app.Spec.HasMultipleSources(),218RefSources: refSources,219ProjectName: proj.Name,220ProjectSourceRepos: proj.Spec.SourceRepos,221})222if err != nil {223return nil, nil, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)224}225
226targetObj, err := unmarshalManifests(manifestInfo.Manifests)227
228if err != nil {229return nil, nil, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err)230}231targetObjs = append(targetObjs, targetObj...)232
233manifestInfos = append(manifestInfos, manifestInfo)234}235
236ts.AddCheckpoint("unmarshal_ms")237logCtx := log.WithField("application", app.QualifiedName())238for k, v := range ts.Timings() {239logCtx = logCtx.WithField(k, v.Milliseconds())240}241logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())242logCtx.Info("GetRepoObjs stats")243return targetObjs, manifestInfos, nil244}
245
246func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {247targetObjs := make([]*unstructured.Unstructured, 0)248for _, manifest := range manifests {249obj, err := v1alpha1.UnmarshalToUnstructured(manifest)250if err != nil {251return nil, err252}253targetObjs = append(targetObjs, obj)254}255return targetObjs, nil256}
257
258func DeduplicateTargetObjects(259namespace string,260objs []*unstructured.Unstructured,261infoProvider kubeutil.ResourceInfoProvider,262) ([]*unstructured.Unstructured, []v1alpha1.ApplicationCondition, error) {263
264targetByKey := make(map[kubeutil.ResourceKey][]*unstructured.Unstructured)265for i := range objs {266obj := objs[i]267if obj == nil {268continue269}270isNamespaced := kubeutil.IsNamespacedOrUnknown(infoProvider, obj.GroupVersionKind().GroupKind())271if !isNamespaced {272obj.SetNamespace("")273} else if obj.GetNamespace() == "" {274obj.SetNamespace(namespace)275}276key := kubeutil.GetResourceKey(obj)277if key.Name == "" && obj.GetGenerateName() != "" {278key.Name = fmt.Sprintf("%s%d", obj.GetGenerateName(), i)279}280targetByKey[key] = append(targetByKey[key], obj)281}282conditions := make([]v1alpha1.ApplicationCondition, 0)283result := make([]*unstructured.Unstructured, 0)284for key, targets := range targetByKey {285if len(targets) > 1 {286now := metav1.Now()287conditions = append(conditions, v1alpha1.ApplicationCondition{288Type: v1alpha1.ApplicationConditionRepeatedResourceWarning,289Message: fmt.Sprintf("Resource %s appeared %d times among application resources.", key.String(), len(targets)),290LastTransitionTime: &now,291})292}293result = append(result, targets[len(targets)-1])294}295
296return result, conditions, nil297}
298
299// getComparisonSettings will return the system level settings related to the
300// diff/normalization process.
301func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, error) {302resourceOverrides, err := m.settingsMgr.GetResourceOverrides()303if err != nil {304return "", nil, nil, err305}306appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()307if err != nil {308return "", nil, nil, err309}310resFilter, err := m.settingsMgr.GetResourcesFilter()311if err != nil {312return "", nil, nil, err313}314return appLabelKey, resourceOverrides, resFilter, nil315}
316
317// verifyGnuPGSignature verifies the result of a GnuPG operation for a given git
318// revision.
319func verifyGnuPGSignature(revision string, project *v1alpha1.AppProject, manifestInfo *apiclient.ManifestResponse) []v1alpha1.ApplicationCondition {320now := metav1.Now()321conditions := make([]v1alpha1.ApplicationCondition, 0)322// We need to have some data in the verification result to parse, otherwise there was no signature323if manifestInfo.VerifyResult != "" {324verifyResult := gpg.ParseGitCommitVerification(manifestInfo.VerifyResult)325switch verifyResult.Result {326case gpg.VerifyResultGood:327// This is the only case we allow to sync to, but we need to make sure signing key is allowed328validKey := false329for _, k := range project.Spec.SignatureKeys {330if gpg.KeyID(k.KeyID) == gpg.KeyID(verifyResult.KeyID) && gpg.KeyID(k.KeyID) != "" {331validKey = true332break333}334}335if !validKey {336msg := fmt.Sprintf("Found good signature made with %s key %s, but this key is not allowed in AppProject",337verifyResult.Cipher, verifyResult.KeyID)338conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})339}340case gpg.VerifyResultInvalid:341msg := fmt.Sprintf("Found signature made with %s key %s, but verification result was invalid: '%s'",342verifyResult.Cipher, verifyResult.KeyID, verifyResult.Message)343conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})344default:345msg := fmt.Sprintf("Could not verify commit signature on revision '%s', check logs for more information.", revision)346conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})347}348} else {349msg := fmt.Sprintf("Target revision %s in Git is not signed, but a signature is required", revision)350conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, 
Message: msg, LastTransitionTime: &now})351}352
353return conditions354}
355
356func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application) bool {357return ns != nil && ns.GetKind() == kubeutil.NamespaceKind && ns.GetName() == app.Spec.Destination.Namespace && app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.ManagedNamespaceMetadata != nil358}
359
360// CompareAppState compares application git state to the live app state, using the specified
361// revision and supplied source. If revision or overrides are empty, then compares against
362// revision and overrides in the app spec.
363func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {364ts := stats.NewTimingStats()365appLabelKey, resourceOverrides, resFilter, err := m.getComparisonSettings()366
367ts.AddCheckpoint("settings_ms")368
369// return unknown comparison result if basic comparison settings cannot be loaded370if err != nil {371if hasMultipleSources {372return &comparisonResult{373syncStatus: &v1alpha1.SyncStatus{374ComparedTo: v1alpha1.ComparedTo{Destination: app.Spec.Destination, Sources: sources, IgnoreDifferences: app.Spec.IgnoreDifferences},375Status: v1alpha1.SyncStatusCodeUnknown,376Revisions: revisions,377},378healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},379}, nil380} else {381return &comparisonResult{382syncStatus: &v1alpha1.SyncStatus{383ComparedTo: v1alpha1.ComparedTo{Source: sources[0], Destination: app.Spec.Destination, IgnoreDifferences: app.Spec.IgnoreDifferences},384Status: v1alpha1.SyncStatusCodeUnknown,385Revision: revisions[0],386},387healthStatus: &v1alpha1.HealthStatus{Status: health.HealthStatusUnknown},388}, nil389}390}391
392// When signature keys are defined in the project spec, we need to verify the signature on the Git revision393verifySignature := false394if project.Spec.SignatureKeys != nil && len(project.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled() {395verifySignature = true396}397
398// do best effort loading live and target state to present as much information about app state as possible399failedToLoadObjs := false400conditions := make([]v1alpha1.ApplicationCondition, 0)401
402logCtx := log.WithField("application", app.QualifiedName())403logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)404
405var targetObjs []*unstructured.Unstructured406now := metav1.Now()407
408var manifestInfos []*apiclient.ManifestResponse409targetNsExists := false410
411if len(localManifests) == 0 {412// If the length of revisions is not same as the length of sources,413// we take the revisions from the sources directly for all the sources.414if len(revisions) != len(sources) {415revisions = make([]string, 0)416for _, source := range sources {417revisions = append(revisions, source.TargetRevision)418}419}420
421targetObjs, manifestInfos, err = m.GetRepoObjs(app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project)422if err != nil {423targetObjs = make([]*unstructured.Unstructured, 0)424msg := fmt.Sprintf("Failed to load target state: %s", err.Error())425conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})426if firstSeen, ok := m.repoErrorCache.Load(app.Name); ok {427if time.Since(firstSeen.(time.Time)) <= m.repoErrorGracePeriod && !noRevisionCache {428// if first seen is less than grace period and it's not a Level 3 comparison,429// ignore error and short circuit430logCtx.Debugf("Ignoring repo error %v, already encountered error in grace period", err.Error())431return nil, CompareStateRepoError432}433} else if !noRevisionCache {434logCtx.Debugf("Ignoring repo error %v, new occurrence", err.Error())435m.repoErrorCache.Store(app.Name, time.Now())436return nil, CompareStateRepoError437}438failedToLoadObjs = true439} else {440m.repoErrorCache.Delete(app.Name)441}442} else {443// Prevent applying local manifests for now when signature verification is enabled444// This is also enforced on API level, but as a last resort, we also enforce it here445if gpg.IsGPGEnabled() && verifySignature {446msg := "Cannot use local manifests when signature verification is required"447targetObjs = make([]*unstructured.Unstructured, 0)448conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})449failedToLoadObjs = true450} else {451targetObjs, err = unmarshalManifests(localManifests)452if err != nil {453targetObjs = make([]*unstructured.Unstructured, 0)454msg := fmt.Sprintf("Failed to load local manifests: %s", err.Error())455conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, 
LastTransitionTime: &now})456failedToLoadObjs = true457}458}459// empty out manifestInfoMap460manifestInfos = make([]*apiclient.ManifestResponse, 0)461}462ts.AddCheckpoint("git_ms")463
464var infoProvider kubeutil.ResourceInfoProvider465infoProvider, err = m.liveStateCache.GetClusterCache(app.Spec.Destination.Server)466if err != nil {467infoProvider = &resourceInfoProviderStub{}468}469targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Namespace, targetObjs, infoProvider)470if err != nil {471msg := fmt.Sprintf("Failed to deduplicate target state: %s", err.Error())472conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})473}474conditions = append(conditions, dedupConditions...)475for i := len(targetObjs) - 1; i >= 0; i-- {476targetObj := targetObjs[i]477gvk := targetObj.GroupVersionKind()478if resFilter.IsExcludedResource(gvk.Group, gvk.Kind, app.Spec.Destination.Server) {479targetObjs = append(targetObjs[:i], targetObjs[i+1:]...)480conditions = append(conditions, v1alpha1.ApplicationCondition{481Type: v1alpha1.ApplicationConditionExcludedResourceWarning,482Message: fmt.Sprintf("Resource %s/%s %s is excluded in the settings", gvk.Group, gvk.Kind, targetObj.GetName()),483LastTransitionTime: &now,484})485}486
487// If we reach this path, this means that a namespace has been both defined in Git, as well in the488// application's managedNamespaceMetadata. We want to ensure that this manifest is the one being used instead489// of what is present in managedNamespaceMetadata.490if isManagedNamespace(targetObj, app) {491targetNsExists = true492}493}494ts.AddCheckpoint("dedup_ms")495
496liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(app, targetObjs)497if err != nil {498liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)499msg := fmt.Sprintf("Failed to load live state: %s", err.Error())500conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})501failedToLoadObjs = true502}503
504logCtx.Debugf("Retrieved live manifests")505
506// filter out all resources which are not permitted in the application project507for k, v := range liveObjByKey {508permitted, err := project.IsLiveResourcePermitted(v, app.Spec.Destination.Server, app.Spec.Destination.Name, func(project string) ([]*v1alpha1.Cluster, error) {509clusters, err := m.db.GetProjectClusters(context.TODO(), project)510if err != nil {511return nil, fmt.Errorf("failed to get clusters for project %q: %v", project, err)512}513return clusters, nil514})515
516if err != nil {517msg := fmt.Sprintf("Failed to check if live resource %q is permitted in project %q: %s", k.String(), app.Spec.Project, err.Error())518conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})519failedToLoadObjs = true520continue521}522
523if !permitted {524delete(liveObjByKey, k)525}526}527
528trackingMethod := argo.GetTrackingMethod(m.settingsMgr)529
530for _, liveObj := range liveObjByKey {531if liveObj != nil {532appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, trackingMethod)533if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) {534fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/")535conditions = append(conditions, v1alpha1.ApplicationCondition{536Type: v1alpha1.ApplicationConditionSharedResourceWarning,537Message: fmt.Sprintf("%s/%s is part of applications %s and %s", liveObj.GetKind(), liveObj.GetName(), app.QualifiedName(), fqInstanceName),538LastTransitionTime: &now,539})540}541
542// For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking543// enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some544// bookkeeping in order to prevent the managed namespace from being pruned.545//546// Live namespaces which are managed namespaces (i.e. application namespaces which are managed with547// CreateNamespace=true and has non-nil managedNamespaceMetadata) will (usually) not have a corresponding548// entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a549// namespace which we can compare the live namespace with. For that, we'll do the same as is done in550// gitops-engine, the difference here being that we create a managed namespace which is only used for comparison.551//552// targetNsExists == true implies that it already exists as a target, so no need to add the namespace to the553// targetObjs array.554if isManagedNamespace(liveObj, app) && !targetNsExists {555nsSpec := &v1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}}556managedNs, err := kubeutil.ToUnstructured(nsSpec)557
558if err != nil {559conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})560failedToLoadObjs = true561continue562}563
564// No need to care about the return value here, we just want the modified managedNs565_, err = syncNamespace(m.resourceTracking, appLabelKey, trackingMethod, app.Name, app.Spec.SyncPolicy)(managedNs, liveObj)566if err != nil {567conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})568failedToLoadObjs = true569} else {570targetObjs = append(targetObjs, managedNs)571}572}573}574}575hasPostDeleteHooks := false576for _, obj := range targetObjs {577if isPostDeleteHook(obj) {578hasPostDeleteHooks = true579}580}581
582reconciliation := sync.Reconcile(targetObjs, liveObjByKey, app.Spec.Destination.Namespace, infoProvider)583ts.AddCheckpoint("live_ms")584
585compareOptions, err := m.settingsMgr.GetResourceCompareOptions()586if err != nil {587log.Warnf("Could not get compare options from ConfigMap (assuming defaults): %v", err)588compareOptions = settings.GetDefaultDiffOptions()589}590manifestRevisions := make([]string, 0)591
592for _, manifestInfo := range manifestInfos {593manifestRevisions = append(manifestRevisions, manifestInfo.Revision)594}595
596serverSideDiff := m.serverSideDiff ||597resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=true")598
599// This allows turning SSD off for a given app if it is enabled at the600// controller level601if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=false") {602serverSideDiff = false603}604
605useDiffCache := useDiffCache(noCache, manifestInfos, sources, app, manifestRevisions, m.statusRefreshTimeout, serverSideDiff, logCtx)606
607diffConfigBuilder := argodiff.NewDiffConfigBuilder().608WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles).609WithTracking(appLabelKey, string(trackingMethod))610
611if useDiffCache {612diffConfigBuilder.WithCache(m.cache, app.InstanceName(m.namespace))613} else {614diffConfigBuilder.WithNoCache()615}616
617if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "IncludeMutationWebhook=true") {618diffConfigBuilder.WithIgnoreMutationWebhook(false)619}620
621gvkParser, err := m.getGVKParser(app.Spec.Destination.Server)622if err != nil {623conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})624}625diffConfigBuilder.WithGVKParser(gvkParser)626diffConfigBuilder.WithManager(common.ArgoCDSSAManager)627
628diffConfigBuilder.WithServerSideDiff(serverSideDiff)629
630if serverSideDiff {631resourceOps, cleanup, err := m.getResourceOperations(app.Spec.Destination.Server)632if err != nil {633log.Errorf("CompareAppState error getting resource operations: %s", err)634conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})635}636defer cleanup()637diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(resourceOps))638}639
640// enable structured merge diff if application syncs with server-side apply641if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.SyncOptions.HasOption("ServerSideApply=true") {642diffConfigBuilder.WithStructuredMergeDiff(true)643}644
645// it is necessary to ignore the error at this point to avoid creating duplicated646// application conditions as argo.StateDiffs will validate this diffConfig again.647diffConfig, _ := diffConfigBuilder.Build()648
649diffResults, err := argodiff.StateDiffs(reconciliation.Live, reconciliation.Target, diffConfig)650if err != nil {651diffResults = &diff.DiffResultList{}652failedToLoadObjs = true653msg := fmt.Sprintf("Failed to compare desired state to live state: %s", err.Error())654conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})655}656ts.AddCheckpoint("diff_ms")657
658syncCode := v1alpha1.SyncStatusCodeSynced659managedResources := make([]managedResource, len(reconciliation.Target))660resourceSummaries := make([]v1alpha1.ResourceStatus, len(reconciliation.Target))661for i, targetObj := range reconciliation.Target {662liveObj := reconciliation.Live[i]663obj := liveObj664if obj == nil {665obj = targetObj666}667if obj == nil {668continue669}670gvk := obj.GroupVersionKind()671
672isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), appLabelKey, trackingMethod)673
674resState := v1alpha1.ResourceStatus{675Namespace: obj.GetNamespace(),676Name: obj.GetName(),677Kind: gvk.Kind,678Version: gvk.Version,679Group: gvk.Group,680Hook: isHook(obj),681RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj,682}683if targetObj != nil {684resState.SyncWave = int64(syncwaves.Wave(targetObj))685}686
687var diffResult diff.DiffResult688if i < len(diffResults.Diffs) {689diffResult = diffResults.Diffs[i]690} else {691diffResult = diff.DiffResult{Modified: false, NormalizedLive: []byte("{}"), PredictedLive: []byte("{}")}692}693
694// For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking695// enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some696// bookkeeping in order to ensure that it's not considered `OutOfSync` (since it does not exist in source697// control).698//699// This is in addition to the bookkeeping we do (see `isManagedNamespace` and its references) to prevent said700// namespace from being pruned.701isManagedNs := isManagedNamespace(targetObj, app) && liveObj == nil702
703if resState.Hook || ignore.Ignore(obj) || (targetObj != nil && hookutil.Skip(targetObj)) || !isSelfReferencedObj {704// For resource hooks, skipped resources or objects that may have705// been created by another controller with annotations copied from706// the source object, don't store sync status, and do not affect707// overall sync status708} else if !isManagedNs && (diffResult.Modified || targetObj == nil || liveObj == nil) {709// Set resource state to OutOfSync since one of the following is true:710// * target and live resource are different711// * target resource not defined and live resource is extra712// * target resource present but live resource is missing713resState.Status = v1alpha1.SyncStatusCodeOutOfSync714// we ignore the status if the obj needs pruning AND we have the annotation715needsPruning := targetObj == nil && liveObj != nil716if !(needsPruning && resourceutil.HasAnnotationOption(obj, common.AnnotationCompareOptions, "IgnoreExtraneous")) {717syncCode = v1alpha1.SyncStatusCodeOutOfSync718}719} else {720resState.Status = v1alpha1.SyncStatusCodeSynced721}722// set unknown status to all resource that are not permitted in the app project723isNamespaced, err := m.liveStateCache.IsNamespaced(app.Spec.Destination.Server, gvk.GroupKind())724if !project.IsGroupKindPermitted(gvk.GroupKind(), isNamespaced && err == nil) {725resState.Status = v1alpha1.SyncStatusCodeUnknown726}727
728if isNamespaced && obj.GetNamespace() == "" {729conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: fmt.Sprintf("Namespace for %s %s is missing.", obj.GetName(), gvk.String()), LastTransitionTime: &now})730}731
732// we can't say anything about the status if we were unable to get the target objects733if failedToLoadObjs {734resState.Status = v1alpha1.SyncStatusCodeUnknown735}736
737resourceVersion := ""738if liveObj != nil {739resourceVersion = liveObj.GetResourceVersion()740}741managedResources[i] = managedResource{742Name: resState.Name,743Namespace: resState.Namespace,744Group: resState.Group,745Kind: resState.Kind,746Version: resState.Version,747Live: liveObj,748Target: targetObj,749Diff: diffResult,750Hook: resState.Hook,751ResourceVersion: resourceVersion,752}753resourceSummaries[i] = resState754}755
756if failedToLoadObjs {757syncCode = v1alpha1.SyncStatusCodeUnknown758} else if app.HasChangedManagedNamespaceMetadata() {759syncCode = v1alpha1.SyncStatusCodeOutOfSync760}761var revision string762
763if !hasMultipleSources && len(manifestRevisions) > 0 {764revision = manifestRevisions[0]765}766var syncStatus v1alpha1.SyncStatus767if hasMultipleSources {768syncStatus = v1alpha1.SyncStatus{769ComparedTo: v1alpha1.ComparedTo{770Destination: app.Spec.Destination,771Sources: sources,772IgnoreDifferences: app.Spec.IgnoreDifferences,773},774Status: syncCode,775Revisions: manifestRevisions,776}777} else {778syncStatus = v1alpha1.SyncStatus{779ComparedTo: v1alpha1.ComparedTo{780Destination: app.Spec.Destination,781Source: app.Spec.GetSource(),782IgnoreDifferences: app.Spec.IgnoreDifferences,783},784Status: syncCode,785Revision: revision,786}787}788
789ts.AddCheckpoint("sync_ms")790
791healthStatus, err := setApplicationHealth(managedResources, resourceSummaries, resourceOverrides, app, m.persistResourceHealth)792if err != nil {793conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: fmt.Sprintf("error setting app health: %s", err.Error()), LastTransitionTime: &now})794}795
796// Git has already performed the signature verification via its GPG interface, and the result is available797// in the manifest info received from the repository server. We now need to form our opinion about the result798// and stop processing if we do not agree about the outcome.799for _, manifestInfo := range manifestInfos {800if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil {801conditions = append(conditions, verifyGnuPGSignature(manifestInfo.Revision, project, manifestInfo)...)802}803}804
805compRes := comparisonResult{806syncStatus: &syncStatus,807healthStatus: healthStatus,808resources: resourceSummaries,809managedResources: managedResources,810reconciliationResult: reconciliation,811diffConfig: diffConfig,812diffResultList: diffResults,813hasPostDeleteHooks: hasPostDeleteHooks,814}815
816if hasMultipleSources {817for _, manifestInfo := range manifestInfos {818compRes.appSourceTypes = append(compRes.appSourceTypes, v1alpha1.ApplicationSourceType(manifestInfo.SourceType))819}820} else {821for _, manifestInfo := range manifestInfos {822compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)823break824}825}826
827app.Status.SetConditions(conditions, map[v1alpha1.ApplicationConditionType]bool{828v1alpha1.ApplicationConditionComparisonError: true,829v1alpha1.ApplicationConditionSharedResourceWarning: true,830v1alpha1.ApplicationConditionRepeatedResourceWarning: true,831v1alpha1.ApplicationConditionExcludedResourceWarning: true,832})833ts.AddCheckpoint("health_ms")834compRes.timings = ts.Timings()835return &compRes, nil836}
837
838// useDiffCache will determine if the diff should be calculated based
839// on the existing live state cache or not.
840func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sources []v1alpha1.ApplicationSource, app *v1alpha1.Application, manifestRevisions []string, statusRefreshTimeout time.Duration, serverSideDiff bool, log *log.Entry) bool {841
842if noCache {843log.WithField("useDiffCache", "false").Debug("noCache is true")844return false845}846refreshType, refreshRequested := app.IsRefreshRequested()847if refreshRequested {848log.WithField("useDiffCache", "false").Debugf("refresh type %s requested", string(refreshType))849return false850}851// serverSideDiff should still use cache even if status is expired.852// This is an attempt to avoid hitting k8s API server too frequently during853// app refresh with serverSideDiff is enabled. If there are negative side854// effects identified with this approach, the serverSideDiff should be removed855// from this condition.856if app.Status.Expired(statusRefreshTimeout) && !serverSideDiff {857log.WithField("useDiffCache", "false").Debug("app.status.expired")858return false859}860
861if len(manifestInfos) != len(sources) {862log.WithField("useDiffCache", "false").Debug("manifestInfos len != sources len")863return false864}865
866revisionChanged := !reflect.DeepEqual(app.Status.GetRevisions(), manifestRevisions)867if revisionChanged {868log.WithField("useDiffCache", "false").Debug("revisionChanged")869return false870}871
872currentSpec := app.BuildComparedToStatus()873specChanged := !reflect.DeepEqual(app.Status.Sync.ComparedTo, currentSpec)874if specChanged {875log.WithField("useDiffCache", "false").Debug("specChanged")876return false877}878
879log.WithField("useDiffCache", "true").Debug("using diff cache")880return true881}
882
883func (m *appStateManager) persistRevisionHistory(884app *v1alpha1.Application,885revision string,886source v1alpha1.ApplicationSource,887revisions []string,888sources []v1alpha1.ApplicationSource,889hasMultipleSources bool,890startedAt metav1.Time,891initiatedBy v1alpha1.OperationInitiator,892) error {893var nextID int64894if len(app.Status.History) > 0 {895nextID = app.Status.History.LastRevisionHistory().ID + 1896}897
898if hasMultipleSources {899app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{900DeployedAt: metav1.NewTime(time.Now().UTC()),901DeployStartedAt: &startedAt,902ID: nextID,903Sources: sources,904Revisions: revisions,905InitiatedBy: initiatedBy,906})907} else {908app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{909Revision: revision,910DeployedAt: metav1.NewTime(time.Now().UTC()),911DeployStartedAt: &startedAt,912ID: nextID,913Source: source,914InitiatedBy: initiatedBy,915})916}917
918app.Status.History = app.Status.History.Trunc(app.Spec.GetRevisionHistoryLimit())919
920patch, err := json.Marshal(map[string]map[string][]v1alpha1.RevisionHistory{921"status": {922"history": app.Status.History,923},924})925if err != nil {926return fmt.Errorf("error marshaling revision history patch: %w", err)927}928_, err = m.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})929return err930}
931
932// NewAppStateManager creates new instance of AppStateManager
933func NewAppStateManager(934db db.ArgoDB,935appclientset appclientset.Interface,936repoClientset apiclient.Clientset,937namespace string,938kubectl kubeutil.Kubectl,939settingsMgr *settings.SettingsManager,940liveStateCache statecache.LiveStateCache,941projInformer cache.SharedIndexInformer,942metricsServer *metrics.MetricsServer,943cache *appstatecache.Cache,944statusRefreshTimeout time.Duration,945resourceTracking argo.ResourceTracking,946persistResourceHealth bool,947repoErrorGracePeriod time.Duration,948serverSideDiff bool,949) AppStateManager {950return &appStateManager{951liveStateCache: liveStateCache,952cache: cache,953db: db,954appclientset: appclientset,955kubectl: kubectl,956repoClientset: repoClientset,957namespace: namespace,958settingsMgr: settingsMgr,959projInformer: projInformer,960metricsServer: metricsServer,961statusRefreshTimeout: statusRefreshTimeout,962resourceTracking: resourceTracking,963persistResourceHealth: persistResourceHealth,964repoErrorGracePeriod: repoErrorGracePeriod,965serverSideDiff: serverSideDiff,966}967}
968
969// isSelfReferencedObj returns whether the given obj is managed by the application
970// according to the values of the tracking id (aka app instance value) annotation.
971// It returns true when all of the properties of the tracking id (app name, namespace,
972// group and kind) match the properties of the live object, or if the tracking method
973// used does not provide the required properties for matching.
974// Reference: https://github.com/argoproj/argo-cd/issues/8683
975func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName, appLabelKey string, trackingMethod v1alpha1.TrackingMethod) bool {976if live == nil {977return true978}979
980// If tracking method doesn't contain required metadata for this check,981// we are not able to determine and just assume the object to be managed.982if trackingMethod == argo.TrackingMethodLabel {983return true984}985
986// config != nil is the best-case scenario for constructing an accurate987// Tracking ID. `config` is the "desired state" (from git/helm/etc.).988// Using the desired state is important when there is an ApiGroup upgrade.989// When upgrading, the comparison must be made with the new tracking ID.990// Example:991// live resource annotation will be:992// ingress-app:extensions/Ingress:default/some-ingress993// when it should be:994// ingress-app:networking.k8s.io/Ingress:default/some-ingress995// More details in: https://github.com/argoproj/argo-cd/pull/11012996var aiv argo.AppInstanceValue997if config != nil {998aiv = argo.UnstructuredToAppInstanceValue(config, appName, "")999return isSelfReferencedObj(live, aiv)1000}1001
1002// If config is nil then compare the live resource with the value1003// of the annotation. In this case, in order to validate if obj is1004// managed by this application, the values from the annotation have1005// to match the properties from the live object. Cluster scoped objects1006// carry the app's destination namespace in the tracking annotation,1007// but are unique in GVK + name combination.1008appInstance := m.resourceTracking.GetAppInstance(live, appLabelKey, trackingMethod)1009if appInstance != nil {1010return isSelfReferencedObj(live, *appInstance)1011}1012return true1013}
1014
1015// isSelfReferencedObj returns true if the given Tracking ID (`aiv`) matches
1016// the given object. It returns false when the ID doesn't match. This sometimes
1017// happens when a tracking label or annotation gets accidentally copied to a
1018// different resource.
1019func isSelfReferencedObj(obj *unstructured.Unstructured, aiv argo.AppInstanceValue) bool {1020return (obj.GetNamespace() == aiv.Namespace || obj.GetNamespace() == "") &&1021obj.GetName() == aiv.Name &&1022obj.GetObjectKind().GroupVersionKind().Group == aiv.Group &&1023obj.GetObjectKind().GroupVersionKind().Kind == aiv.Kind1024}
1025