package sarama

import (
	"compress/gzip"
	"crypto/tls"
	"fmt"
	"io"
	"net"
	"regexp"
	"time"

	"github.com/rcrowley/go-metrics"
	"golang.org/x/net/proxy"
)

const defaultClientID = "sarama"

var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)

// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
	Admin struct {
		Retry struct {
			// The total number of times to retry sending (retriable) admin requests (default 5).
			// Similar to the `retries` setting of the JVM AdminClientConfig.
			Max int
			// Backoff time between retries of a failed request (default 100ms)
			Backoff time.Duration
		}
		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
		Timeout time.Duration
	}
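
	// An illustrative sketch (values are assumptions, not defaults of this
	// file) of loosening these limits for slow administrative operations
	// such as bulk topic creation:
	//
	//	cfg := NewConfig()
	//	cfg.Admin.Retry.Max = 10
	//	cfg.Admin.Retry.Backoff = 500 * time.Millisecond
	//	cfg.Admin.Timeout = 30 * time.Second
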
	// Net is the namespace for network-level properties used by the Broker, and
	// shared by the Client/Producer/Consumer.
	Net struct {
		// How many outstanding requests a connection is allowed to have before
		// sending on it blocks (default 5).
		// Throughput can improve but message ordering is not guaranteed if Producer.Idempotent is disabled, see:
		// https://kafka.apache.org/protocol#protocol_network
		// https://kafka.apache.org/28/documentation.html#producerconfigs_max.in.flight.requests.per.connection
		MaxOpenRequests int

		// All three of the below configurations are similar to the
		// `socket.timeout.ms` setting in JVM kafka. All of them default
		// to 30 seconds.
		DialTimeout  time.Duration // How long to wait for the initial connection.
		ReadTimeout  time.Duration // How long to wait for a response.
		WriteTimeout time.Duration // How long to wait for a transmit.

		TLS struct {
			// Whether or not to use TLS when connecting to the broker
			// (defaults to false).
			Enable bool
			// The TLS configuration to use for secure connections if
			// enabled (defaults to nil).
			Config *tls.Config
		}
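
		// A minimal sketch of enabling TLS; the tls.Config shown is an
		// illustrative assumption, not a default of this package:
		//
		//	cfg := NewConfig()
		//	cfg.Net.TLS.Enable = true
		//	cfg.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}
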
		// SASL based authentication with broker. Supported mechanisms are
		// PLAIN, SCRAM (SHA-256 and SHA-512), OAUTHBEARER and GSSAPI.
		SASL struct {
			// Whether or not to use SASL authentication when connecting to the broker
			// (defaults to false).
			Enable bool
			// SASLMechanism is the name of the enabled SASL mechanism.
			// Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
			Mechanism SASLMechanism
			// Version is the SASL Protocol Version to use
			// Kafka > 1.x should use V1, except on Azure EventHub which uses V0
			Version int16
			// Whether or not to send the Kafka SASL handshake first if enabled
			// (defaults to true). You should only set this to false if you're using
			// a non-Kafka SASL proxy.
			Handshake bool
			// AuthIdentity is an (optional) authorization identity (authzid) to
			// use for SASL/PLAIN authentication (if different from User) when
			// an authenticated user is permitted to act as the presented
			// alternative user. See RFC4616 for details.
			AuthIdentity string
			// User is the authentication identity (authcid) to present for
			// SASL/PLAIN or SASL/SCRAM authentication
			User string
			// Password for SASL/PLAIN authentication
			Password string
			// authz id used for SASL/SCRAM authentication
			SCRAMAuthzID string
			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
			// client used to perform the SCRAM exchange with the server.
			SCRAMClientGeneratorFunc func() SCRAMClient
			// TokenProvider is a user-defined callback for generating
			// access tokens for SASL/OAUTHBEARER auth. See the
			// AccessTokenProvider interface docs for proper implementation
			// guidelines.
			TokenProvider AccessTokenProvider

			GSSAPI GSSAPIConfig
		}
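
		// A minimal SASL/PLAIN sketch; the user and password shown are
		// placeholders:
		//
		//	cfg := NewConfig()
		//	cfg.Net.SASL.Enable = true
		//	cfg.Net.SASL.Mechanism = SASLTypePlaintext
		//	cfg.Net.SASL.User = "service-account"
		//	cfg.Net.SASL.Password = "secret"
		//	// Handshake (true) and Version (SASLHandshakeV0) keep their defaults.
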
		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
		// If zero or positive, keep-alives are enabled.
		// If negative, keep-alives are disabled.
		KeepAlive time.Duration

		// LocalAddr is the local address to use when dialing an
		// address. The address must be of a compatible type for the
		// network being dialed.
		// If nil, a local address is automatically chosen.
		LocalAddr net.Addr

		Proxy struct {
			// Whether or not to use a proxy when connecting to the broker
			// (defaults to false).
			Enable bool
			// The proxy dialer to use when enabled (defaults to nil).
			Dialer proxy.Dialer
		}
	}
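
	// A sketch of routing broker connections through a SOCKS5 proxy using
	// golang.org/x/net/proxy; the proxy address and the absence of
	// authentication are assumptions:
	//
	//	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	//	if err != nil {
	//		// handle the error
	//	}
	//	cfg.Net.Proxy.Enable = true
	//	cfg.Net.Proxy.Dialer = dialer
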
	// Metadata is the namespace for metadata management properties used by the
	// Client, and shared by the Producer/Consumer.
	Metadata struct {
		Retry struct {
			// The total number of times to retry a metadata request when the
			// cluster is in the middle of a leader election (default 3).
			Max int
			// How long to wait for leader election to occur before retrying
			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
			Backoff time.Duration
			// Called to compute backoff time dynamically. Useful for implementing
			// more sophisticated backoff strategies. This takes precedence over
			// `Backoff` if set.
			BackoffFunc func(retries, maxRetries int) time.Duration
		}
		// How frequently to refresh the cluster metadata in the background.
		// Defaults to 10 minutes. Set to 0 to disable. Similar to
		// `topic.metadata.refresh.interval.ms` in the JVM version.
		RefreshFrequency time.Duration

		// Whether to maintain a full set of metadata for all topics, or just
		// the minimal set that has been necessary so far. The full set is simpler
		// and usually more convenient, but can take up a substantial amount of
		// memory if you have many topics and partitions. Defaults to true.
		Full bool

		// How long to wait for a successful metadata response.
		// Disabled by default which means a metadata request against an unreachable
		// cluster (all brokers are unreachable or unresponsive) can take up to
		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
		// to fail.
		Timeout time.Duration

		// Whether to allow auto-create topics in metadata refresh. If set to true,
		// the broker may auto-create topics that we requested which do not already exist,
		// if it is configured to do so (`auto.create.topics.enable` is true). Defaults to true.
		AllowAutoTopicCreation bool
	}
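
	// Worked example for the worst-case bound documented on Metadata.Timeout
	// above: with the defaults (Net.DialTimeout = 30s, Metadata.Retry.Max = 3,
	// Metadata.Retry.Backoff = 250ms) and a hypothetical three-broker cluster
	// that is entirely unreachable, a metadata request can take up to
	//
	//	30s * 3 * (3 + 1) + 250ms * 3 = 360.75s
	//
	// to fail, which is why bounding it with Metadata.Timeout can be useful.
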
	// Producer is the namespace for configuration related to producing messages,
	// used by the Producer.
	Producer struct {
		// The maximum permitted size of a message (defaults to 1000000). Should be
		// set equal to or smaller than the broker's `message.max.bytes`.
		MaxMessageBytes int
		// The level of acknowledgement reliability needed from the broker (defaults
		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
		// JVM producer.
		RequiredAcks RequiredAcks
		// The maximum duration the broker will wait for the receipt of the number of
		// RequiredAcks (defaults to 10 seconds). This is only relevant when
		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
		// millisecond resolution, nanoseconds will be truncated. Equivalent to
		// the JVM producer's `request.timeout.ms` setting.
		Timeout time.Duration
		// The type of compression to use on messages (defaults to no compression).
		// Similar to `compression.codec` setting of the JVM producer.
		Compression CompressionCodec
		// The level of compression to use on messages. The meaning depends
		// on the actual compression type used and defaults to the codec's
		// default compression level.
		CompressionLevel int
		// Generates partitioners for choosing the partition to send messages to
		// (defaults to hashing the message key). Similar to the `partitioner.class`
		// setting for the JVM producer.
		Partitioner PartitionerConstructor
		// If enabled, the producer will ensure that exactly one copy of each message is
		// written.
		Idempotent bool
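
		// Enabling idempotence requires the related settings that Validate
		// enforces below; a minimal sketch:
		//
		//	cfg := NewConfig()
		//	cfg.Version = V0_11_0_0 // idempotence needs brokers >= 0.11
		//	cfg.Producer.Idempotent = true
		//	cfg.Producer.RequiredAcks = WaitForAll
		//	cfg.Producer.Retry.Max = 5 // must be >= 1
		//	cfg.Net.MaxOpenRequests = 1
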
		// Return specifies which channels will be populated. If they are set to true,
		// you must read from the respective channels to prevent deadlock. If,
		// however, this config is used to create a `SyncProducer`, both must be set
		// to true and you shall not read from the channels since the producer does
		// this internally.
		Return struct {
			// If enabled, successfully delivered messages will be returned on the
			// Successes channel (default disabled).
			Successes bool

			// If enabled, messages that failed to deliver will be returned on the
			// Errors channel, including the error that caused the failure
			// (default enabled).
			Errors bool
		}
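
		// With an AsyncProducer (part of the wider package, not this file),
		// both channels are typically drained in their own goroutines; a
		// sketch, with the broker address as a placeholder:
		//
		//	cfg.Producer.Return.Successes = true
		//	p, _ := NewAsyncProducer([]string{"localhost:9092"}, cfg)
		//	go func() {
		//		for range p.Successes() {
		//			// acknowledged deliveries
		//		}
		//	}()
		//	go func() {
		//		for err := range p.Errors() {
		//			Logger.Println(err)
		//		}
		//	}()
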
		// The following config options control how often messages are batched up and
		// sent to the broker. By default, messages are sent as fast as possible, and
		// all messages received while the current batch is in-flight are placed
		// into the subsequent batch.
		Flush struct {
			// The best-effort number of bytes needed to trigger a flush. Use the
			// global sarama.MaxRequestSize to set a hard upper limit.
			Bytes int
			// The best-effort number of messages needed to trigger a flush. Use
			// `MaxMessages` to set a hard upper limit.
			Messages int
			// The best-effort frequency of flushes. Equivalent to
			// `queue.buffering.max.ms` setting of JVM producer.
			Frequency time.Duration
			// The maximum number of messages the producer will send in a single
			// broker request. Defaults to 0 for unlimited. Similar to
			// `queue.buffering.max.messages` in the JVM producer.
			MaxMessages int
		}
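
		// A batching sketch: flush at least twice a second, or earlier once
		// roughly 64KB or 1000 messages accumulate (numbers are illustrative):
		//
		//	cfg.Producer.Flush.Frequency = 500 * time.Millisecond
		//	cfg.Producer.Flush.Bytes = 64 * 1024
		//	cfg.Producer.Flush.Messages = 1000
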
		Retry struct {
			// The total number of times to retry sending a message (default 3).
			// Similar to the `message.send.max.retries` setting of the JVM producer.
			Max int
			// How long to wait for the cluster to settle between retries
			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
			// JVM producer.
			Backoff time.Duration
			// Called to compute backoff time dynamically. Useful for implementing
			// more sophisticated backoff strategies. This takes precedence over
			// `Backoff` if set.
			BackoffFunc func(retries, maxRetries int) time.Duration
		}

		// Interceptors to be called when the producer dispatcher reads the
		// message for the first time. Interceptors allow you to intercept and
		// possibly mutate the message before it is published to the Kafka
		// cluster. The *ProducerMessage modified by the first interceptor's
		// OnSend() is passed to the second interceptor's OnSend(), and so on in
		// the interceptor chain.
		Interceptors []ProducerInterceptor
	}

	// Consumer is the namespace for configuration related to consuming messages,
	// used by the Consumer.
	Consumer struct {

		// Group is the namespace for configuring the consumer group.
		Group struct {
			Session struct {
				// The timeout used to detect consumer failures when using Kafka's group management facility.
				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
				// If no heartbeats are received by the broker before the expiration of this session timeout,
				// then the broker will remove this consumer from the group and initiate a rebalance.
				// Note that the value must be in the allowable range as configured in the broker configuration
				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
				Timeout time.Duration
			}
			Heartbeat struct {
				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
				// to facilitate rebalancing when new consumers join or leave the group.
				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
				// higher than 1/3 of that value.
				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
				Interval time.Duration
			}
			Rebalance struct {
				// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
				Strategy BalanceStrategy
				// The maximum allowed time for each worker to join the group once a rebalance has begun.
				// This is basically a limit on the amount of time needed for all tasks to flush any pending
				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
				// the group, which will cause offset commit failures (default 60s).
				Timeout time.Duration

				Retry struct {
					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
					// the load to assign partitions to each consumer. If the set of consumers changes while
					// this assignment is taking place the rebalance will fail and retry. This setting controls
					// the maximum number of attempts before giving up (default 4).
					Max int
					// Backoff time between retries during rebalance (default 2s)
					Backoff time.Duration
				}
			}
			Member struct {
				// Custom metadata to include when joining the group. The user data for all joined members
				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
				// coordinator for the group.
				UserData []byte
			}
		}
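
		// A sketch of keeping these values consistent, with Heartbeat.Interval
		// at no more than a third of Session.Timeout (values are illustrative):
		//
		//	cfg.Consumer.Group.Session.Timeout = 30 * time.Second
		//	cfg.Consumer.Group.Heartbeat.Interval = 10 * time.Second
		//	cfg.Consumer.Group.Rebalance.Timeout = 60 * time.Second
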
		Retry struct {
			// How long to wait after failing to read from a partition before
			// trying again (default 2s).
			Backoff time.Duration
			// Called to compute backoff time dynamically. Useful for implementing
			// more sophisticated backoff strategies. This takes precedence over
			// `Backoff` if set.
			BackoffFunc func(retries int) time.Duration
		}

		// Fetch is the namespace for controlling how many bytes are retrieved by any
		// given request.
		Fetch struct {
			// The minimum number of message bytes to fetch in a request - the broker
			// will wait until at least this many are available. The default is 1,
			// as 0 causes the consumer to spin when no messages are available.
			// Equivalent to the JVM's `fetch.min.bytes`.
			Min int32
			// The default number of message bytes to fetch from the broker in each
			// request (default 1MB). This should be larger than the majority of
			// your messages, or else the consumer will spend a lot of time
			// negotiating sizes and not actually consuming. Similar to the JVM's
			// `fetch.message.max.bytes`.
			Default int32
			// The maximum number of message bytes to fetch from the broker in a
			// single request. Messages larger than this will return
			// ErrMessageTooLarge and will not be consumable, so you must be sure
			// this is at least as large as your largest message. Defaults to 0
			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
			// global `sarama.MaxResponseSize` still applies.
			Max int32
		}
		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
		// bytes to become available before it returns fewer than that anyways. The
		// default is 500ms, since 0 causes the consumer to spin when no events are
		// available. 100-500ms is a reasonable range for most cases. Kafka only
		// supports precision up to milliseconds; nanoseconds will be truncated.
		// Equivalent to the JVM's `fetch.wait.max.ms`.
		MaxWaitTime time.Duration
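
		// A fetch-tuning sketch for workloads with large messages (sizes are
		// illustrative):
		//
		//	cfg.Consumer.Fetch.Default = 10 * 1024 * 1024 // 10MB per request
		//	cfg.Consumer.Fetch.Max = 50 * 1024 * 1024     // 0 would mean no limit
		//	cfg.Consumer.MaxWaitTime = 250 * time.Millisecond
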
		// The maximum amount of time the consumer expects a message takes to
		// process for the user. If writing to the Messages channel takes longer
		// than this, that partition will stop fetching more messages until it
		// can proceed again.
		// Note that, since the Messages channel is buffered, the actual grace time is
		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
		// If a message is not written to the Messages channel between two ticks
		// of the expiryTicker then a timeout is detected.
		// Using a ticker instead of a timer to detect timeouts should typically
		// result in many fewer calls to Timer functions which may result in a
		// significant performance improvement if many messages are being sent
		// and timeouts are infrequent.
		// The disadvantage of using a ticker instead of a timer is that
		// timeouts will be less accurate. That is, the effective timeout could
		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
		// between two messages being sent may not be recognized as a timeout.
		MaxProcessingTime time.Duration

		// Return specifies which channels will be populated. If they are set to true,
		// you must read from them to prevent deadlock.
		Return struct {
			// If enabled, any errors that occurred while consuming are returned on
			// the Errors channel (default disabled).
			Errors bool
		}

		// Offsets specifies configuration for how and when to commit consumed
		// offsets. This currently requires the manual use of an OffsetManager
		// but will eventually be automated.
		Offsets struct {
			// Deprecated: CommitInterval exists for historical compatibility
			// and should not be used. Please use Consumer.Offsets.AutoCommit
			CommitInterval time.Duration

			// AutoCommit specifies configuration for committing updated offsets
			// automatically.
			AutoCommit struct {
				// Whether or not to auto-commit updated offsets back to the broker
				// (default enabled).
				Enable bool

				// How frequently to commit updated offsets. Ineffective unless
				// auto-commit is enabled (default 1s)
				Interval time.Duration
			}

			// The initial offset to use if no offset was previously committed.
			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
			Initial int64

			// The retention duration for committed offsets. If zero, disabled
			// (in which case the `offsets.retention.minutes` option on the
			// broker will be used). Kafka only supports precision up to
			// milliseconds; nanoseconds will be truncated. Requires Kafka
			// broker version 0.9.0 or later.
			// (default is 0: disabled).
			Retention time.Duration

			Retry struct {
				// The total number of times to retry failing commit
				// requests during OffsetManager shutdown (default 3).
				Max int
			}
		}
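
		// A sketch of a consumer that starts from the oldest available offset
		// on first run and auto-commits every 5s (values are illustrative):
		//
		//	cfg.Consumer.Offsets.Initial = OffsetOldest
		//	cfg.Consumer.Offsets.AutoCommit.Enable = true // the default
		//	cfg.Consumer.Offsets.AutoCommit.Interval = 5 * time.Second
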
		// IsolationLevel supports two modes:
		// - use `ReadUncommitted` (default) to consume and return all messages in the message channel
		// - use `ReadCommitted` to hide messages that are part of an aborted transaction
		IsolationLevel IsolationLevel

		// Interceptors to be called just before the record is sent to the
		// messages channel. Interceptors allow you to intercept and possibly
		// mutate the message before it is returned to the client.
		// The *ConsumerMessage modified by the first interceptor's OnConsume() is
		// passed to the second interceptor's OnConsume(), and so on in the
		// interceptor chain.
		Interceptors []ConsumerInterceptor
	}

	// A user-provided string sent with every request to the brokers for logging,
	// debugging, and auditing purposes. Defaults to "sarama", but you should
	// probably set it to something specific to your application.
	ClientID string
	// A rack identifier for this client. This can be any string value which
	// indicates where this client is physically located.
	// It corresponds with the broker config 'broker.rack'
	RackID string
	// The number of events to buffer in internal and external channels. This
	// permits the producer and consumer to continue processing some messages
	// in the background while user code is working, greatly improving throughput.
	// Defaults to 256.
	ChannelBufferSize int
	// ApiVersionsRequest determines whether Sarama should send an
	// ApiVersionsRequest message to each broker as part of its initial
	// connection. This defaults to `true` to match the official Java client
	// and most third-party ones.
	ApiVersionsRequest bool
	// The version of Kafka that Sarama will assume it is running against.
	// Defaults to the oldest supported stable version. Since Kafka provides
	// backwards-compatibility, setting it to a version older than you have
	// will not break anything, although it may prevent you from using the
	// latest features. Setting it to a version greater than you are actually
	// running may lead to random breakage.
	Version KafkaVersion
	// The registry to define metrics into.
	// Defaults to a local registry.
	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
	// prior to starting Sarama.
	// See Examples on how to use the metrics registry
	MetricRegistry metrics.Registry
}

// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
	c := &Config{}

	c.Admin.Retry.Max = 5
	c.Admin.Retry.Backoff = 100 * time.Millisecond
	c.Admin.Timeout = 3 * time.Second

	c.Net.MaxOpenRequests = 5
	c.Net.DialTimeout = 30 * time.Second
	c.Net.ReadTimeout = 30 * time.Second
	c.Net.WriteTimeout = 30 * time.Second
	c.Net.SASL.Handshake = true
	c.Net.SASL.Version = SASLHandshakeV0

	c.Metadata.Retry.Max = 3
	c.Metadata.Retry.Backoff = 250 * time.Millisecond
	c.Metadata.RefreshFrequency = 10 * time.Minute
	c.Metadata.Full = true
	c.Metadata.AllowAutoTopicCreation = true

	c.Producer.MaxMessageBytes = 1000000
	c.Producer.RequiredAcks = WaitForLocal
	c.Producer.Timeout = 10 * time.Second
	c.Producer.Partitioner = NewHashPartitioner
	c.Producer.Retry.Max = 3
	c.Producer.Retry.Backoff = 100 * time.Millisecond
	c.Producer.Return.Errors = true
	c.Producer.CompressionLevel = CompressionLevelDefault

	c.Consumer.Fetch.Min = 1
	c.Consumer.Fetch.Default = 1024 * 1024
	c.Consumer.Retry.Backoff = 2 * time.Second
	c.Consumer.MaxWaitTime = 500 * time.Millisecond
	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
	c.Consumer.Return.Errors = false
	c.Consumer.Offsets.AutoCommit.Enable = true
	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
	c.Consumer.Offsets.Initial = OffsetNewest
	c.Consumer.Offsets.Retry.Max = 3

	c.Consumer.Group.Session.Timeout = 10 * time.Second
	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
	c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
	c.Consumer.Group.Rebalance.Retry.Max = 4
	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second

	c.ClientID = defaultClientID
	c.ChannelBufferSize = 256
	c.ApiVersionsRequest = true
	c.Version = DefaultVersion
	c.MetricRegistry = metrics.NewRegistry()

	return c
}
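
// A typical construction sequence (the client ID and version below are
// illustrative): adjust the returned defaults, then let Validate catch
// inconsistencies before handing the Config to a client constructor.
//
//	cfg := NewConfig()
//	cfg.ClientID = "my-service"
//	cfg.Version = V2_1_0_0
//	cfg.Producer.Return.Successes = true
//	if err := cfg.Validate(); err != nil {
//		panic(err)
//	}
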

// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
	// some configuration values should be warned on but not fail completely, do those first
	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
	}
	if !c.Net.SASL.Enable {
		if c.Net.SASL.User != "" {
			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
		}
		if c.Net.SASL.Password != "" {
			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
		}
	}
	if c.Producer.RequiredAcks > 1 {
		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
	}
	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
	}
	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
	}
	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
	}
	if c.Producer.Timeout%time.Millisecond != 0 {
		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
	}
	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
	}
	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
	}
	if c.ClientID == defaultClientID {
		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
	}

	// validate Net values
	switch {
	case c.Net.MaxOpenRequests <= 0:
		return ConfigurationError("Net.MaxOpenRequests must be > 0")
	case c.Net.DialTimeout <= 0:
		return ConfigurationError("Net.DialTimeout must be > 0")
	case c.Net.ReadTimeout <= 0:
		return ConfigurationError("Net.ReadTimeout must be > 0")
	case c.Net.WriteTimeout <= 0:
		return ConfigurationError("Net.WriteTimeout must be > 0")
	case c.Net.SASL.Enable:
		if c.Net.SASL.Mechanism == "" {
			c.Net.SASL.Mechanism = SASLTypePlaintext
		}

		switch c.Net.SASL.Mechanism {
		case SASLTypePlaintext:
			if c.Net.SASL.User == "" {
				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
			}
			if c.Net.SASL.Password == "" {
				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
			}
		case SASLTypeOAuth:
			if c.Net.SASL.TokenProvider == nil {
				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
			}
		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
			if c.Net.SASL.User == "" {
				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
			}
			if c.Net.SASL.Password == "" {
				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
			}
			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
			}
		case SASLTypeGSSAPI:
			if c.Net.SASL.GSSAPI.ServiceName == "" {
				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
			}

			if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
				if c.Net.SASL.GSSAPI.Password == "" {
					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
				}
			} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
						" and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
				}
			} else {
				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
			}
			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
			}
			if c.Net.SASL.GSSAPI.Username == "" {
				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
			}
			if c.Net.SASL.GSSAPI.Realm == "" {
				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
			}
		default:
			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
				SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
			return ConfigurationError(msg)
		}
	}

	// validate the Admin values
	switch {
	case c.Admin.Timeout <= 0:
		return ConfigurationError("Admin.Timeout must be > 0")
	}

	// validate the Metadata values
	switch {
	case c.Metadata.Retry.Max < 0:
		return ConfigurationError("Metadata.Retry.Max must be >= 0")
	case c.Metadata.Retry.Backoff < 0:
		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
	case c.Metadata.RefreshFrequency < 0:
		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
	}

	// validate the Producer values
	switch {
	case c.Producer.MaxMessageBytes <= 0:
		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
	case c.Producer.RequiredAcks < -1:
		return ConfigurationError("Producer.RequiredAcks must be >= -1")
	case c.Producer.Timeout <= 0:
		return ConfigurationError("Producer.Timeout must be > 0")
	case c.Producer.Partitioner == nil:
		return ConfigurationError("Producer.Partitioner must not be nil")
	case c.Producer.Flush.Bytes < 0:
		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
	case c.Producer.Flush.Messages < 0:
		return ConfigurationError("Producer.Flush.Messages must be >= 0")
	case c.Producer.Flush.Frequency < 0:
		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
	case c.Producer.Flush.MaxMessages < 0:
		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
	case c.Producer.Retry.Max < 0:
		return ConfigurationError("Producer.Retry.Max must be >= 0")
	case c.Producer.Retry.Backoff < 0:
		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
	}

	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
	}

	if c.Producer.Compression == CompressionGZIP {
		if c.Producer.CompressionLevel != CompressionLevelDefault {
			if _, err := gzip.NewWriterLevel(io.Discard, c.Producer.CompressionLevel); err != nil {
				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
			}
		}
	}

	if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
		return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
	}

	if c.Producer.Idempotent {
		if !c.Version.IsAtLeast(V0_11_0_0) {
			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
		}
		if c.Producer.Retry.Max == 0 {
			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
		}
		if c.Producer.RequiredAcks != WaitForAll {
			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
		}
		if c.Net.MaxOpenRequests > 1 {
			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
		}
	}

	// validate the Consumer values
	switch {
	case c.Consumer.Fetch.Min <= 0:
		return ConfigurationError("Consumer.Fetch.Min must be > 0")
	case c.Consumer.Fetch.Default <= 0:
		return ConfigurationError("Consumer.Fetch.Default must be > 0")
	case c.Consumer.Fetch.Max < 0:
		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
	case c.Consumer.MaxProcessingTime <= 0:
		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
	case c.Consumer.Retry.Backoff < 0:
		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
	case c.Consumer.Offsets.Retry.Max < 0:
		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
	case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
	}

	if c.Consumer.Offsets.CommitInterval != 0 {
		Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
			" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
	}

	// validate IsolationLevel
	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
	}

	// validate the Consumer Group values
	switch {
	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
	case c.Consumer.Group.Rebalance.Strategy == nil:
		return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
	case c.Consumer.Group.Rebalance.Retry.Max < 0:
		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
	}

	// validate misc shared values
	switch {
	case c.ChannelBufferSize < 0:
		return ConfigurationError("ChannelBufferSize must be >= 0")
	case !validID.MatchString(c.ClientID):
		return ConfigurationError("ClientID is invalid")
	}

	return nil
}

func (c *Config) getDialer() proxy.Dialer {
	if c.Net.Proxy.Enable {
		Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
		return c.Net.Proxy.Dialer
	} else {
		return &net.Dialer{
			Timeout:   c.Net.DialTimeout,
			KeepAlive: c.Net.KeepAlive,
			LocalAddr: c.Net.LocalAddr,
		}
	}
}