From 8c16f62693fac3715f6d9264a7a45a3ad885cad1 Mon Sep 17 00:00:00 2001 From: Nathan Baulch Date: Tue, 10 Sep 2024 20:15:21 +1000 Subject: [PATCH] Fix typos --- RELEASE_NOTES | 4 ++-- THIRD-PARTY | 2 +- cfg/aws/aws_sdk_logging.go | 2 +- .../refreshable_shared_credentials_provider.go | 2 +- internal/httpclient/httpclient.go | 2 +- internal/k8sCommon/k8sclient/node.go | 2 +- internal/logscommon/const.go | 2 +- internal/mapWithExpiry/mapWIthExpiry.go | 18 +++++++++--------- internal/util/user/userutil.go | 4 ++-- licensing/THIRD-PARTY-LICENSES | 2 +- packaging/darwin/amazon-cloudwatch-agent-ctl | 6 +++--- .../dependencies/amazon-cloudwatch-agent-ctl | 6 +++--- .../windows/amazon-cloudwatch-agent-ctl.ps1 | 6 +++--- plugins/inputs/logfile/fileconfig.go | 2 +- plugins/inputs/logfile/logfile.go | 6 +++--- plugins/inputs/logfile/logfile_test.go | 2 +- plugins/inputs/logfile/tail/tail.go | 6 +++--- plugins/inputs/logfile/tailersrc.go | 2 +- plugins/inputs/nvidia_smi/nvidia_smi_test.go | 2 +- plugins/inputs/prometheus/metrics_handler.go | 10 +++++----- plugins/inputs/statsd/graphite/config.go | 2 +- plugins/inputs/statsd/graphite/errors.go | 8 ++++---- plugins/inputs/statsd/graphite/parser.go | 8 ++++---- plugins/inputs/statsd/graphite/parser_test.go | 8 ++++---- plugins/inputs/win_perf_counters/README.md | 8 ++++---- plugins/inputs/win_perf_counters/pdh.go | 6 +++--- .../win_perf_counters_test.go | 2 +- .../wineventlog/wineventlog.go | 2 +- plugins/outputs/cloudwatch/aggregator.go | 2 +- plugins/outputs/cloudwatch/aggregator_test.go | 2 +- .../outputs/cloudwatchlogs/cloudwatchlogs.go | 2 +- plugins/outputs/cloudwatchlogs/pusher.go | 12 ++++++------ plugins/plugins.go | 2 +- .../normalizer/attributesnormalizer.go | 2 +- plugins/processors/ec2tagger/ec2tagger_test.go | 2 +- plugins/processors/ecsdecorator/cgroup.go | 2 +- plugins/processors/ecsdecorator/ecsinfo.go | 6 +++--- receiver/adapter/accumulator/metrics_test.go | 2 +- receiver/adapter/plugins_linux_test.go | 2 +- 
tool/clean/clean_ami/clean_ami.go | 2 +- .../clean_dedicated_host.go | 2 +- tool/clean/clean_ebs/clean_ebs.go | 2 +- tool/clean/clean_ecs/clean_ecs.go | 2 +- tool/data/config/traces.go | 2 +- .../tracesconfig/tracesconfig_test.go | 4 ++-- tool/xraydaemonmigration/migrate_test.go | 6 +++--- ...invalidMetricsWithAdditionalProperties.json | 2 +- .../sampleSchema/validWindowsMetrics.json | 2 +- translator/jsonconfig/mergeJsonConfig.go | 2 +- .../test_2/expected_output.json | 2 +- .../sampleJsonConfig/test_2/input_2.json | 2 +- .../test_4/expected_output.json | 2 +- .../sampleJsonConfig/test_4/input_3.json | 2 +- translator/processNoRuleToApply.go | 2 +- .../sampleConfig/complete_windows_config.conf | 2 +- .../sampleConfig/complete_windows_config.json | 2 +- translator/tocwconfig/tocwconfig_test.go | 6 +++--- .../translate/agent/ruleGlobalCredentials.go | 2 +- .../files/collect_list/ruleTimestampFormat.go | 4 ++-- .../translate/logs/ruleLogCredentials.go | 2 +- .../translate/metrics/ruleMetricCredentials.go | 2 +- .../translate/metrics/util/subminuteutil.go | 4 ++-- .../otel/exporter/awsemf/prometheus.go | 4 ++-- .../otel/receiver/adapter/translator.go | 6 +++--- .../otel/receiver/adapter/translators.go | 2 +- .../receiver/awscontainerinsight/translator.go | 2 +- translator/util/ecsutil/ecsutil.go | 12 ++++++------ 67 files changed, 127 insertions(+), 127 deletions(-) diff --git a/RELEASE_NOTES b/RELEASE_NOTES index c2c2124fd7..4d62fe72ab 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -45,7 +45,7 @@ Enhancements: Amazon CloudWatch Agent 1.300044.0 (2024-08-14) ======================================================================== Bug Fixes: -* [ContainerInsights] Update GPU usage metrics emitted +* [ContainerInsights] Update GPU usage metrics emitted * [ContainerInsights] Deprecate runtime tag from neuron metrics to fix false average calculation Enhancements: @@ -182,7 +182,7 @@ Enhancements: Amazon CloudWatch Agent 1.300033.0 (2024-01-31) 
======================================================================== -Enchancements: +Enhancements: * [AppSignals] Log correlation * [AppSignals] New Metric Rollup * [AppSignals] Add metrics cardinality control diff --git a/THIRD-PARTY b/THIRD-PARTY index bbd99ff166..cd0c63a0d9 100644 --- a/THIRD-PARTY +++ b/THIRD-PARTY @@ -1705,7 +1705,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------- -internal/common/binary.go in the gopsutil is copied and modifid from +internal/common/binary.go in the gopsutil is copied and modified from golang/encoding/binary.go. diff --git a/cfg/aws/aws_sdk_logging.go b/cfg/aws/aws_sdk_logging.go index e02a0c958a..f4f06a584b 100644 --- a/cfg/aws/aws_sdk_logging.go +++ b/cfg/aws/aws_sdk_logging.go @@ -27,7 +27,7 @@ var sdkLogLevel aws.LogLevelType = aws.LogOff // The levels are a bit field that is OR'd together. // So the user can specify multiple levels and we OR them together. // Example: "aws_sdk_log_level": "LogDebugWithSigning | LogDebugWithRequestErrors". -// JSON string value must contain the levels seperated by "|" and optionally whitespace. +// JSON string value must contain the levels separated by "|" and optionally whitespace. func SetSDKLogLevel(sdkLogLevelString string) { var temp aws.LogLevelType = aws.LogOff diff --git a/cfg/aws/refreshable_shared_credentials_provider.go b/cfg/aws/refreshable_shared_credentials_provider.go index 9fb0c260b2..eccdeccb5f 100644 --- a/cfg/aws/refreshable_shared_credentials_provider.go +++ b/cfg/aws/refreshable_shared_credentials_provider.go @@ -13,7 +13,7 @@ type Refreshable_shared_credentials_provider struct { credentials.Expiry sharedCredentialsProvider *credentials.SharedCredentialsProvider - // Retrival frequency, if the value is 15 minutes, the credentials will be retrieved every 15 minutes. + // Retrieval frequency, if the value is 15 minutes, the credentials will be retrieved every 15 minutes. 
ExpiryWindow time.Duration } diff --git a/internal/httpclient/httpclient.go b/internal/httpclient/httpclient.go index 0fc32170e3..14f40dc4dd 100644 --- a/internal/httpclient/httpclient.go +++ b/internal/httpclient/httpclient.go @@ -84,7 +84,7 @@ func (h *HttpClient) request(endpoint string) ([]byte, error) { } if len(body) == maxHttpResponseLength { - return nil, fmt.Errorf("response from %s, execeeds the maximum length: %v", endpoint, maxHttpResponseLength) + return nil, fmt.Errorf("response from %s, exceeds the maximum length: %v", endpoint, maxHttpResponseLength) } return body, nil } diff --git a/internal/k8sCommon/k8sclient/node.go b/internal/k8sCommon/k8sclient/node.go index 4de0f0d94e..9bafa5beaa 100644 --- a/internal/k8sCommon/k8sclient/node.go +++ b/internal/k8sCommon/k8sclient/node.go @@ -166,7 +166,7 @@ func createNodeListWatch(client kubernetes.Interface) cache.ListerWatcher { return &cache.ListWatch{ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { opts.ResourceVersion = "" - // Passing emput context as this was not required by old List() + // Passing empty context as this was not required by old List() return client.CoreV1().Nodes().List(ctx, opts) }, WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { diff --git a/internal/logscommon/const.go b/internal/logscommon/const.go index 1881603add..588492e33d 100644 --- a/internal/logscommon/const.go +++ b/internal/logscommon/const.go @@ -11,7 +11,7 @@ const ( //Field key in metrics indicting if the line is the start of the multiline. //If this key is not present, it means the multiline mode is not enabled, - // we set it to true, it indicates it is a real event, but not part of a mulltiple line. + // we set it to true, it indicates it is a real event, but not part of a multiple line. //If this key is false, it means the line is not start line of multiline entry. //If this key is true, it means the line is the start of multiline entry. 
MultiLineStartField = "multi_line_start" diff --git a/internal/mapWithExpiry/mapWIthExpiry.go b/internal/mapWithExpiry/mapWIthExpiry.go index 3e5ed0b411..175f64f69f 100644 --- a/internal/mapWithExpiry/mapWIthExpiry.go +++ b/internal/mapWithExpiry/mapWIthExpiry.go @@ -12,24 +12,24 @@ type mapEntry struct { // MapWithExpiry act like a map which provide a method to clean up expired entries type MapWithExpiry struct { - ttl time.Duration - entris map[string]*mapEntry + ttl time.Duration + entries map[string]*mapEntry } func NewMapWithExpiry(ttl time.Duration) *MapWithExpiry { - return &MapWithExpiry{ttl: ttl, entris: make(map[string]*mapEntry)} + return &MapWithExpiry{ttl: ttl, entries: make(map[string]*mapEntry)} } func (m *MapWithExpiry) CleanUp(now time.Time) { - for k, v := range m.entris { + for k, v := range m.entries { if now.Sub(v.creation) >= m.ttl { - delete(m.entris, k) + delete(m.entries, k) } } } func (m *MapWithExpiry) Get(key string) (interface{}, bool) { - res, ok := m.entris[key] + res, ok := m.entries[key] if ok { return res.content, true } @@ -37,13 +37,13 @@ func (m *MapWithExpiry) Get(key string) (interface{}, bool) { } func (m *MapWithExpiry) Set(key string, content interface{}) { - m.entris[key] = &mapEntry{content: content, creation: time.Now()} + m.entries[key] = &mapEntry{content: content, creation: time.Now()} } func (m *MapWithExpiry) Size() int { - return len(m.entris) + return len(m.entries) } func (m *MapWithExpiry) Delete(key string) { - delete(m.entris, key) + delete(m.entries, key) } diff --git a/internal/util/user/userutil.go b/internal/util/user/userutil.go index a9e26d7cdf..7acac8b4b9 100644 --- a/internal/util/user/userutil.go +++ b/internal/util/user/userutil.go @@ -55,7 +55,7 @@ func changeFileOwner(uid, gid int) error { } // chownRecursive would recursively change the ownership of the directory -// similar to `chown -R `, except it will igore any files that are: +// similar to `chown -R `, except it will ignore any files that 
are: // - Executable // - With SUID or SGID bit set // - Allow anyone to write to @@ -78,7 +78,7 @@ func chownRecursive(uid, gid int, dir string) error { } // Do not change ownership of executable files - // Perm() returns the lower 7 bit of permission of file, which represes rwxrwxrws + // Perm() returns the lower 7 bit of permission of file, which represents rwxrwxrws // 0111 maps to --x--x--x, so it would check any user have the execution right if fmode.Perm()&0111 != 0 { return nil diff --git a/licensing/THIRD-PARTY-LICENSES b/licensing/THIRD-PARTY-LICENSES index 389742d02f..fd562eeb54 100644 --- a/licensing/THIRD-PARTY-LICENSES +++ b/licensing/THIRD-PARTY-LICENSES @@ -1704,7 +1704,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ------- -internal/common/binary.go in the gopsutil is copied and modifid from +internal/common/binary.go in the gopsutil is copied and modified from golang/encoding/binary.go. diff --git a/packaging/darwin/amazon-cloudwatch-agent-ctl b/packaging/darwin/amazon-cloudwatch-agent-ctl index 31e9084c95..2e13ae1ecd 100755 --- a/packaging/darwin/amazon-cloudwatch-agent-ctl +++ b/packaging/darwin/amazon-cloudwatch-agent-ctl @@ -30,7 +30,7 @@ readonly OTEL_YAML="${CONFDIR}/amazon-cloudwatch-agent.yaml" readonly JSON="${CONFDIR}/amazon-cloudwatch-agent.json" readonly JSON_DIR="${CONFDIR}/amazon-cloudwatch-agent.d" readonly CV_LOG_FILE="${AGENTDIR}/logs/configuration-validation.log" -readonly COMMON_CONIG="${CONFDIR}/common-config.toml" +readonly COMMON_CONFIG="${CONFDIR}/common-config.toml" readonly ALL_CONFIG='all' @@ -176,7 +176,7 @@ cwa_config() { if [ "${config_location}" = "${ALL_CONFIG}" ]; then rm -rf "${JSON_DIR}"/* else - runDownloaderCommand=$("${CMDDIR}/config-downloader" --output-dir "${JSON_DIR}" --download-source "${config_location}" --mode ${mode} --config "${COMMON_CONIG}" --multi-config ${multi_config}) + runDownloaderCommand=$("${CMDDIR}/config-downloader" --output-dir "${JSON_DIR}" --download-source 
"${config_location}" --mode ${mode} --config "${COMMON_CONFIG}" --multi-config ${multi_config}) echo "${runDownloaderCommand}" fi @@ -185,7 +185,7 @@ cwa_config() { rm -f "${TOML}" rm -f "${OTEL_YAML}" else - runTranslatorCommand=$("${CMDDIR}/config-translator" --input "${JSON}" --input-dir "${JSON_DIR}" --output "${TOML}" --mode ${mode} --config "${COMMON_CONIG}" --multi-config ${multi_config}) + runTranslatorCommand=$("${CMDDIR}/config-translator" --input "${JSON}" --input-dir "${JSON_DIR}" --output "${TOML}" --mode ${mode} --config "${COMMON_CONFIG}" --multi-config ${multi_config}) echo "${runTranslatorCommand}" runAgentSchemaTestCommand="${CMDDIR}/amazon-cloudwatch-agent -schematest -config ${TOML}" diff --git a/packaging/dependencies/amazon-cloudwatch-agent-ctl b/packaging/dependencies/amazon-cloudwatch-agent-ctl index bd0d47000e..78965c580b 100755 --- a/packaging/dependencies/amazon-cloudwatch-agent-ctl +++ b/packaging/dependencies/amazon-cloudwatch-agent-ctl @@ -18,7 +18,7 @@ readonly OTEL_YAML="${CONFDIR}/amazon-cloudwatch-agent.yaml" readonly JSON="${CONFDIR}/amazon-cloudwatch-agent.json" readonly JSON_DIR="${CONFDIR}/amazon-cloudwatch-agent.d" readonly CV_LOG_FILE="${AGENTDIR}/logs/configuration-validation.log" -readonly COMMON_CONIG="${CONFDIR}/common-config.toml" +readonly COMMON_CONFIG="${CONFDIR}/common-config.toml" readonly ENV_CONFIG="${CONFDIR}/env-config.json" readonly CWA_NAME='amazon-cloudwatch-agent' @@ -263,7 +263,7 @@ cwa_config() { if [ "${cwa_config_location}" = "${ALL_CONFIG}" ]; then rm -rf "${JSON_DIR}"/* else - runDownloaderCommand=$("${CMDDIR}/config-downloader" --output-dir "${JSON_DIR}" --download-source "${cwa_config_location}" --mode ${param_mode} --config "${COMMON_CONIG}" --multi-config ${multi_config}) + runDownloaderCommand=$("${CMDDIR}/config-downloader" --output-dir "${JSON_DIR}" --download-source "${cwa_config_location}" --mode ${param_mode} --config "${COMMON_CONFIG}" --multi-config ${multi_config}) echo 
${runDownloaderCommand} || return fi @@ -273,7 +273,7 @@ cwa_config() { rm -f "${OTEL_YAML}" else echo "Start configuration validation..." - runTranslatorCommand=$("${CMDDIR}/config-translator" --input "${JSON}" --input-dir "${JSON_DIR}" --output "${TOML}" --mode ${param_mode} --config "${COMMON_CONIG}" --multi-config ${multi_config}) + runTranslatorCommand=$("${CMDDIR}/config-translator" --input "${JSON}" --input-dir "${JSON_DIR}" --output "${TOML}" --mode ${param_mode} --config "${COMMON_CONFIG}" --multi-config ${multi_config}) echo "${runTranslatorCommand}" || return runAgentSchemaTestCommand="${CMDDIR}/amazon-cloudwatch-agent -schematest -config ${TOML}" diff --git a/packaging/windows/amazon-cloudwatch-agent-ctl.ps1 b/packaging/windows/amazon-cloudwatch-agent-ctl.ps1 index a63c14bb36..8dff6bbf8a 100644 --- a/packaging/windows/amazon-cloudwatch-agent-ctl.ps1 +++ b/packaging/windows/amazon-cloudwatch-agent-ctl.ps1 @@ -91,7 +91,7 @@ $TOML="${CWAProgramData}\amazon-cloudwatch-agent.toml" $OTEL_YAML="${CWAProgramData}\amazon-cloudwatch-agent.yaml" $JSON="${CWAProgramData}\amazon-cloudwatch-agent.json" $JSON_DIR = "${CWAProgramData}\Configs" -$COMMON_CONIG="${CWAProgramData}\common-config.toml" +$COMMON_CONFIG="${CWAProgramData}\common-config.toml" $ENV_CONFIG="${CWAProgramData}\env-config.json" $EC2 = $false @@ -301,7 +301,7 @@ Function CWAConfig() { if ($ConfigLocation -eq $AllConfig) { Remove-Item -Path "${JSON_DIR}\*" -Force -ErrorAction SilentlyContinue } else { - & $CWAProgramFiles\config-downloader.exe --output-dir "${JSON_DIR}" --download-source "${ConfigLocation}" --mode "${param_mode}" --config "${COMMON_CONIG}" --multi-config "${multi_config}" + & $CWAProgramFiles\config-downloader.exe --output-dir "${JSON_DIR}" --download-source "${ConfigLocation}" --mode "${param_mode}" --config "${COMMON_CONFIG}" --multi-config "${multi_config}" CheckCMDResult } @@ -313,7 +313,7 @@ Function CWAConfig() { Remove-Item "${OTEL_YAML}" -Force -ErrorAction SilentlyContinue } 
else { Write-Output "Start configuration validation..." - & cmd /c "`"$CWAProgramFiles\config-translator.exe`" --input ${JSON} --input-dir ${JSON_DIR} --output ${TOML} --mode ${param_mode} --config ${COMMON_CONIG} --multi-config ${multi_config} 2>&1" + & cmd /c "`"$CWAProgramFiles\config-translator.exe`" --input ${JSON} --input-dir ${JSON_DIR} --output ${TOML} --mode ${param_mode} --config ${COMMON_CONFIG} --multi-config ${multi_config} 2>&1" CheckCMDResult # Let command pass so we can check return code and give user-friendly error-message $ErrorActionPreference = "Continue" diff --git a/plugins/inputs/logfile/fileconfig.go b/plugins/inputs/logfile/fileconfig.go index 00aec4f7e6..984029891a 100644 --- a/plugins/inputs/logfile/fileconfig.go +++ b/plugins/inputs/logfile/fileconfig.go @@ -61,7 +61,7 @@ type FileConfig struct { //Indicate whether to tail the log file from the beginning or not. //The default value for this field should be set as true in configuration. - //Otherwise, it may skip some log entries for timestampFromLogLine suffix roatated new file. + //Otherwise, it may skip some log entries for timestampFromLogLine suffix rotated new file. FromBeginning bool `toml:"from_beginning"` //Indicate whether it is a named pipe. 
Pipe bool `toml:"pipe"` diff --git a/plugins/inputs/logfile/logfile.go b/plugins/inputs/logfile/logfile.go index f253262812..beab97c550 100644 --- a/plugins/inputs/logfile/logfile.go +++ b/plugins/inputs/logfile/logfile.go @@ -137,7 +137,7 @@ func (t *LogFile) Start(acc telegraf.Accumulator) error { func (t *LogFile) Stop() { // Tailer srcs are stopped by log agent after the output plugin is stopped instead of here - // because the tailersrc would like to record an accurate uploaded offset + // because the tailerSrc would like to record an accurate uploaded offset close(t.done) } @@ -358,7 +358,7 @@ func (t *LogFile) cleanupStateFolder() { } for _, file := range files { if info, err := os.Stat(file); err != nil || info.IsDir() { - t.Log.Debugf("File %v does not exist or is a dirctory: %v, %v", file, err, info) + t.Log.Debugf("File %v does not exist or is a directory: %v, %v", file, err, info) continue } @@ -368,7 +368,7 @@ func (t *LogFile) cleanupStateFolder() { byteArray, err := os.ReadFile(file) if err != nil { - t.Log.Errorf("Error happens when reading the content from file %s in clean up state fodler step: %v", file, err) + t.Log.Errorf("Error happens when reading the content from file %s in clean up state folder step: %v", file, err) continue } contentArray := strings.Split(string(byteArray), "\n") diff --git a/plugins/inputs/logfile/logfile_test.go b/plugins/inputs/logfile/logfile_test.go index 2c1a2e98dd..1dddb27f77 100644 --- a/plugins/inputs/logfile/logfile_test.go +++ b/plugins/inputs/logfile/logfile_test.go @@ -553,7 +553,7 @@ append line` } func TestLogsMultilineTimeout(t *testing.T) { - // multline line starter as [^/s] + // multiline line starter as [^/s] logEntryString1 := `multiline begin append line append line` diff --git a/plugins/inputs/logfile/tail/tail.go b/plugins/inputs/logfile/tail/tail.go index 826f5c6c4b..72620c1f40 100644 --- a/plugins/inputs/logfile/tail/tail.go +++ b/plugins/inputs/logfile/tail/tail.go @@ -52,7 +52,7 @@ type limiter 
interface { // Config is used to specify how a file must be tailed. type Config struct { - // File-specifc + // File-specific Location *SeekInfo // Seek to this location before tailing ReOpen bool // Reopen recreated files (tail -F) MustExist bool // Fail early if the file does not exist @@ -137,8 +137,8 @@ func TailFile(filename string, config Config) (*Tail, error) { // Return the file's current position, like stdio's ftell(). // But this value is not very accurate. -// it may readed one line in the chan(tail.Lines), -// so it may lost one line. +// it may read one line in the chan(tail.Lines), +// so it may lose one line. func (tail *Tail) Tell() (offset int64, err error) { if tail.file == nil { return diff --git a/plugins/inputs/logfile/tailersrc.go b/plugins/inputs/logfile/tailersrc.go index 67dae23a8f..fff36977c0 100644 --- a/plugins/inputs/logfile/tailersrc.go +++ b/plugins/inputs/logfile/tailersrc.go @@ -27,7 +27,7 @@ var ( ) type fileOffset struct { - seq, offset int64 // Seq handles file trucation, when file is trucated, we increase the offset seq + seq, offset int64 // Seq handles file truncation, when file is truncated, we increase the offset seq } func (fo *fileOffset) SetOffset(o int64) { diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 3d8202fd67..554cc2fa25 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -35,7 +35,7 @@ func TestErrorBehaviorDefault(t *testing.T) { require.Error(t, plugin.Init()) } -func TestErorBehaviorIgnore(t *testing.T) { +func TestErrorBehaviorIgnore(t *testing.T) { // make sure we can't find nvidia-smi in $PATH somewhere os.Unsetenv("PATH") plugin := &NvidiaSMI{ diff --git a/plugins/inputs/prometheus/metrics_handler.go b/plugins/inputs/prometheus/metrics_handler.go index d515e015bf..43c74f5a64 100644 --- a/plugins/inputs/prometheus/metrics_handler.go +++ b/plugins/inputs/prometheus/metrics_handler.go @@ 
-67,7 +67,7 @@ func (mh *metricsHandler) handle(pmb PrometheusMetricBatch) { func (mh *metricsHandler) setEmfMetadata(mms []*metricMaterial) { for _, mm := range mms { if mh.clusterName != "" { - // Customer can specified the cluster name in the scraping job's relabel_config + // Customer can specify the cluster name in the scraping job's relabel_config // CWAgent won't overwrite in this case to support cross-cluster monitoring if _, ok := mm.tags[containerinsightscommon.ClusterNameKey]; !ok { mm.tags[containerinsightscommon.ClusterNameKey] = mh.clusterName @@ -76,17 +76,17 @@ func (mh *metricsHandler) setEmfMetadata(mms []*metricMaterial) { // Historically, for Prometheus pipelines, we use the "job" corresponding to the target in the prometheus config as the log stream name // https://github.com/aws/amazon-cloudwatch-agent/blob/59cfe656152e31ca27e7983fac4682d0c33d3316/plugins/inputs/prometheus_scraper/metrics_handler.go#L80-L84 - // As can be seen, if the "job" tag was available, the log_stream_name would be set to it and if it wasnt available for some reason, the log_stream_name would be set as "default". + // As can be seen, if the "job" tag was available, the log_stream_name would be set to it and if it wasn't available for some reason, the log_stream_name would be set as "default". // The old cloudwatchlogs exporter had logic to look for log_stream_name and if not found, it would use the log_stream_name defined in the config // https://github.com/aws/amazon-cloudwatch-agent/blob/60ca11244badf0cb3ae9dd9984c29f41d7a69302/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go#L175-L180 - // But as we see above, there should never be a case for Prometheus pipelines where log_stream_name wasnt being set in metrics_handler - so the log_stream_name in the config would have never been used. 
+ // But as we see above, there should never be a case for Prometheus pipelines where log_stream_name wasn't being set in metrics_handler - so the log_stream_name in the config would have never been used. // Now that we have switched to awsemfexporter, we leverage the token replacement logic to dynamically set the log stream name // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/897db04f747f0bda1707c916b1ec9f6c79a0c678/exporter/awsemfexporter/util.go#L29-L37 // Hence we always set the log stream name in the default exporter config as {JobName} during config translation. // If we have a "job" tag, we do NOT add a tag for "JobName" here since the fallback logic in awsemfexporter while doing pattern matching will fallback from "JobName" -> "job" and use that. - // Only when "job" tag isnt available, we set the "JobName" tag to default to retain same logic as before. - // We do it this way so we dont unnecessarily add an extra tag (that the awsemfexporter wont know to drop) for most cases where "job" will be defined. + // Only when "job" tag isn't available, we set the "JobName" tag to default to retain same logic as before. + // We do it this way so we don't unnecessarily add an extra tag (that the awsemfexporter won't know to drop) for most cases where "job" will be defined. if _, ok := mm.tags["job"]; !ok { mm.tags["JobName"] = "default" diff --git a/plugins/inputs/statsd/graphite/config.go b/plugins/inputs/statsd/graphite/config.go index 3068756c31..0929f28b89 100644 --- a/plugins/inputs/statsd/graphite/config.go +++ b/plugins/inputs/statsd/graphite/config.go @@ -10,7 +10,7 @@ import ( const ( // DefaultSeparator is the default join character to use when joining multiple - // measurment parts in a template. + // measurement parts in a template. DefaultSeparator = "." 
) diff --git a/plugins/inputs/statsd/graphite/errors.go b/plugins/inputs/statsd/graphite/errors.go index f2b425b3f7..d76c0f7872 100644 --- a/plugins/inputs/statsd/graphite/errors.go +++ b/plugins/inputs/statsd/graphite/errors.go @@ -5,13 +5,13 @@ package graphite import "fmt" -// An UnsupposedValueError is returned when a parsed value is not -// supposed. -type UnsupposedValueError struct { +// An UnsupportedValueError is returned when a parsed value is not +// supported. +type UnsupportedValueError struct { Field string Value float64 } -func (err *UnsupposedValueError) Error() string { +func (err *UnsupportedValueError) Error() string { return fmt.Sprintf(`field "%s" value: "%v" is unsupported`, err.Field, err.Value) } diff --git a/plugins/inputs/statsd/graphite/parser.go b/plugins/inputs/statsd/graphite/parser.go index 393347f535..1039209800 100644 --- a/plugins/inputs/statsd/graphite/parser.go +++ b/plugins/inputs/statsd/graphite/parser.go @@ -183,7 +183,7 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { } if math.IsNaN(v) || math.IsInf(v, 0) { - return nil, &UnsupposedValueError{Field: fields[0], Value: v} + return nil, &UnsupportedValueError{Field: fields[0], Value: v} } fieldValues := map[string]interface{}{} @@ -223,7 +223,7 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { return metric.New(measurement, tags, fieldValues, timestamp), nil } -// ApplyTempleteForMetricName extracts the template fields from the given metric name line and +// ApplyTemplateForMetricName extracts the template fields from the given metric name line and // returns the measurement name and tags. 
func (p *GraphiteParser) ApplyTemplateForMetricName(metricNameLine string) (string, map[string]string, string, error) { // decode the name and tags @@ -252,7 +252,7 @@ func (p *GraphiteParser) ApplyTemplate(line string) (string, map[string]string, return p.ApplyTemplateForMetricName(fields[0]) } -// template represents a pattern and tags to map a graphite metric string to a influxdb Point +// template represents a pattern and tags to map a graphite metric string to an influxdb Point type template struct { tags []string defaultTags map[string]string @@ -398,7 +398,7 @@ func (n *node) insert(values []string, template *template) { return } - // See if the the current element already exists in the tree. If so, insert the + // See if the current element already exists in the tree. If so, insert the // into that sub-tree for _, v := range n.children { if v.value == values[0] { diff --git a/plugins/inputs/statsd/graphite/parser_test.go b/plugins/inputs/statsd/graphite/parser_test.go index 8b266a1a5a..0569e4f5ef 100644 --- a/plugins/inputs/statsd/graphite/parser_test.go +++ b/plugins/inputs/statsd/graphite/parser_test.go @@ -132,7 +132,7 @@ func TestTemplateApply(t *testing.T) { measurement, tags, _, _ := tmpl.Apply(test.input) if measurement != test.measurement { - t.Fatalf("name parse failer. expected %v, got %v", test.measurement, measurement) + t.Fatalf("name parse failure. expected %v, got %v", test.measurement, measurement) } if len(tags) != len(test.tags) { t.Fatalf("unexpected number of tags. expected %v, got %v", test.tags, tags) @@ -234,7 +234,7 @@ func TestParseLine(t *testing.T) { continue } if metric.Name() != test.measurement { - t.Fatalf("name parse failer. expected %v, got %v", + t.Fatalf("name parse failure. expected %v, got %v", test.measurement, metric.Name()) } if len(metric.Tags()) != len(test.tags) { @@ -335,7 +335,7 @@ func TestParse(t *testing.T) { continue } if metrics[0].Name() != test.measurement { - t.Fatalf("name parse failer. 
expected %v, got %v", + t.Fatalf("name parse failure. expected %v, got %v", test.measurement, metrics[0].Name()) } if len(metrics[0].Tags()) != len(test.tags) { @@ -361,7 +361,7 @@ func TestParseNaN(t *testing.T) { _, err = p.ParseLine("servers.localhost.cpu_load NaN 1435077219") assert.Error(t, err) - if _, ok := err.(*UnsupposedValueError); !ok { + if _, ok := err.(*UnsupportedValueError); !ok { t.Fatalf("expected *ErrUnsupportedValue, got %v", reflect.TypeOf(err)) } } diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index 87a945f3c8..caf8cb8b70 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -13,7 +13,7 @@ when it is querying for all (*) as this is redundant. The examples contained in this file have been found on the internet as counters used when performance monitoring - Active Directory and IIS in perticular. + Active Directory and IIS in particular. There are a lot other good objects to monitor, if you know what to look for. This file is likely to be updated in the future with more examples for useful configurations for separate scenarios. @@ -38,7 +38,7 @@ Example: #### PreVistaSupport -Bool, if set to `true` will use the localized PerfCounter interface that is present before Vista for backwards compatability. +Bool, if set to `true` will use the localized PerfCounter interface that is present before Vista for backwards compatibility. It is recommended NOT to use this on OSes starting with Vista and newer because it requires more configuration to use this than the newer interface present since Vista. @@ -107,7 +107,7 @@ Example: `Measurement = "win_disk" This key is optional, it is a simple bool. If it is not set to true or included it is treated as false. 
This key only has an effect if Instances is set to "*" -and you would also like all instances containg _Total returned, +and you would also like all instances containing _Total returned, like "_Total", "0,_Total" and so on where applicable (Processor Information is one example). @@ -291,7 +291,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ``` -### .NET Montioring +### .NET Monitoring ``` [[inputs.win_perf_counters.object]] # .NET CLR Exceptions, in this case for IIS only diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index cd3bacd73a..00c65505e9 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -242,8 +242,8 @@ func init() { // \\LogicalDisk(C:)\% Free Space // // To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility, -// the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a -// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an +// the typeperf command, and the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a +// full implementation of the pdh.dll API, except with a GUI and all that. The registry setting also provides an // interface to the available counters, and can be found at the following key: // // HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage @@ -348,7 +348,7 @@ func PdhGetFormattedCounterValueDouble(hCounter PDH_HCOUNTER, lpdwType *uint32, // // okPath := "\\Process(*)\\% Processor Time" // notice the wildcard * character // -// // ommitted all necessary stuff ... +// // omitted all necessary stuff ... 
// // var bufSize uint32 // var bufCount uint32 diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 38e85cede4..512e6ec21d 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -312,7 +312,7 @@ func TestWinPerfcountersConfigGet7(t *testing.T) { require.NoError(t, err) // We made change to allow non-existent counter to be parsed so counters that initially not - // showing up at agent start up time would still possbily be picked up later + // showing up at agent start up time would still possibly be picked up later if len(metrics.items) != 3 { t.Errorf("expecting exactly 3 result from query but got: %v", metrics.items) } diff --git a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go index f83ce82b5b..33cb8a96a8 100644 --- a/plugins/inputs/windows_event_log/wineventlog/wineventlog.go +++ b/plugins/inputs/windows_event_log/wineventlog/wineventlog.go @@ -361,7 +361,7 @@ func (w *windowsEventLog) getRecords(handles []EvtHandle) (records []*windowsEve return records } -// getRecord attemps to render and format the message for the given EvtHandle. +// getRecord attempts to render and format the message for the given EvtHandle. func (w *windowsEventLog) getRecord(evtHandle EvtHandle) (*windowsEventLogRecord, error) { // Notes on the process: // - We first call RenderEventXML to get the publisher details. 
This piece of information is then used diff --git a/plugins/outputs/cloudwatch/aggregator.go b/plugins/outputs/cloudwatch/aggregator.go index 43c0a253c7..6724076a42 100644 --- a/plugins/outputs/cloudwatch/aggregator.go +++ b/plugins/outputs/cloudwatch/aggregator.go @@ -57,7 +57,7 @@ func getAggregationKey(m *aggregationDatum, unixTime int64) string { tmp := make([]string, len(m.Dimensions)) for i, d := range m.Dimensions { if d.Name == nil || d.Value == nil { - log.Printf("E! dimentions key and/or val is nil") + log.Printf("E! dimensions key and/or val is nil") continue } tmp[i] = fmt.Sprintf("%s=%s", *d.Name, *d.Value) diff --git a/plugins/outputs/cloudwatch/aggregator_test.go b/plugins/outputs/cloudwatch/aggregator_test.go index c41af65677..5a8034be4f 100644 --- a/plugins/outputs/cloudwatch/aggregator_test.go +++ b/plugins/outputs/cloudwatch/aggregator_test.go @@ -131,7 +131,7 @@ func TestAggregator_ShutdownBehavior(t *testing.T) { assertNoMetricsInChan(t, metricChan) } -// TestDurationAggregator_aggregating verifies the metric's timetstamp is used to aggregate. +// TestDurationAggregator_aggregating verifies the metric's timestamp is used to aggregate. // If the same metric appears multiple times in a single aggregation interval then just expect 1 aggregated metric. // If the same metric appears multiple times in different aggregation intervals then expect multiple aggregated metrics. 
func TestDurationAggregator_aggregating(t *testing.T) { diff --git a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go index 7cd89fb4b1..73faafd4d4 100644 --- a/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go +++ b/plugins/outputs/cloudwatchlogs/cloudwatchlogs.go @@ -272,7 +272,7 @@ func (c *CloudWatchLogs) getLogEventFromMetric(metric telegraf.Metric) *structur jsonMap, err := json.Marshal(content) if err != nil { - c.Log.Errorf("Unalbe to marshal structured log content: %v", err) + c.Log.Errorf("Unable to marshal structured log content: %v", err) } message = string(jsonMap) } diff --git a/plugins/outputs/cloudwatchlogs/pusher.go b/plugins/outputs/cloudwatchlogs/pusher.go index b5f8ad7e97..f9bd137bab 100644 --- a/plugins/outputs/cloudwatchlogs/pusher.go +++ b/plugins/outputs/cloudwatchlogs/pusher.go @@ -48,7 +48,7 @@ type pusher struct { doneCallbacks []func() eventsCh chan logs.LogEvent nonBlockingEventsCh chan logs.LogEvent - bufferredSize int + bufferedSize int flushTimer *time.Timer sequenceToken *string lastValidTime int64 @@ -99,7 +99,7 @@ func (p *pusher) AddEventNonBlocking(e logs.LogEvent) { p.initNonBlockingChOnce.Do(func() { p.nonBlockingEventsCh = make(chan logs.LogEvent, reqEventsLimit*2) - p.startNonBlockCh <- struct{}{} // Unblock the select loop to recogonize the channel merge + p.startNonBlockCh <- struct{}{} // Unblock the select loop to recognize the channel merge }) // Drain the channel until new event can be added @@ -165,7 +165,7 @@ func (p *pusher) start() { } size := len(*ce.Message) + eventHeaderSize - if p.bufferredSize+size > reqSizeLimit || len(p.events) == reqEventsLimit { + if p.bufferedSize+size > reqSizeLimit || len(p.events) == reqEventsLimit { p.send() } @@ -175,7 +175,7 @@ func (p *pusher) start() { p.events = append(p.events, ce) p.doneCallbacks = append(p.doneCallbacks, e.Done) - p.bufferredSize += size + p.bufferedSize += size if p.minT == nil || p.minT.After(et) { p.minT = 
&et } @@ -207,7 +207,7 @@ func (p *pusher) reset() { p.doneCallbacks[i] = nil } p.doneCallbacks = p.doneCallbacks[:0] - p.bufferredSize = 0 + p.bufferedSize = 0 p.needSort = false p.minT = nil p.maxT = nil @@ -255,7 +255,7 @@ func (p *pusher) send() { } - p.Log.Debugf("Pusher published %v log events to group: %v stream: %v with size %v KB in %v.", len(p.events), p.Group, p.Stream, p.bufferredSize/1024, time.Since(startTime)) + p.Log.Debugf("Pusher published %v log events to group: %v stream: %v with size %v KB in %v.", len(p.events), p.Group, p.Stream, p.bufferedSize/1024, time.Since(startTime)) - p.addStats("rawSize", float64(p.bufferredSize)) + p.addStats("rawSize", float64(p.bufferedSize)) p.reset() p.lastSentTime = time.Now() diff --git a/plugins/plugins.go b/plugins/plugins.go index 4bf1c8a918..0469870223 100644 --- a/plugins/plugins.go +++ b/plugins/plugins.go @@ -22,7 +22,7 @@ import ( // Enabled telegraf input plugins // NOTE: any plugins that are dependencies of the plugins enabled will be enabled too - // e.g.: cpu plguin from telegraf would enable the system plugin as its dependency + // e.g.: cpu plugin from telegraf would enable the system plugin as its dependency _ "github.com/influxdata/telegraf/plugins/inputs/cpu" _ "github.com/influxdata/telegraf/plugins/inputs/disk" _ "github.com/influxdata/telegraf/plugins/inputs/diskio" diff --git a/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go b/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go index 2e5cf826ed..e46896187d 100644 --- a/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go +++ b/plugins/processors/awsapplicationsignals/internal/normalizer/attributesnormalizer.go @@ -119,7 +119,7 @@ func (n *attributesNormalizer) copyResourceAttributesToAttributes(attributes, re } attributes.PutStr(v, resourceAttrValue.AsString()) if k == semconv.AttributeK8SPodName { - // only copy "host.id" from resource attributes to "K8s.Node" in attributesif the pod name is set + // only copy "host.id" from resource attributes to "K8s.Node" in attributes if the pod name is set if host, ok :=
resourceAttributes.Get("host.id"); ok { attributes.PutStr("K8s.Node", host.AsString()) } diff --git a/plugins/processors/ec2tagger/ec2tagger_test.go b/plugins/processors/ec2tagger/ec2tagger_test.go index 0f3d3d84c8..4a276a05e1 100644 --- a/plugins/processors/ec2tagger/ec2tagger_test.go +++ b/plugins/processors/ec2tagger/ec2tagger_test.go @@ -193,7 +193,7 @@ func (m *mockVolumeCache) Devices() []string { // pm.ResourceMetrics().At(0).ScopeMetrics().Len() == 1 // pm.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len() == len(metrics) // -// and for each metric from metrics it create one single datapoint that appy all tags/attributes from metric +// and for each metric from metrics it creates one single datapoint that applies all tags/attributes from metric func createTestMetrics(metrics []map[string]string) pmetric.Metrics { pm := pmetric.NewMetrics() rm := pm.ResourceMetrics().AppendEmpty() diff --git a/plugins/processors/ecsdecorator/cgroup.go b/plugins/processors/ecsdecorator/cgroup.go index 90ca090c0c..a076ca7851 100644 --- a/plugins/processors/ecsdecorator/cgroup.go +++ b/plugins/processors/ecsdecorator/cgroup.go @@ -148,7 +148,7 @@ func getCGroupMountPoint(mountConfigPath string) (string, error) { return "", fmt.Errorf("Found no fields post '-' in %q", text) } if postSeparatorFields[0] == "cgroup" { - // check that the mount is properly formated. + // check that the mount is properly formatted.
if numPostFields < 3 { return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) } diff --git a/plugins/processors/ecsdecorator/ecsinfo.go b/plugins/processors/ecsdecorator/ecsinfo.go index bee0a15d54..26420685a6 100644 --- a/plugins/processors/ecsdecorator/ecsinfo.go +++ b/plugins/processors/ecsdecorator/ecsinfo.go @@ -146,10 +146,10 @@ func (e *ecsInfo) getContainerInstanceIdFromArn(arn string) (containerInstanceId // When splitting the ARN with ":", the 6th segments could be either: // container-instance/47c0ab6e-2c2c-475e-9c30-b878fa7a8c3d or // container-instance/cluster-name/47c0ab6e-2c2c-475e-9c30-b878fa7a8c3d - if splitedList := strings.Split(arn, ":"); len(splitedList) >= 6 { - // Further splitting tmpResult with "/", it could be splitted into either 2 or 3 + if splitList := strings.Split(arn, ":"); len(splitList) >= 6 { + // Further splitting tmpResult with "/", it could be split into either 2 or 3 // Characters of "cluster-name" is only allowed to be letters, numbers and hyphens - tmpResult := strings.Split(splitedList[5], "/") + tmpResult := strings.Split(splitList[5], "/") if len(tmpResult) == 2 { containerInstanceId = tmpResult[1] return diff --git a/receiver/adapter/accumulator/metrics_test.go b/receiver/adapter/accumulator/metrics_test.go index 97d4f8689f..28fb3cb1e1 100644 --- a/receiver/adapter/accumulator/metrics_test.go +++ b/receiver/adapter/accumulator/metrics_test.go @@ -174,7 +174,7 @@ func Test_ConvertToOtelMetrics_WithDifferentTypes(t *testing.T) { } } -func Test_ConvertTelegrafToOtelMetrics_WithUnsupportTyped(t *testing.T) { +func Test_ConvertTelegrafToOtelMetrics_WithUnsupportedTyped(t *testing.T) { t.Helper() as := assert.New(t) diff --git a/receiver/adapter/plugins_linux_test.go b/receiver/adapter/plugins_linux_test.go index c102b32ddd..f57a750708 100644 --- a/receiver/adapter/plugins_linux_test.go +++ b/receiver/adapter/plugins_linux_test.go @@ -154,7 +154,7 @@ func Test_DiskIOPlugin(t *testing.T) { }) } -// Failing 
in Github Action; however, not local. Therefore, comment it for avoid causing disruptness and +// Failing in GitHub Action; however, not local. Therefore, comment it to avoid causing disruption and // the test only serves as sanity. /* func Test_StatsdPlugin(t *testing.T) { diff --git a/tool/clean/clean_ami/clean_ami.go b/tool/clean/clean_ami/clean_ami.go index 82ea91068a..73f0da9998 100644 --- a/tool/clean/clean_ami/clean_ami.go +++ b/tool/clean/clean_ami/clean_ami.go @@ -58,7 +58,7 @@ func cleanAMI() error { errList = append(errList, err) continue } - log.Printf("image name %v image id %v experation date %v creation date parsed %v image creation date raw %v", + log.Printf("image name %v image id %v expiration date %v creation date parsed %v image creation date raw %v", *image.Name, *image.ImageId, creationDate, expirationDate, *image.CreationDate) if expirationDate.After(creationDate) { log.Printf("Try to delete ami %s tags %v launch-date %s", *image.Name, image.Tags, *image.CreationDate) diff --git a/tool/clean/clean_dedicated_host/clean_dedicated_host.go b/tool/clean/clean_dedicated_host/clean_dedicated_host.go index b4406554e6..a721891122 100644 --- a/tool/clean/clean_dedicated_host/clean_dedicated_host.go +++ b/tool/clean/clean_dedicated_host/clean_dedicated_host.go @@ -48,7 +48,7 @@ func cleanDedicatedHost() error { dedicatedHostIds := make([]string, 0) for _, dedicatedHost := range dedicatedHosts { - log.Printf("dedicated host id %v experation date %v dedicated host creation date raw %v host state %v", + log.Printf("dedicated host id %v expiration date %v dedicated host creation date raw %v host state %v", *dedicatedHost.HostId, expirationDateDedicatedHost, *dedicatedHost.AllocationTime, dedicatedHost.State) if expirationDateDedicatedHost.After(*dedicatedHost.AllocationTime) && dedicatedHost.State == types.AllocationStateAvailable { log.Printf("Try to delete dedicated host %s tags %v launch-date %s", *dedicatedHost.HostId, dedicatedHost.Tags,
*dedicatedHost.AllocationTime) diff --git a/tool/clean/clean_ebs/clean_ebs.go b/tool/clean/clean_ebs/clean_ebs.go index 195edb0983..066d1ab60c 100644 --- a/tool/clean/clean_ebs/clean_ebs.go +++ b/tool/clean/clean_ebs/clean_ebs.go @@ -42,7 +42,7 @@ func deleteUnusedVolumes(ctx context.Context, client *ec2.Client) error { input := &ec2.DescribeVolumesInput{ Filters: []types.Filter{ { - //if the status is availble, then EBS volume is not currently attached to any ec2 instance (so not being used) + //if the status is available, then EBS volume is not currently attached to any ec2 instance (so not being used) Name: aws.String("status"), Values: []string{"available"}, }, diff --git a/tool/clean/clean_ecs/clean_ecs.go b/tool/clean/clean_ecs/clean_ecs.go index 2e7b6b74b8..2cd443cfa5 100644 --- a/tool/clean/clean_ecs/clean_ecs.go +++ b/tool/clean/clean_ecs/clean_ecs.go @@ -121,7 +121,7 @@ func terminateClusters(ctx context.Context, client *ecs.Client) { deleteServiceInput := ecs.DeleteServiceInput{Cluster: clusterId, Service: aws.String(service)} _, err := client.DeleteService(ctx, &deleteServiceInput) if err != nil { - log.Printf("Error %v deleteing service %s cluster %s", err, serviceInput, *clusterId) + log.Printf("Error %v deleting service %s cluster %s", err, serviceInput, *clusterId) continue } } diff --git a/tool/data/config/traces.go b/tool/data/config/traces.go index ba05f5eacf..7618594ac8 100644 --- a/tool/data/config/traces.go +++ b/tool/data/config/traces.go @@ -22,7 +22,7 @@ type Traces struct { BufferSizeMB int `json:"buffer_size_mb"` ResourceArn string `json:"resource_arn,omitempty"` LocalMode bool `json:"local_mode,omitempty"` //local - Insecure bool `json:"insecure, omitempty"` //noverifyssl + Insecure bool `json:"insecure,omitempty"` //noverifyssl Credentials *struct { RoleArn string `json:"role_arn,omitempty"` } `json:"credentials,omitempty"` diff --git a/tool/processors/tracesconfig/tracesconfig_test.go b/tool/processors/tracesconfig/tracesconfig_test.go 
index 2d0ec93edb..321de943e5 100644 --- a/tool/processors/tracesconfig/tracesconfig_test.go +++ b/tool/processors/tracesconfig/tracesconfig_test.go @@ -109,7 +109,7 @@ func TestGenerateTracesConfiguration(t *testing.T) { assert.JSONEq(t, string(cmdlineConfigFile), string(jsonFile)) ctx = &runtime.Context{TracesOnly: true} - //Xray run as a servie + //Xray run as a service xraydaemonmigration.GetProcesses = mockProcessesXrayService inputChan = testutil.SetUpTestInputStream() testutil.Type(inputChan, "2", "2000", "2000", "", "", "") @@ -137,7 +137,7 @@ func TestGenerateTracesConfiguration(t *testing.T) { jsonFile, err = json.Marshal(*jsonStruct) assert.JSONEq(t, string(expectedDefaultConfigFile), string(jsonFile)) - //multiple proccess chose cmdline with no args + //multiple process chose cmdline with no args xraydaemonmigration.GetProcesses = mockProcesses testutil.Type(inputChan, "3") jsonStruct, err = generateTracesConfiguration(ctx) diff --git a/tool/xraydaemonmigration/migrate_test.go b/tool/xraydaemonmigration/migrate_test.go index a5cc21403e..b509114741 100644 --- a/tool/xraydaemonmigration/migrate_test.go +++ b/tool/xraydaemonmigration/migrate_test.go @@ -135,7 +135,7 @@ func TestCovertYamlToJson(t *testing.T) { jsonStruct, err := ConvertYamlToJson(yamlFile, duplicateDaemonProcess) assert.NotNil(t, err) - exptectedFilePath := filepath.Join("testdata", "actualConfig.json") + expectedFilePath := filepath.Join("testdata", "actualConfig.json") yamlFile, err = os.ReadFile(configFilePath) assert.NoError(t, err) var correctDaemonProcess = &proc{ @@ -147,10 +147,10 @@ func TestCovertYamlToJson(t *testing.T) { jsonStruct, err = ConvertYamlToJson(yamlFile, correctDaemonProcess) assert.NoError(t, err) - exptectedFile, err := os.ReadFile(exptectedFilePath) + expectedFile, err := os.ReadFile(expectedFilePath) assert.NoError(t, err) jsonFile, err := json.MarshalIndent(jsonStruct, "", "\t") assert.NoError(t, err) - assert.JSONEq(t, string(exptectedFile), string(jsonFile)) + 
assert.JSONEq(t, string(expectedFile), string(jsonFile)) } diff --git a/translator/config/sampleSchema/invalidMetricsWithAdditionalProperties.json b/translator/config/sampleSchema/invalidMetricsWithAdditionalProperties.json index 9811026f27..a998c30d8d 100644 --- a/translator/config/sampleSchema/invalidMetricsWithAdditionalProperties.json +++ b/translator/config/sampleSchema/invalidMetricsWithAdditionalProperties.json @@ -23,7 +23,7 @@ "metric2" ], "resources": [ - "customizedInstaces" + "customizedInstances" ], "measurement": [ "customizedCounter1" diff --git a/translator/config/sampleSchema/validWindowsMetrics.json b/translator/config/sampleSchema/validWindowsMetrics.json index 2c0794c350..0bc4b14552 100644 --- a/translator/config/sampleSchema/validWindowsMetrics.json +++ b/translator/config/sampleSchema/validWindowsMetrics.json @@ -34,7 +34,7 @@ "metric2" ], "resources": [ - "customizedInstaces" + "customizedInstances" ], "measurement": [ "customizedCounter1" diff --git a/translator/jsonconfig/mergeJsonConfig.go b/translator/jsonconfig/mergeJsonConfig.go index 2ac0cb9ab1..3015640a6e 100644 --- a/translator/jsonconfig/mergeJsonConfig.go +++ b/translator/jsonconfig/mergeJsonConfig.go @@ -20,7 +20,7 @@ import ( func MergeJsonConfigMaps(jsonConfigMapMap map[string]map[string]interface{}, defaultJsonConfigMap map[string]interface{}, multiConfig string) (map[string]interface{}, error) { if len(jsonConfigMapMap) == 0 { if os.Getenv(config.USE_DEFAULT_CONFIG) == config.USE_DEFAULT_CONFIG_TRUE { - // When USE_DEFAULT_CONFIG is true, ECS and EKS will be supposed to use different default config. EKS default config logic will be added when necessary + // When USE_DEFAULT_CONFIG is true, ECS and EKS are supposed to use different default config. 
EKS default config logic will be added when necessary if ecsutil.GetECSUtilSingleton().IsECS() { log.Println("No json config files found, use the default ecs config") return util.GetJsonMapFromJsonBytes([]byte(config.DefaultECSJsonConfig())) diff --git a/translator/jsonconfig/sampleJsonConfig/test_2/expected_output.json b/translator/jsonconfig/sampleJsonConfig/test_2/expected_output.json index 85ad588d21..21bf9d9191 100644 --- a/translator/jsonconfig/sampleJsonConfig/test_2/expected_output.json +++ b/translator/jsonconfig/sampleJsonConfig/test_2/expected_output.json @@ -165,7 +165,7 @@ ], "metrics_collection_interval": 60, "resources": [ - "customizedInstaces" + "customizedInstances" ] }, "statsd": { diff --git a/translator/jsonconfig/sampleJsonConfig/test_2/input_2.json b/translator/jsonconfig/sampleJsonConfig/test_2/input_2.json index f8590d954b..3d59bf4d1b 100644 --- a/translator/jsonconfig/sampleJsonConfig/test_2/input_2.json +++ b/translator/jsonconfig/sampleJsonConfig/test_2/input_2.json @@ -34,7 +34,7 @@ "metric2" ], "resources": [ - "customizedInstaces" + "customizedInstances" ], "measurement": [ "customizedCounter1" diff --git a/translator/jsonconfig/sampleJsonConfig/test_4/expected_output.json b/translator/jsonconfig/sampleJsonConfig/test_4/expected_output.json index 7984d1a5ad..b44731fb71 100644 --- a/translator/jsonconfig/sampleJsonConfig/test_4/expected_output.json +++ b/translator/jsonconfig/sampleJsonConfig/test_4/expected_output.json @@ -177,7 +177,7 @@ ], "metrics_collection_interval": 60, "resources": [ - "customizedInstaces" + "customizedInstances" ] }, "statsd": { diff --git a/translator/jsonconfig/sampleJsonConfig/test_4/input_3.json b/translator/jsonconfig/sampleJsonConfig/test_4/input_3.json index 9b9301544a..d0bde26dae 100644 --- a/translator/jsonconfig/sampleJsonConfig/test_4/input_3.json +++ b/translator/jsonconfig/sampleJsonConfig/test_4/input_3.json @@ -38,7 +38,7 @@ ], "metrics_collection_interval": 60, "resources": [ - 
"customizedInstaces" + "customizedInstances" ] }, "statsd": { diff --git a/translator/processNoRuleToApply.go b/translator/processNoRuleToApply.go index 61443d0a88..418f6b256c 100644 --- a/translator/processNoRuleToApply.go +++ b/translator/processNoRuleToApply.go @@ -7,7 +7,7 @@ import ( "reflect" ) -// ProcessNoRuleToApply check if the the specification configuration provides some configs that don't need translation with rules. +// ProcessNoRuleToApply check if the specification configuration provides some configs that don't need translation with rules. // In this case, the translation of this config entry should be 1:1 map // In json the default configuration should be like "cpu":{"interval":"10s"} func ProcessNoRuleToApply(input interface{}, childRule map[string]Rule, result map[string]interface{}) map[string]interface{} { diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.conf b/translator/tocwconfig/sampleConfig/complete_windows_config.conf index 9d06e7e62b..462be2a44d 100644 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.conf +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.conf @@ -108,7 +108,7 @@ [[inputs.win_perf_counters.object]] Counters = ["customizedCounter1"] - Instances = ["customizedInstaces"] + Instances = ["customizedInstances"] Measurement = "customizedObjectName" ObjectName = "customizedObjectName" WarnOnMissing = true diff --git a/translator/tocwconfig/sampleConfig/complete_windows_config.json b/translator/tocwconfig/sampleConfig/complete_windows_config.json index f528ca3428..16ae54ff09 100755 --- a/translator/tocwconfig/sampleConfig/complete_windows_config.json +++ b/translator/tocwconfig/sampleConfig/complete_windows_config.json @@ -64,7 +64,7 @@ "metric2" ], "resources": [ - "customizedInstaces" + "customizedInstances" ], "measurement": [ "customizedCounter1" diff --git a/translator/tocwconfig/tocwconfig_test.go b/translator/tocwconfig/tocwconfig_test.go index ab59deafd8..50834151a3 
100644 --- a/translator/tocwconfig/tocwconfig_test.go +++ b/translator/tocwconfig/tocwconfig_test.go @@ -44,7 +44,7 @@ import ( const ( prometheusFileNameToken = "prometheusFileName" - ecsSdFileNamToken = "ecsSdFileName" + ecsSdFileNameToken = "ecsSdFileName" ) //go:embed sampleConfig/prometheus_config.yaml @@ -275,10 +275,10 @@ func TestPrometheusConfig(t *testing.T) { expectedEnvVars := map[string]string{} tokenReplacements := map[string]string{ prometheusFileNameToken: strings.ReplaceAll(prometheusConfigFileName, "\\", "\\\\"), - ecsSdFileNamToken: strings.ReplaceAll(ecsSdFileName, "\\", "\\\\"), + ecsSdFileNameToken: strings.ReplaceAll(ecsSdFileName, "\\", "\\\\"), } // Load prometheus config and replace ecs sd results file name token with temp file name - prometheusConfig = strings.ReplaceAll(prometheusConfig, "{"+ecsSdFileNamToken+"}", ecsSdFileName) + prometheusConfig = strings.ReplaceAll(prometheusConfig, "{"+ecsSdFileNameToken+"}", ecsSdFileName) // Write the modified prometheus config to temp prometheus config file err := os.WriteFile(prometheusConfigFileName, []byte(prometheusConfig), os.ModePerm) require.NoError(t, err) diff --git a/translator/translate/agent/ruleGlobalCredentials.go b/translator/translate/agent/ruleGlobalCredentials.go index b87396809c..832650999b 100644 --- a/translator/translate/agent/ruleGlobalCredentials.go +++ b/translator/translate/agent/ruleGlobalCredentials.go @@ -20,7 +20,7 @@ var credsTargetList = []string{Role_Arn_Key} func (c *GlobalCreds) ApplyRule(input interface{}) (returnKey string, returnVal interface{}) { result := map[string]interface{}{} - // Read fromm Json first. + // Read from Json first. 
if val, ok := input.(map[string]interface{})[CredentialsSectionKey]; ok { util.SetWithSameKeyIfFound(val, credsTargetList, result) } diff --git a/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go b/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go index 34690e88c9..9ba269f835 100644 --- a/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go +++ b/translator/translate/logs/logs_collected/files/collect_list/ruleTimestampFormat.go @@ -17,8 +17,8 @@ The reference time used in the layouts in Golang is the specific time. For example : "Mon Jan 2 15:04:05 MST 2006" So the TimeFormatMap records time_format code and its corresponding Golang specific reference time. -And the TimeFormatRexMap records time_format code and its corresponding regax expression. -When process the user's input, the translator will translate the timestamp_format into the Golang reference time layout and the regax expression based on those two maps. +And the TimeFormatRexMap records time_format code and its corresponding regex expression. +When process the user's input, the translator will translate the timestamp_format into the Golang reference time layout and the regex expression based on those two maps. Based on https://golang.org/src/time/format.go and http://strftime.org/, here is the mapping below: stdLongMonth // "January" //%B diff --git a/translator/translate/logs/ruleLogCredentials.go b/translator/translate/logs/ruleLogCredentials.go index 701ba1af78..96231e003b 100644 --- a/translator/translate/logs/ruleLogCredentials.go +++ b/translator/translate/logs/ruleLogCredentials.go @@ -25,7 +25,7 @@ func (c *LogCreds) ApplyRule(input interface{}) (returnKey string, returnVal int result[Role_Arn_Key] = agent.Global_Config.Role_arn } - // Read fromm Json first. + // Read from Json first. 
if val, ok := input.(map[string]interface{})[CredentialsSectionKey]; ok { util.SetWithSameKeyIfFound(val, credsTargetList, result) } diff --git a/translator/translate/metrics/ruleMetricCredentials.go b/translator/translate/metrics/ruleMetricCredentials.go index 9f7878d217..da9b0c1f30 100644 --- a/translator/translate/metrics/ruleMetricCredentials.go +++ b/translator/translate/metrics/ruleMetricCredentials.go @@ -25,7 +25,7 @@ func (c *MetricsCreds) ApplyRule(input interface{}) (returnKey string, returnVal result[Role_Arn_Key] = agent.Global_Config.Role_arn } - // Read fromm Json first. + // Read from Json first. if val, ok := input.(map[string]interface{})[CredentialsSectionKey]; ok { util.SetWithSameKeyIfFound(val, credsTargetList, result) } diff --git a/translator/translate/metrics/util/subminuteutil.go b/translator/translate/metrics/util/subminuteutil.go index 4f8b36138b..9b4936cc2b 100644 --- a/translator/translate/metrics/util/subminuteutil.go +++ b/translator/translate/metrics/util/subminuteutil.go @@ -5,11 +5,11 @@ package util import "time" -const Metric_High_Resolution_Threhold = 60 * time.Second +const Metric_High_Resolution_Threshold = 60 * time.Second func IsHighResolution(intervalVal string) bool { if actualInterval, err := time.ParseDuration(intervalVal); err == nil { - if actualInterval < Metric_High_Resolution_Threhold { + if actualInterval < Metric_High_Resolution_Threshold { return true } } diff --git a/translator/translate/otel/exporter/awsemf/prometheus.go b/translator/translate/otel/exporter/awsemf/prometheus.go index 0e72c337f5..8307fcc109 100644 --- a/translator/translate/otel/exporter/awsemf/prometheus.go +++ b/translator/translate/otel/exporter/awsemf/prometheus.go @@ -19,7 +19,7 @@ import ( const ( metricUnit = "metric_unit" metricNamespace = "metric_namespace" - metricDeclartion = "metric_declaration" + metricDeclaration = "metric_declaration" ecsDefaultCloudWatchNamespace = "ECS/ContainerInsights/Prometheus" k8sDefaultCloudWatchNamespace 
= "ContainerInsights/Prometheus" ec2DefaultCloudWatchNamespace = "CWAgent/Prometheus" @@ -97,7 +97,7 @@ func setPrometheusMetricDescriptors(conf *confmap.Conf, cfg *awsemfexporter.Conf } func setPrometheusMetricDeclarations(conf *confmap.Conf, cfg *awsemfexporter.Config) error { - metricDeclarationKey := common.ConfigKey(emfProcessorBasePathKey, metricDeclartion) + metricDeclarationKey := common.ConfigKey(emfProcessorBasePathKey, metricDeclaration) if !conf.IsSet(metricDeclarationKey) { return nil } diff --git a/translator/translate/otel/receiver/adapter/translator.go b/translator/translate/otel/receiver/adapter/translator.go index d180ef6190..8c05e11bb1 100644 --- a/translator/translate/otel/receiver/adapter/translator.go +++ b/translator/translate/otel/receiver/adapter/translator.go @@ -24,12 +24,12 @@ type translator struct { // See otel.ConfigKey. cfgKey string - // preferMetricCollectionInterval is an option to using the preferaable metric collection interval before + // preferMetricCollectionInterval is an option to use the preferable metric collection interval before // using the interval key chain and defaultMetricCollectionInterval preferMetricCollectionInterval time.Duration // defaultMetricCollectionInterval is the fallback interval if it - // it is not present in the interval keychain. + // is not present in the interval keychain. defaultMetricCollectionInterval time.Duration } @@ -72,7 +72,7 @@ func (t *translator) Translate(conf *confmap.Conf) (component.Config, error) { } cfg.AliasName = t.name - // The fall back interval is 0 when there is no plugin's collection interval or the plugin's collection interval cannot be scraped. + // The fallback interval is 0 when there is no plugin's collection interval or the plugin's collection interval cannot be scraped.
// Therefore, using 0 as a gate for procstat plugin if t.preferMetricCollectionInterval != time.Duration(0) { cfg.CollectionInterval = t.preferMetricCollectionInterval diff --git a/translator/translate/otel/receiver/adapter/translators.go b/translator/translate/otel/receiver/adapter/translators.go index 0b9484e7c0..cd81667a81 100644 --- a/translator/translate/otel/receiver/adapter/translators.go +++ b/translator/translate/otel/receiver/adapter/translators.go @@ -208,7 +208,7 @@ func fromMultipleInput(conf *confmap.Conf, inputName, os string) common.Translat */ for _, procStatKey := range common.GetArray[any](conf, cfgKey) { // Each of the procstat monitored process has their own process; therefore, overriding the interval key chain - // and setting dirrectly + // and setting directly psKey := procStatKey.(map[string]interface{}) psCollectionInterval, _ := common.ParseDuration(psKey[common.MetricsCollectionIntervalKey]) diff --git a/translator/translate/otel/receiver/awscontainerinsight/translator.go b/translator/translate/otel/receiver/awscontainerinsight/translator.go index 193fdbe87b..8a4bdcaa20 100644 --- a/translator/translate/otel/receiver/awscontainerinsight/translator.go +++ b/translator/translate/otel/receiver/awscontainerinsight/translator.go @@ -31,7 +31,7 @@ const ( eks = "eks" defaultMetricsCollectionInterval = time.Minute - defaultLeaderLockName = "cwagent-clusterleader" // To maintain backwards compatability with https://github.com/aws/amazon-cloudwatch-agent/blob/2dd89abaab4590cffbbc31ef89319b62809b09d1/plugins/inputs/k8sapiserver/k8sapiserver.go#L30 + defaultLeaderLockName = "cwagent-clusterleader" // To maintain backwards compatibility with https://github.com/aws/amazon-cloudwatch-agent/blob/2dd89abaab4590cffbbc31ef89319b62809b09d1/plugins/inputs/k8sapiserver/k8sapiserver.go#L30 ) type translator struct { diff --git a/translator/util/ecsutil/ecsutil.go b/translator/util/ecsutil/ecsutil.go index 04743f50cf..25e481f587 100644 --- 
a/translator/util/ecsutil/ecsutil.go +++ b/translator/util/ecsutil/ecsutil.go @@ -100,21 +100,21 @@ func (e *ecsUtil) getMetadataResponse(endpoint string) (em *ecsMetadataResponse, // arn:aws:ecs:region:aws_account_id:task/cluster-name/task-id // This function will return region extracted from Task ARN func (e *ecsUtil) parseRegion(em *ecsMetadataResponse) { - splitedContent := strings.Split(em.TaskARN, ":") + splitContent := strings.Split(em.TaskARN, ":") // When splitting the ARN with ":", the 4th segment is the region - if len(splitedContent) < 4 { + if len(splitContent) < 4 { log.Printf("E! Invalid ecs task arn: %s", em.TaskARN) } - e.Region = splitedContent[3] + e.Region = splitContent[3] } // There is only one format for ClusterArn (https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Cluster.html) // arn:aws:ecs:region:aws_account_id:cluster/cluster-name func (e *ecsUtil) parseClusterName(em *ecsMetadataResponse) { - splitedContent := strings.Split(em.Cluster, "/") + splitContent := strings.Split(em.Cluster, "/") // When splitting the ClusterName with /, the last is always the cluster name - if len(splitedContent) == 0 { + if len(splitContent) == 0 { log.Printf("E! Invalid cluster arn: %s", em.Cluster) } - e.Cluster = splitedContent[len(splitedContent)-1] + e.Cluster = splitContent[len(splitContent)-1] }