From 0708e19a22f230557b769b012527379716af1e25 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Wed, 19 Feb 2025 09:34:47 +0100 Subject: [PATCH 01/41] [x-pack/winlogbeat] Port pipeline changes from integrations (#42619) * Port pipeline changes from integrations * Sync windows pipelines * add changelog entry --- CHANGELOG.next.asciidoc | 1 + .../module/powershell/ingest/powershell.yml | 170 +- .../ingest/powershell_operational.yml | 176 +- .../module/security/ingest/security.yml | 2219 +++++++---------- .../module/sysmon/ingest/sysmon.yml | 256 +- 5 files changed, 1402 insertions(+), 1420 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index bdf657982986..134ba65ff8fc 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -277,6 +277,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] *Winlogbeat* - Fix message handling in the experimental api. {issue}19338[19338] {pull}41730[41730] +- Sync missing changes in modules pipelines. {pull}42619[42619] *Elastic Logging Plugin* diff --git a/x-pack/winlogbeat/module/powershell/ingest/powershell.yml b/x-pack/winlogbeat/module/powershell/ingest/powershell.yml index 34f537bce36a..d14a9e25aa32 100644 --- a/x-pack/winlogbeat/module/powershell/ingest/powershell.yml +++ b/x-pack/winlogbeat/module/powershell/ingest/powershell.yml @@ -9,35 +9,65 @@ processors: trim_key: "\n\t" trim_value: "\n\t" value_split: "=" - if: ctx?.winlog?.event_id == "800" - - kv: - description: Split Events 4xx and 600 event data fields. - field: winlog.event_data.param3 - target_field: winlog.event_data - field_split: "\n\t" - trim_key: "\n\t" - trim_value: "\n\t" - value_split: "=" - if: ctx?.winlog?.event_id != "800" + if: ctx.winlog?.event_id == "800" + - script: + description: |- + Split Events 4xx and 600 event data fields. + Some events can contain multiline values containing also '\n', '\s', and '=' characters, + for this reason a simple KV processor is not reliable enough and we need a more specific parsing. + lang: painless + if: ctx.winlog?.event_id != "800" && ctx.winlog?.event_data?.param3 != null + params: + field: param3 + source: |- + def p = ctx.winlog?.event_data[params["field"]]; + // Define the pattern that will match all keys + def pat = /(^|(^[\n]?))?\t([^\s\W]+)=/m; + def m = pat.matcher(p); + + // we position ourselves in the first matching key + m.find(); + def key = m.group(3).trim(); + def previousEnd = m.end(); + + // while new keys are found, we add everything between one key and the next + // as the value, regardless of its contents + while(m.find()) + { + ctx.winlog.event_data[key] = p.substring(previousEnd, m.start()).trim(); + previousEnd = m.end(); + key = m.group(3).trim(); + } + + // add remaining value + ctx.winlog.event_data[key] = p.substring(previousEnd).trim(); ## ECS and Event fields. 
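A note on the pattern introduced in this section: the date processor below replaces the old `ignore_failure: true` with an explicit `on_failure` chain that drops the unparseable field, appends an error tagged with the failing processor, and fails the document. A minimal standalone sketch of the same convention, using a hypothetical `my_module.timestamp` field (the field name and tag here are illustrative, not part of this patch):

processors:
  - date:
      field: my_module.timestamp            # hypothetical source field
      tag: "my_module_timestamp_date"       # the tag is echoed into error.message
      formats:
        - ISO8601
      if: ctx.my_module?.timestamp != null
      on_failure:
        # drop the bad value so a date-mapped target field cannot reject the document
        - remove:
            field: my_module.timestamp
            ignore_failure: true
        # record which processor failed, via its tag
        - append:
            field: error.message
            value: "fail-{{{ _ingest.on_failure_processor_tag }}}"
        # surface the failure instead of silently indexing a mis-dated event
        - fail:
            message: "Processor {{ _ingest.on_failure_processor_type }} with tag {{ _ingest.on_failure_processor_tag }} failed with message: {{ _ingest.on_failure_message }}"

The explicit fail bubbles up to the pipeline-level on_failure handler added at the bottom of this file, so the event is flagged with event.kind: pipeline_error rather than passing through with a wrong or missing timestamp.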
- set: field: ecs.version - value: '1.12.0' + value: '8.0.0' - set: field: log.level copy_from: winlog.level ignore_empty_value: true ignore_failure: true - if: ctx?.winlog?.level != "" + if: ctx.winlog?.level != "" - date: field: winlog.time_created + tag: "time_created_date" formats: - ISO8601 - ignore_failure: true - if: ctx?.winlog?.time_created != null - + if: ctx.winlog?.time_created != null + on_failure: + - remove: + field: winlog.time_created + ignore_failure: true + - append: + field: error.message + value: "fail-{{{ _ingest.on_failure_processor_tag }}}" + - fail: + message: "Processor {{ _ingest.on_failure_processor_type }} with tag {{ _ingest.on_failure_processor_tag }} in pipeline {{ _ingest.on_failure_pipeline }} failed with message: {{ _ingest.on_failure_message }}" - set: field: event.module value: powershell @@ -49,19 +79,19 @@ processors: value: '{{winlog.event_id}}' - set: field: event.category - value: process + value: ["process"] - set: field: event.type - value: start - if: ctx?.event.code == "400" + value: ["start"] + if: ctx.event.code == "400" - set: field: event.type - value: end - if: ctx?.event.code == "403" + value: ["end"] + if: ctx.event.code == "403" - set: field: event.type - value: info - if: ctx?.event?.type == null + value: ["info"] + if: ctx.event?.type == null - convert: field: winlog.event_data.SequenceNumber target_field: event.sequence @@ -81,19 +111,19 @@ processors: target_field: process.entity_id ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.HostId != "" + if: ctx.winlog?.event_data?.HostId != "" - rename: field: winlog.event_data.HostApplication target_field: process.command_line ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.HostApplication != "" + if: ctx.winlog?.event_data?.HostApplication != "" - rename: field: winlog.event_data.HostName target_field: process.title ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.HostName != "" + if: ctx.winlog?.event_data?.HostName != "" ## User fields. @@ -101,25 +131,48 @@ processors: field: winlog.event_data.UserId target_field: "_temp.user_parts" separator: '\\' - if: ctx?.winlog?.event_data?.UserId != null + if: ctx.winlog?.event_data?.UserId != null - set: field: user.domain value: "{{_temp.user_parts.0}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 + if: ctx._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 - set: field: user.name value: "{{_temp.user_parts.1}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 + if: ctx._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 - append: field: related.user value: "{{user.name}}" ignore_failure: true allow_duplicates: false - if: ctx?.user?.name != null + if: ctx.user?.name != null + # Get user details from the translate_sid processor enrichment + # if they are available and we don't already have them. 
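The `_Member*` fields consumed by the next few processors do not exist in the raw event; they are added on the collector side by Winlogbeat's `translate_sid` processor before the event reaches this pipeline. A hedged sketch of the producing configuration (the source SID field is an assumption here; it varies by event ID):

# winlogbeat.yml, collector side
processors:
  - translate_sid:
      field: winlog.event_data.MemberSid              # assumed SID source field
      account_name_target: winlog.event_data._MemberUserName
      domain_target: winlog.event_data._MemberDomain
      account_type_target: winlog.event_data._MemberAccountType
      ignore_missing: true
      ignore_failure: true

The renames below then promote the resolved name and domain into ECS user.* fields, and the temporary account-type field is removed once its value is reflected in user.roles.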
+ - rename: + field: winlog.event_data._MemberUserName + target_field: user.name + ignore_failure: true + ignore_missing: true + - rename: + field: winlog.event_data._MemberDomain + target_field: user.domain + ignore_failure: true + ignore_missing: true + - append: + value: '{{{winlog.event_data._MemberAccountType}}}' + field: user.roles + ignore_failure: true + allow_duplicates: false + if: ctx.winlog?.event_data?._MemberAccountType != null + - remove: + field: winlog.event_data._MemberAccountType + ignore_missing: true + ignore_failure: true + if: ctx.user?.roles != null && ctx.winlog?.event_data?._MemberAccountType != null && ctx.user.roles.contains(ctx.winlog.event_data._MemberAccountType) ## PowerShell fields. @@ -128,87 +181,87 @@ processors: target_field: powershell.engine.new_state ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.NewEngineState != "" + if: ctx.winlog?.event_data?.NewEngineState != "" - rename: field: winlog.event_data.PreviousEngineState target_field: powershell.engine.previous_state ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.PreviousEngineState != "" + if: ctx.winlog?.event_data?.PreviousEngineState != "" - rename: field: winlog.event_data.NewProviderState target_field: powershell.provider.new_state ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.NewProviderState != "" + if: ctx.winlog?.event_data?.NewProviderState != "" - rename: field: winlog.event_data.ProviderName target_field: powershell.provider.name ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.ProviderName != "" + if: ctx.winlog?.event_data?.ProviderName != "" - convert: field: winlog.event_data.DetailTotal target_field: powershell.total type: long ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.DetailTotal != "" + if: ctx.winlog?.event_data?.DetailTotal != "" - convert: field: winlog.event_data.DetailSequence target_field: powershell.sequence type: long ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.DetailSequence != "" + if: ctx.winlog?.event_data?.DetailSequence != "" - rename: field: winlog.event_data.EngineVersion target_field: powershell.engine.version ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.EngineVersion != "" + if: ctx.winlog?.event_data?.EngineVersion != "" - rename: field: winlog.event_data.PipelineId target_field: powershell.pipeline_id ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.PipelineId != "" + if: ctx.winlog?.event_data?.PipelineId != "" - rename: field: winlog.event_data.RunspaceId target_field: powershell.runspace_id ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.RunspaceId != "" + if: ctx.winlog?.event_data?.RunspaceId != "" - rename: field: winlog.event_data.HostVersion target_field: powershell.process.executable_version ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.HostVersion != "" + if: ctx.winlog?.event_data?.HostVersion != "" - rename: field: winlog.event_data.CommandLine target_field: powershell.command.value ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandLine != "" + if: ctx.winlog?.event_data?.CommandLine != "" - rename: field: winlog.event_data.CommandPath target_field: powershell.command.path ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandPath != "" + if: ctx.winlog?.event_data?.CommandPath != "" - rename: field: winlog.event_data.CommandName 
target_field: powershell.command.name ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandName != "" + if: ctx.winlog?.event_data?.CommandName != "" - rename: field: winlog.event_data.CommandType target_field: powershell.command.type ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandType != "" + if: ctx.winlog?.event_data?.CommandType != "" - split: description: Split Event 800 command invocation details. @@ -276,7 +329,7 @@ processors: ]; } - if (ctx?._temp == null) { + if (ctx._temp == null) { ctx._temp = new HashMap(); } @@ -284,7 +337,7 @@ processors: ctx._temp.details = new ArrayList(); } - def values = ctx?.winlog?.event_data[params["field"]]; + def values = ctx.winlog?.event_data[params["field"]]; if (values != null && values.length > 0) { for (v in values) { ctx._temp.details.add(parseRawDetail(v)); @@ -293,12 +346,12 @@ processors: - rename: field: _temp.details target_field: powershell.command.invocation_details - if: ctx?._temp?.details != null && ctx?._temp?.details.length > 0 + if: ctx._temp?.details != null && ctx._temp?.details.length > 0 - script: description: Implements Windows-like SplitCommandLine lang: painless - if: ctx?.process?.command_line != null && ctx.process.command_line != "" + if: ctx.process?.command_line != null && ctx.process.command_line != "" source: |- // appendBSBytes appends n '\\' bytes to b and returns the resulting slice. def appendBSBytes(StringBuilder b, int n) { @@ -377,12 +430,12 @@ processors: - script: description: Adds file information. lang: painless - if: ctx?.winlog?.event_data?.ScriptName != null && ctx.winlog.event_data.ScriptName.length() > 1 + if: ctx.winlog?.event_data?.ScriptName != null && ctx.winlog.event_data.ScriptName.length() > 1 source: |- def path = ctx.winlog.event_data.ScriptName; def idx = path.lastIndexOf("\\"); if (idx > -1) { - if (ctx?.file == null) { + if (ctx.file == null) { ctx.file = new HashMap(); } ctx.file.name = path.substring(idx+1); @@ -398,7 +451,12 @@ processors: target_field: file.path ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.ScriptName != "" + if: ctx.winlog?.event_data?.ScriptName != "" + + - convert: + field: error.code + type: string + ignore_missing: true ## Cleanup. @@ -419,15 +477,19 @@ processors: - script: description: Remove all empty values from event_data. lang: painless - source: ctx?.winlog?.event_data?.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue().equals("")); + source: ctx.winlog?.event_data?.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue().equals("")); + if: ctx.winlog?.event_data != null - remove: description: Remove empty event data. 
field: winlog.event_data ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data != null && ctx.winlog.event_data.size() == 0 + if: ctx.winlog?.event_data != null && ctx.winlog.event_data.size() == 0 on_failure: - set: - field: "error.message" - value: "{{ _ingest.on_failure_message }}" + field: event.kind + value: pipeline_error + - append: + field: error.message + value: "{{{ _ingest.on_failure_message }}}" diff --git a/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml b/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml index db36f6aed995..206d25db3de8 100644 --- a/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml +++ b/x-pack/winlogbeat/module/powershell/ingest/powershell_operational.yml @@ -8,12 +8,12 @@ processors: field_split: "\n" trim_key: " \n\t" trim_value: " \n\t" - value_split: "=" - if: ctx?.winlog?.event_id == "4103" + value_split: "[:=]" + if: ctx.winlog?.event_id == "4103" - script: description: Remove spaces from all event_data keys. lang: painless - if: ctx?.winlog?.event_data != null + if: ctx.winlog?.event_data != null source: |- def newEventData = new HashMap(); for (entry in ctx.winlog.event_data.entrySet()) { @@ -26,20 +26,28 @@ processors: - set: field: ecs.version - value: '1.12.0' + value: '8.0.0' - set: field: log.level copy_from: winlog.level ignore_empty_value: true ignore_failure: true - if: ctx?.winlog?.level != "" + if: ctx.winlog?.level != "" - date: field: winlog.time_created + tag: "time_created_date" formats: - ISO8601 - ignore_failure: true - if: ctx?.winlog?.time_created != null - + if: ctx.winlog?.time_created != null + on_failure: + - remove: + field: winlog.time_created + ignore_failure: true + - append: + field: error.message + value: "fail-{{{ _ingest.on_failure_processor_tag }}}" + - fail: + message: "Processor {{ _ingest.on_failure_processor_type }} with tag {{ _ingest.on_failure_processor_tag }} in pipeline {{ _ingest.on_failure_pipeline }} failed with message: {{ _ingest.on_failure_message }}" - set: field: event.module value: powershell @@ -51,19 +59,19 @@ processors: value: '{{winlog.event_id}}' - set: field: event.category - value: process + value: ["process"] - set: field: event.type - value: start - if: ctx?.event.code == "4105" + value: ["start"] + if: ctx.event.code == "4105" - set: field: event.type - value: end - if: ctx?.event.code == "4106" + value: ["end"] + if: ctx.event.code == "4106" - set: field: event.type - value: info - if: ctx?.event?.type == null + value: ["info"] + if: ctx.event?.type == null - convert: field: winlog.event_data.SequenceNumber target_field: event.sequence @@ -83,19 +91,19 @@ processors: target_field: process.entity_id ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.HostID != "" + if: ctx.winlog?.event_data?.HostID != "" - rename: field: winlog.event_data.HostApplication target_field: process.command_line ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.HostApplication != "" + if: ctx.winlog?.event_data?.HostApplication != "" - rename: field: winlog.event_data.HostName target_field: process.title ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.HostName != "" + if: ctx.winlog?.event_data?.HostName != "" ## User fields. 
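The split/set sequence that follows is the usual `DOMAIN\user` decomposition. The `size() == 2` guard is what keeps a bare account name (no domain prefix) from being misread as a domain. The same pattern, condensed, against a hypothetical `winlog.event_data.SomeAccount` field:

processors:
  - split:
      field: winlog.event_data.SomeAccount     # hypothetical "DOMAIN\\user" value
      target_field: _temp.account_parts
      separator: '\\'
      if: ctx.winlog?.event_data?.SomeAccount != null
  - set:
      field: user.domain
      value: "{{_temp.account_parts.0}}"
      ignore_empty_value: true
      # only a two-element result is an unambiguous DOMAIN\user pair
      if: ctx._temp?.account_parts != null && ctx._temp.account_parts.size() == 2
  - set:
      field: user.name
      value: "{{_temp.account_parts.1}}"
      ignore_empty_value: true
      if: ctx._temp?.account_parts != null && ctx._temp.account_parts.size() == 2
  - append:
      field: related.user
      value: "{{user.name}}"
      allow_duplicates: false
      if: ctx.user?.name != null

When the event also carries a ConnectedUser, the processors below additionally shift the primary user into destination.user.* and the connected user into source.user.* (and back into user.*), reflecting the direction of the remoting session.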
@@ -108,72 +116,95 @@ processors: field: winlog.event_data.User target_field: "_temp.user_parts" separator: '\\' - if: ctx?.winlog?.event_data?.User != null + if: ctx.winlog?.event_data?.User != null - set: field: user.domain value: "{{_temp.user_parts.0}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 + if: ctx._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 - set: field: user.name value: "{{_temp.user_parts.1}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 + if: ctx._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 - append: field: related.user value: "{{user.name}}" ignore_failure: true allow_duplicates: false - if: ctx?.user?.name != null + if: ctx.user?.name != null - split: field: winlog.event_data.ConnectedUser target_field: "_temp.connected_user_parts" separator: '\\' - if: ctx?.winlog?.event_data?.ConnectedUser != null + if: ctx.winlog?.event_data?.ConnectedUser != null - set: field: source.user.domain value: "{{_temp.connected_user_parts.0}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.connected_user_parts != null && ctx._temp.connected_user_parts.size() == 2 + if: ctx._temp?.connected_user_parts != null && ctx._temp.connected_user_parts.size() == 2 - set: field: source.user.name value: "{{_temp.connected_user_parts.1}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.connected_user_parts != null && ctx._temp.connected_user_parts.size() == 2 + if: ctx._temp?.connected_user_parts != null && ctx._temp.connected_user_parts.size() == 2 - append: field: related.user value: "{{source.user.name}}" ignore_failure: true allow_duplicates: false - if: ctx?.source?.user?.name != null + if: ctx.source?.user?.name != null - rename: field: user.domain target_field: destination.user.domain ignore_failure: true ignore_missing: true - if: ctx?.source?.user != null + if: ctx.source?.user != null - rename: field: user.name target_field: destination.user.name ignore_failure: true ignore_missing: true - if: ctx?.source?.user != null + if: ctx.source?.user != null - set: field: user.domain copy_from: source.user.domain ignore_failure: true ignore_empty_value: true - if: ctx?.source?.user != null + if: ctx.source?.user != null - set: field: user.name copy_from: source.user.name ignore_failure: true ignore_empty_value: true - if: ctx?.source?.user != null + if: ctx.source?.user != null + # Get user details from the translate_sid processor enrichment + # if they are available and we don't already have them. + - rename: + field: winlog.event_data._MemberUserName + target_field: user.name + ignore_failure: true + ignore_missing: true + - rename: + field: winlog.event_data._MemberDomain + target_field: user.domain + ignore_failure: true + ignore_missing: true + - append: + value: '{{{winlog.event_data._MemberAccountType}}}' + field: user.roles + ignore_failure: true + allow_duplicates: false + if: ctx.winlog?.event_data?._MemberAccountType != null + - remove: + field: winlog.event_data._MemberAccountType + ignore_missing: true + ignore_failure: true + if: ctx.user?.roles != null && ctx.winlog?.event_data?._MemberAccountType != null && ctx.user.roles.contains(ctx.winlog.event_data._MemberAccountType) ## PowerShell fields. 
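One addition in this section worth calling out: for script-block events, the pipeline now splits off the Authenticode signature comment and derives a whitespace-insensitive hash of the script text, so re-chunked or re-indented copies of the same script correlate to a single value. The dissect/gsub/fingerprint chain from the hunk below, reproduced in isolation with comments:

processors:
  # carve the signature block out of the script text
  - dissect:
      field: powershell.file.script_block_text
      pattern: "# SIG # Begin signature block%{powershell.file.script_block_signature}# SIG # End signature block"
      ignore_missing: true
      ignore_failure: true
  # strip the leading "# " comment markers from the signature lines
  - gsub:
      field: powershell.file.script_block_signature
      pattern: "\\n# "
      replacement: ""
      ignore_missing: true
  # normalize away all whitespace into a temp field so formatting
  # differences do not change the hash
  - gsub:
      field: powershell.file.script_block_text
      target_field: _temp.script_block_no_space
      pattern: "\\s"
      replacement: ""
      ignore_missing: true
  # hash the normalized text; _temp.* is dropped again in the cleanup section
  - fingerprint:
      fields:
        - _temp.script_block_no_space
      target_field: powershell.file.script_block_hash
      ignore_missing: true

Because the hash is computed over whitespace-stripped text, it will not match a digest of the original script file; it is best treated as a correlation key across events rather than an externally comparable IOC.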
@@ -194,76 +225,100 @@ processors: target_field: powershell.id ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.ShellID != "" + if: ctx.winlog?.event_data?.ShellID != "" - rename: field: winlog.event_data.EngineVersion target_field: powershell.engine.version ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.EngineVersion != "" + if: ctx.winlog?.event_data?.EngineVersion != "" - rename: field: winlog.event_data.PipelineID target_field: powershell.pipeline_id ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.PipelineID != "" + if: ctx.winlog?.event_data?.PipelineID != "" - rename: field: winlog.event_data.RunspaceID target_field: powershell.runspace_id ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.RunspaceID != "" + if: ctx.winlog?.event_data?.RunspaceID != "" - rename: field: winlog.event_data.RunspaceId target_field: powershell.runspace_id ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.RunspaceId != "" + if: ctx.winlog?.event_data?.RunspaceId != "" - rename: field: winlog.event_data.HostVersion target_field: powershell.process.executable_version ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data?.HostVersion != "" + if: ctx.winlog?.event_data?.HostVersion != "" - rename: field: winlog.event_data.CommandLine target_field: powershell.command.value ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandLine != "" + if: ctx.winlog?.event_data?.CommandLine != "" - rename: field: winlog.event_data.CommandPath target_field: powershell.command.path ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandPath != "" + if: ctx.winlog?.event_data?.CommandPath != "" - rename: field: winlog.event_data.CommandName target_field: powershell.command.name ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandName != "" + if: ctx.winlog?.event_data?.CommandName != "" - rename: field: winlog.event_data.CommandType target_field: powershell.command.type ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.CommandType != "" + if: ctx.winlog?.event_data?.CommandType != "" - rename: field: winlog.event_data.ScriptBlockId target_field: powershell.file.script_block_id ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.ScriptBlockId != "" + if: ctx.winlog?.event_data?.ScriptBlockId != "" - rename: field: winlog.event_data.ScriptBlockText target_field: powershell.file.script_block_text ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.ScriptBlockText != "" + if: ctx.winlog?.event_data?.ScriptBlockText != "" + - trim: + field: powershell.file.script_block_text + ignore_missing: true + - dissect: + field: powershell.file.script_block_text + pattern: "# SIG # Begin signature block%{powershell.file.script_block_signature}# SIG # End signature block" + ignore_missing: true + ignore_failure: true + - gsub: + field: powershell.file.script_block_signature + pattern: "\\n# " + replacement: "" + ignore_missing: true + - gsub: + field: powershell.file.script_block_text + target_field: _temp.script_block_no_space + pattern: "\\s" + replacement: "" + ignore_missing: true + - fingerprint: + fields: + - _temp.script_block_no_space + target_field: powershell.file.script_block_hash + ignore_missing: true - split: - description: Split Event 800 command invocation details. + description: Split Event 4103 command invocation details. 
field: winlog.event_data.Payload separator: "\n" ignore_failure: true @@ -284,7 +339,7 @@ processors: field: Payload source: |- def parseRawDetail(String raw) { - Pattern detailRegex = /^([^:(]+)\(([^)]+)\)\:\s*(.+)?$/; + Pattern detailRegex = /^([^(]+)\(([^)]+)\)\:\s*(.+)?$/; Pattern parameterBindingRegex = /name\=(.+);\s*value\=(.+)$/; def matcher = detailRegex.matcher(raw); @@ -328,7 +383,7 @@ processors: ]; } - if (ctx?._temp == null) { + if (ctx._temp == null) { ctx._temp = new HashMap(); } @@ -336,7 +391,7 @@ processors: ctx._temp.details = new ArrayList(); } - def values = ctx?.winlog?.event_data[params["field"]]; + def values = ctx.winlog?.event_data[params["field"]]; if (values != null && values.length > 0) { for (v in values) { ctx._temp.details.add(parseRawDetail(v)); @@ -345,12 +400,12 @@ processors: - rename: field: _temp.details target_field: powershell.command.invocation_details - if: ctx?._temp?.details != null && ctx?._temp?.details.length > 0 + if: ctx._temp?.details != null && ctx._temp?.details.length > 0 - script: description: Implements Windows-like SplitCommandLine lang: painless - if: ctx?.process?.command_line != null && ctx.process.command_line != "" + if: ctx.process?.command_line != null && ctx.process.command_line != "" source: |- // appendBSBytes appends n '\\' bytes to b and returns the resulting slice. def appendBSBytes(StringBuilder b, int n) { @@ -431,16 +486,16 @@ processors: target_field: winlog.event_data.ScriptName ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.Path != "" + if: ctx.winlog?.event_data?.Path != "" - script: description: Adds file information. lang: painless - if: ctx?.winlog?.event_data?.ScriptName != null && ctx.winlog.event_data.ScriptName.length() > 1 + if: ctx.winlog?.event_data?.ScriptName != null && ctx.winlog.event_data.ScriptName.length() > 1 source: |- def path = ctx.winlog.event_data.ScriptName; def idx = path.lastIndexOf("\\"); if (idx > -1) { - if (ctx?.file == null) { + if (ctx.file == null) { ctx.file = new HashMap(); } ctx.file.name = path.substring(idx+1); @@ -456,7 +511,12 @@ processors: target_field: file.path ignore_failure: true ignore_missing: true - if: ctx?.winlog?.event_data?.ScriptName != "" + if: ctx.winlog?.event_data?.ScriptName != "" + + - convert: + field: error.code + type: string + ignore_missing: true ## Cleanup. @@ -478,15 +538,19 @@ processors: - script: description: Remove all empty values from event_data. lang: painless - source: ctx?.winlog?.event_data?.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue().equals("")); + source: ctx.winlog?.event_data?.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue().equals("")); + if: ctx.winlog?.event_data != null - remove: description: Remove empty event data. 
field: winlog.event_data ignore_missing: true ignore_failure: true - if: ctx?.winlog?.event_data != null && ctx.winlog.event_data.size() == 0 + if: ctx.winlog?.event_data != null && ctx.winlog.event_data.size() == 0 on_failure: - set: - field: "error.message" - value: "{{ _ingest.on_failure_message }}" + field: event.kind + value: pipeline_error + - append: + field: error.message + value: "{{{ _ingest.on_failure_message }}}" diff --git a/x-pack/winlogbeat/module/security/ingest/security.yml b/x-pack/winlogbeat/module/security/ingest/security.yml index 313f0333ab45..55687df728cb 100644 --- a/x-pack/winlogbeat/module/security/ingest/security.yml +++ b/x-pack/winlogbeat/module/security/ingest/security.yml @@ -112,6 +112,14 @@ processors: type: - change action: registry-value-modified + "4662": + category: + - iam + - configuration + type: + - admin + - change + action: object-operation-performed "4670": category: - iam @@ -750,6 +758,14 @@ processors: type: - end action: windows-firewall-driver-error + "5136": + category: + - iam + - configuration + type: + - admin + - change + action: directory-service-object-modified "5140": category: - network @@ -795,11 +811,16 @@ processors: - info action: vault-credentials-were-read source: |- - if (ctx?.event?.code == null || params.get(ctx.event.code) == null) { + if (ctx.event?.code == null || params.get(ctx.event.code) == null) { return; } - def hm = new HashMap(params.get(ctx.event.code)); - hm.forEach((k, v) -> ctx.event[k] = v); + params.get(ctx.event.code).forEach((k, v) -> { + if (v instanceof List) { + ctx.event[k] = new ArrayList(v); + } else { + ctx.event[k] = v; + } + }); - script: lang: painless ignore_failure: false @@ -818,14 +839,14 @@ processors: "10": RemoteInteractive "11": CachedInteractive source: |- - if (ctx?.winlog?.event_data?.LogonType == null) { + if (ctx.winlog?.event_data?.LogonType == null) { return; } def t = params.get(ctx.winlog.event_data.LogonType); if (t == null) { return; } - if (ctx?.winlog?.logon == null ) { + if (ctx.winlog?.logon == null ) { Map map = new HashMap(); ctx.winlog.put("logon", map); } @@ -861,27 +882,25 @@ processors: "0x00100000": USER_PARTIAL_SECRETS_ACCOUNT "0x00200000": USER_USE_AES_KEYS source: |- - if (ctx?.winlog?.event_data?.NewUacValue == null) { + if (ctx.winlog?.event_data == null) { + return; + } + if (ctx.winlog.event_data.NewUacValue == null || ctx.winlog.event_data.NewUacValue == "-") { return; } Long newUacValue = Long.decode(ctx.winlog.event_data.NewUacValue); ArrayList uacResult = new ArrayList(); - def[] w = new def[] { null }; - for (long b = 0; b < 32; b++) { - long flag = 1L << b; - if ((newUacValue.longValue() & flag) == flag) { - w[0] = flag; - def desc = params[String.format("0x%08X", w)]; - if (desc != null) { - uacResult.add(desc); - } + for (entry in params.entrySet()) { + Long flag = Long.decode(entry.getKey()); + if ((newUacValue.longValue() & flag.longValue()) == flag.longValue()) { + uacResult.add(entry.getValue()); } } if (uacResult.length == 0) { return; } ctx.winlog.event_data.put("NewUACList", uacResult); - if (ctx?.winlog?.event_data?.UserAccountControl == null) { + if (ctx.winlog.event_data.UserAccountControl == null || ctx.winlog.event_data.UserAccountControl == "-") { return; } ArrayList uac_array = new ArrayList(); @@ -922,20 +941,15 @@ processors: "0x00000002": Renew "0x00000001": Validate source: |- - if (ctx?.winlog?.event_data?.TicketOptions == null) { + if (ctx.winlog?.event_data?.TicketOptions == null) { return; } Long tOpts = 
Long.decode(ctx.winlog.event_data.TicketOptions); ArrayList tDescs = new ArrayList(); - def[] w = new def[] { null }; - for (long b = 0; b < 32; b++) { - long flag = 1L << b; - if ((tOpts.longValue() & flag) == flag) { - w[0] = flag; - def desc = params[String.format("0x%08X", w)]; - if (desc != null) { - tDescs.add(desc); - } + for (entry in params.entrySet()) { + Long flag = Long.decode(entry.getKey()); + if ((tOpts.longValue() & flag.longValue()) == flag.longValue()) { + tDescs.add(entry.getValue()); } } if (tDescs.length == 0) { @@ -959,7 +973,7 @@ processors: "0x18": RC4-HMAC-EXP "0xffffffff": FAIL source: |- - if (ctx?.winlog?.event_data?.TicketEncryptionType == null) { + if (ctx.winlog?.event_data?.TicketEncryptionType == null) { return; } ctx.winlog.event_data.put("TicketEncryptionTypeDescription", @@ -1032,8 +1046,8 @@ processors: "0x43": KRB_AP_ERR_NO_TGT "0x44": KDC_ERR_WRONG_REALM source: |- - if (ctx?.winlog?.event_data?.Status == null || - ctx?.event?.code == null || + if (ctx.winlog?.event_data?.Status == null || + ctx.event?.code == null || !["4768", "4769", "4770", "4771"].contains(ctx.event.code)) { return; } @@ -1054,15 +1068,15 @@ processors: "0x110": Interactive Own Process "0x120": Interactive Share Process source: |- - if (ctx?.winlog?.event_data?.ServiceName != null) { - if (ctx?.service == null) { + if (ctx.winlog?.event_data?.ServiceName != null) { + if (ctx.service == null) { HashMap hm = new HashMap(); ctx.put("service", hm); } ctx.service.put("name", ctx.winlog.event_data.ServiceName); } - if (ctx?.winlog.event_data?.ServiceType != null) { - if (ctx?.service == null) { + if (ctx.winlog.event_data?.ServiceType != null) { + if (ctx.service == null) { HashMap hm = new HashMap(); ctx.put("service", hm); } @@ -1073,8 +1087,6 @@ processors: ignore_failure: false tag: Set Audit Information description: Set Audit Information - # Audit Categories Description - # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-gpac/77878370-0712-47cd-997d-b07053429f6d params: "0CCE9210-69AE-11D9-BED3-505054503030": ["Security State Change", "System"] "0CCE9211-69AE-11D9-BED3-505054503030": ["Security System Extension", "System"] @@ -1082,59 +1094,59 @@ processors: "0CCE9213-69AE-11D9-BED3-505054503030": ["IPsec Driver", "System"] "0CCE9214-69AE-11D9-BED3-505054503030": ["Other System Events", "System"] "0CCE9215-69AE-11D9-BED3-505054503030": ["Logon", "Logon/Logoff"] - "0CCE9216-69AE-11D9-BED3-505054503030": ["Logoff", "Logon/Logoff"] - "0CCE9217-69AE-11D9-BED3-505054503030": ["Account Lockout", "Logon/Logoff"] - "0CCE9218-69AE-11D9-BED3-505054503030": ["IPsec Main Mode", "Logon/Logoff"] - "0CCE9219-69AE-11D9-BED3-505054503030": ["IPsec Quick Mode", "Logon/Logoff"] - "0CCE921A-69AE-11D9-BED3-505054503030": ["IPsec Extended Mode", "Logon/Logoff"] - "0CCE921B-69AE-11D9-BED3-505054503030": ["Special Logon", "Logon/Logoff"] - "0CCE921C-69AE-11D9-BED3-505054503030": ["Other Logon/Logoff Events", "Logon/Logoff"] - "0CCE9243-69AE-11D9-BED3-505054503030": ["Network Policy Server", "Logon/Logoff"] - "0CCE9247-69AE-11D9-BED3-505054503030": ["User / Device Claims", "Logon/Logoff"] - "0CCE921D-69AE-11D9-BED3-505054503030": ["File System", "Object Access"] - "0CCE921E-69AE-11D9-BED3-505054503030": ["Registry", "Object Access"] - "0CCE921F-69AE-11D9-BED3-505054503030": ["Kernel Object", "Object Access"] - "0CCE9220-69AE-11D9-BED3-505054503030": ["SAM", "Object Access"] - "0CCE9221-69AE-11D9-BED3-505054503030": ["Certification Services", "Object Access"] - 
"0CCE9222-69AE-11D9-BED3-505054503030": ["Application Generated", "Object Access"] - "0CCE9223-69AE-11D9-BED3-505054503030": ["Handle Manipulation", "Object Access"] - "0CCE9224-69AE-11D9-BED3-505054503030": ["File Share", "Object Access"] - "0CCE9225-69AE-11D9-BED3-505054503030": ["Filtering Platform Packet Drop", "Object Access"] - "0CCE9226-69AE-11D9-BED3-505054503030": ["Filtering Platform Connection ", "Object Access"] - "0CCE9227-69AE-11D9-BED3-505054503030": ["Other Object Access Events", "Object Access"] - "0CCE9244-69AE-11D9-BED3-505054503030": ["Detailed File Share", "Object Access"] - "0CCE9245-69AE-11D9-BED3-505054503030": ["Removable Storage", "Object Access"] - "0CCE9246-69AE-11D9-BED3-505054503030": ["Central Policy Staging", "Object Access"] - "0CCE9228-69AE-11D9-BED3-505054503030": ["Sensitive Privilege Use", "Privilege Use"] - "0CCE9229-69AE-11D9-BED3-505054503030": ["Non Sensitive Privilege Use", "Privilege Use"] - "0CCE922A-69AE-11D9-BED3-505054503030": ["Other Privilege Use Events", "Privilege Use"] - "0CCE922B-69AE-11D9-BED3-505054503030": ["Process Creation", "Detailed Tracking"] - "0CCE922C-69AE-11D9-BED3-505054503030": ["Process Termination", "Detailed Tracking"] - "0CCE922D-69AE-11D9-BED3-505054503030": ["DPAPI Activity", "Detailed Tracking"] - "0CCE922E-69AE-11D9-BED3-505054503030": ["RPC Events", "Detailed Tracking"] - "0CCE9248-69AE-11D9-BED3-505054503030": ["Plug and Play Events", "Detailed Tracking"] - "0CCE922F-69AE-11D9-BED3-505054503030": ["Audit Policy Change", "Policy Change"] - "0CCE9230-69AE-11D9-BED3-505054503030": ["Authentication Policy Change", "Policy Change"] - "0CCE9231-69AE-11D9-BED3-505054503030": ["Authorization Policy Change", "Policy Change"] - "0CCE9232-69AE-11D9-BED3-505054503030": ["MPSSVC Rule-Level Policy Change", "Policy Change"] - "0CCE9233-69AE-11D9-BED3-505054503030": ["Filtering Platform Policy Change", "Policy Change"] - "0CCE9234-69AE-11D9-BED3-505054503030": ["Other Policy Change Events", "Policy Change"] - "0CCE9235-69AE-11D9-BED3-505054503030": ["User Account Management", "Account Management"] - "0CCE9236-69AE-11D9-BED3-505054503030": ["Computer Account Management", "Account Management"] - "0CCE9237-69AE-11D9-BED3-505054503030": ["Security Group Management", "Account Management"] - "0CCE9238-69AE-11D9-BED3-505054503030": ["Distribution Group Management", "Account Management"] - "0CCE9239-69AE-11D9-BED3-505054503030": ["Application Group Management", "Account Management"] - "0CCE923A-69AE-11D9-BED3-505054503030": ["Other Account Management Events", "Account Management"] - "0CCE923B-69AE-11D9-BED3-505054503030": ["Directory Service Access", "Account Management"] - "0CCE923C-69AE-11D9-BED3-505054503030": ["Directory Service Changes", "Account Management"] - "0CCE923D-69AE-11D9-BED3-505054503030": ["Directory Service Replication", "Account Management"] - "0CCE923E-69AE-11D9-BED3-505054503030": ["Detailed Directory Service Replication", "Account Management"] - "0CCE923F-69AE-11D9-BED3-505054503030": ["Credential Validation", "Account Logon"] - "0CCE9240-69AE-11D9-BED3-505054503030": ["Kerberos Service Ticket Operations", "Account Logon"] - "0CCE9241-69AE-11D9-BED3-505054503030": ["Other Account Logon Events", "Account Logon"] - "0CCE9242-69AE-11D9-BED3-505054503030": ["Kerberos Authentication Service", "Account Logon"] + "0CCE9216-69AE-11D9-BED3-505054503030": ["Logoff","Logon/Logoff"] + "0CCE9217-69AE-11D9-BED3-505054503030": ["Account Lockout","Logon/Logoff"] + "0CCE9218-69AE-11D9-BED3-505054503030": ["IPsec Main 
Mode","Logon/Logoff"] + "0CCE9219-69AE-11D9-BED3-505054503030": ["IPsec Quick Mode","Logon/Logoff"] + "0CCE921A-69AE-11D9-BED3-505054503030": ["IPsec Extended Mode","Logon/Logoff"] + "0CCE921B-69AE-11D9-BED3-505054503030": ["Special Logon","Logon/Logoff"] + "0CCE921C-69AE-11D9-BED3-505054503030": ["Other Logon/Logoff Events","Logon/Logoff"] + "0CCE9243-69AE-11D9-BED3-505054503030": ["Network Policy Server","Logon/Logoff"] + "0CCE9247-69AE-11D9-BED3-505054503030": ["User / Device Claims","Logon/Logoff"] + "0CCE921D-69AE-11D9-BED3-505054503030": ["File System","Object Access"] + "0CCE921E-69AE-11D9-BED3-505054503030": ["Registry","Object Access"] + "0CCE921F-69AE-11D9-BED3-505054503030": ["Kernel Object","Object Access"] + "0CCE9220-69AE-11D9-BED3-505054503030": ["SAM","Object Access"] + "0CCE9221-69AE-11D9-BED3-505054503030": ["Certification Services","Object Access"] + "0CCE9222-69AE-11D9-BED3-505054503030": ["Application Generated","Object Access"] + "0CCE9223-69AE-11D9-BED3-505054503030": ["Handle Manipulation","Object Access"] + "0CCE9224-69AE-11D9-BED3-505054503030": ["File Share","Object Access"] + "0CCE9225-69AE-11D9-BED3-505054503030": ["Filtering Platform Packet Drop","Object Access"] + "0CCE9226-69AE-11D9-BED3-505054503030": ["Filtering Platform Connection ","Object Access"] + "0CCE9227-69AE-11D9-BED3-505054503030": ["Other Object Access Events","Object Access"] + "0CCE9244-69AE-11D9-BED3-505054503030": ["Detailed File Share","Object Access"] + "0CCE9245-69AE-11D9-BED3-505054503030": ["Removable Storage","Object Access"] + "0CCE9246-69AE-11D9-BED3-505054503030": ["Central Policy Staging","Object Access"] + "0CCE9228-69AE-11D9-BED3-505054503030": ["Sensitive Privilege Use","Privilege Use"] + "0CCE9229-69AE-11D9-BED3-505054503030": ["Non Sensitive Privilege Use","Privilege Use"] + "0CCE922A-69AE-11D9-BED3-505054503030": ["Other Privilege Use Events","Privilege Use"] + "0CCE922B-69AE-11D9-BED3-505054503030": ["Process Creation","Detailed Tracking"] + "0CCE922C-69AE-11D9-BED3-505054503030": ["Process Termination","Detailed Tracking"] + "0CCE922D-69AE-11D9-BED3-505054503030": ["DPAPI Activity","Detailed Tracking"] + "0CCE922E-69AE-11D9-BED3-505054503030": ["RPC Events","Detailed Tracking"] + "0CCE9248-69AE-11D9-BED3-505054503030": ["Plug and Play Events","Detailed Tracking"] + "0CCE922F-69AE-11D9-BED3-505054503030": ["Audit Policy Change","Policy Change"] + "0CCE9230-69AE-11D9-BED3-505054503030": ["Authentication Policy Change","Policy Change"] + "0CCE9231-69AE-11D9-BED3-505054503030": ["Authorization Policy Change","Policy Change"] + "0CCE9232-69AE-11D9-BED3-505054503030": ["MPSSVC Rule-Level Policy Change","Policy Change"] + "0CCE9233-69AE-11D9-BED3-505054503030": ["Filtering Platform Policy Change","Policy Change"] + "0CCE9234-69AE-11D9-BED3-505054503030": ["Other Policy Change Events","Policy Change"] + "0CCE9235-69AE-11D9-BED3-505054503030": ["User Account Management","Account Management"] + "0CCE9236-69AE-11D9-BED3-505054503030": ["Computer Account Management","Account Management"] + "0CCE9237-69AE-11D9-BED3-505054503030": ["Security Group Management","Account Management"] + "0CCE9238-69AE-11D9-BED3-505054503030": ["Distribution Group Management","Account Management"] + "0CCE9239-69AE-11D9-BED3-505054503030": ["Application Group Management","Account Management"] + "0CCE923A-69AE-11D9-BED3-505054503030": ["Other Account Management Events","Account Management"] + "0CCE923B-69AE-11D9-BED3-505054503030": ["Directory Service Access","Account Management"] + 
"0CCE923C-69AE-11D9-BED3-505054503030": ["Directory Service Changes","Account Management"] + "0CCE923D-69AE-11D9-BED3-505054503030": ["Directory Service Replication","Account Management"] + "0CCE923E-69AE-11D9-BED3-505054503030": ["Detailed Directory Service Replication","Account Management"] + "0CCE923F-69AE-11D9-BED3-505054503030": ["Credential Validation","Account Logon"] + "0CCE9240-69AE-11D9-BED3-505054503030": ["Kerberos Service Ticket Operations","Account Logon"] + "0CCE9241-69AE-11D9-BED3-505054503030": ["Other Account Logon Events","Account Logon"] + "0CCE9242-69AE-11D9-BED3-505054503030": ["Kerberos Authentication Service","Account Logon"] source: |- - if (ctx?.winlog?.event_data?.SubcategoryGuid == null) { + if (ctx.winlog?.event_data?.SubcategoryGuid == null) { return; } def subCatGuid = ctx.winlog.event_data.SubcategoryGuid.replace("{","").replace("}","").toUpperCase(); @@ -2124,975 +2136,663 @@ processors: "16902": "Subscribe" "16903": "Publish" reversed_descriptions: - "Undefined Access (no effect) Bit 7" : "279" - "Unused message ID" : "1536" - "DELETE" : "1537" - "READ_CONTROL" : "1538" - "WRITE_DAC" : "1539" - "WRITE_OWNER" : "1540" - "SYNCHRONIZE" : "1541" - "ACCESS_SYS_SEC" : "1542" - "MAX_ALLOWED" : "1543" - "Unknown specific access (bit 0)" : "1552" - "Unknown specific access (bit 1)" : "1553" - "Unknown specific access (bit 2)" : "1554" - "Unknown specific access (bit 3)" : "1555" - "Unknown specific access (bit 4)" : "1556" - "Unknown specific access (bit 5)" : "1557" - "Unknown specific access (bit 6)" : "1558" - "Unknown specific access (bit 7)" : "1559" - "Unknown specific access (bit 8)" : "1560" - "Unknown specific access (bit 9)" : "1561" - "Unknown specific access (bit 10)" : "1562" - "Unknown specific access (bit 11)" : "1563" - "Unknown specific access (bit 12)" : "1564" - "Unknown specific access (bit 13)" : "1565" - "Unknown specific access (bit 14)" : "1566" - "Unknown specific access (bit 15)" : "1567" - "Not used" : "1601" - "Assign Primary Token Privilege" : "1603" - "Lock Memory Privilege" : "1604" - "Increase Memory Quota Privilege" : "1605" - "Unsolicited Input Privilege" : "1606" - "Trusted Computer Base Privilege" : "1607" - "Security Privilege" : "1608" - "Take Ownership Privilege" : "1609" - "Load/Unload Driver Privilege" : "1610" - "Profile System Privilege" : "1611" - "Set System Time Privilege" : "1612" - "Profile Single Process Privilege" : "1613" - "Increment Base Priority Privilege" : "1614" - "Create Pagefile Privilege" : "1615" - "Create Permanent Object Privilege" : "1616" - "Backup Privilege" : "1617" - "Restore From Backup Privilege" : "1618" - "Shutdown System Privilege" : "1619" - "Debug Privilege" : "1620" - "View or Change Audit Log Privilege" : "1621" - "Change Hardware Environment Privilege" : "1622" - "Change Notify (and Traverse) Privilege" : "1623" - "Remotely Shut System Down Privilege" : "1624" - "" : "1793" - "" : "1794" - "Enabled" : "1795" - "Disabled" : "1796" - "All" : "1797" - "None" : "1798" - "Audit Policy query/set API Operation" : "1799" - "" : "1800" - "Granted by" : "1801" - "Denied by" : "1802" - "Denied by Integrity Policy check" : "1803" - "Granted by Ownership" : "1804" - "Not granted" : "1805" - "Granted by NULL DACL" : "1806" - "Denied by Empty DACL" : "1807" - "Granted by NULL Security Descriptor" : "1808" - "Unknown or unchecked" : "1809" - "Not granted due to missing" : "1810" - "Granted by ACE on parent folder" : "1811" - "Denied by ACE on parent folder" : "1812" - "Granted by Central Access Rule" : 
"1813" - "NOT Granted by Central Access Rule" : "1814" - "Granted by parent folder's Central Access Rule" : "1815" - "NOT Granted by parent folder's Central Access Rule" : "1816" - "Unknown Type" : "1817" - "String" : "1818" - "Unsigned 64-bit Integer" : "1819" - "64-bit Integer" : "1820" - "FQBN" : "1821" - "Blob" : "1822" - "Sid" : "1823" - "Boolean" : "1824" - "TRUE" : "1825" - "FALSE" : "1826" - "Invalid" : "1827" - "an ACE too long to display" : "1828" - "a Security Descriptor too long to display" : "1829" - "Not granted to AppContainers" : "1830" - "..." : "1831" - "Identification" : "1832" - "Impersonation" : "1833" - "Delegation" : "1840" - "Denied by Process Trust Label ACE" : "1841" - "Yes" : "1842" - "No" : "1843" - "System" : "1844" - "Not Available" : "1845" - "Default" : "1846" - "DisallowMmConfig" : "1847" - "Off" : "1848" - "Auto" : "1849" - "REG_NONE" : "1872" - "REG_SZ" : "1873" - "REG_EXPAND_SZ" : "1874" - "REG_BINARY" : "1875" - "REG_DWORD" : "1876" - "REG_DWORD_BIG_ENDIAN" : "1877" - "REG_LINK" : "1878" - "REG_MULTI_SZ (New lines are replaced with *. A * is replaced with **)" : "1879" - "REG_RESOURCE_LIST" : "1880" - "REG_FULL_RESOURCE_DESCRIPTOR" : "1881" - "REG_RESOURCE_REQUIREMENTS_LIST" : "1882" - "REG_QWORD" : "1883" - "New registry value created" : "1904" - "Existing registry value modified" : "1905" - "Registry value deleted" : "1906" - "Sunday" : "1920" - "Monday" : "1921" - "Tuesday" : "1922" - "Wednesday" : "1923" - "Thursday" : "1924" - "Friday" : "1925" - "Saturday" : "1926" - "TokenElevationTypeDefault (1)" : "1936" - "TokenElevationTypeFull (2)" : "1937" - "TokenElevationTypeLimited (3)" : "1938" - "Account Enabled" : "2048" - "Home Directory Required' - Disabled" : "2049" - "Password Not Required' - Disabled" : "2050" - "Temp Duplicate Account' - Disabled" : "2051" - "Normal Account' - Disabled" : "2052" - "MNS Logon Account' - Disabled" : "2053" - "Interdomain Trust Account' - Disabled" : "2054" - "Workstation Trust Account' - Disabled" : "2055" - "Server Trust Account' - Disabled" : "2056" - "Don't Expire Password' - Disabled" : "2057" - "Account Unlocked" : "2058" - "Encrypted Text Password Allowed' - Disabled" : "2059" - "Smartcard Required' - Disabled" : "2060" - "Trusted For Delegation' - Disabled" : "2061" - "Not Delegated' - Disabled" : "2062" - "Use DES Key Only' - Disabled" : "2063" - "Don't Require Preauth' - Disabled" : "2064" - "Password Expired' - Disabled" : "2065" - "Trusted To Authenticate For Delegation' - Disabled" : "2066" - "Exclude Authorization Information' - Disabled" : "2067" - "Undefined UserAccountControl Bit 20' - Disabled" : "2068" - "Protect Kerberos Service Tickets with AES Keys' - Disabled" : "2069" - "Undefined UserAccountControl Bit 22' - Disabled" : "2070" - "Undefined UserAccountControl Bit 23' - Disabled" : "2071" - "Undefined UserAccountControl Bit 24' - Disabled" : "2072" - "Undefined UserAccountControl Bit 25' - Disabled" : "2073" - "Undefined UserAccountControl Bit 26' - Disabled" : "2074" - "Undefined UserAccountControl Bit 27' - Disabled" : "2075" - "Undefined UserAccountControl Bit 28' - Disabled" : "2076" - "Undefined UserAccountControl Bit 29' - Disabled" : "2077" - "Undefined UserAccountControl Bit 30' - Disabled" : "2078" - "Undefined UserAccountControl Bit 31' - Disabled" : "2079" - "Account Disabled" : "2080" - "Home Directory Required' - Enabled" : "2081" - "Password Not Required' - Enabled" : "2082" - "Temp Duplicate Account' - Enabled" : "2083" - "Normal Account' - Enabled" : "2084" - "MNS Logon 
Account' - Enabled" : "2085" - "Interdomain Trust Account' - Enabled" : "2086" - "Workstation Trust Account' - Enabled" : "2087" - "Server Trust Account' - Enabled" : "2088" - "Don't Expire Password' - Enabled" : "2089" - "Account Locked" : "2090" - "Encrypted Text Password Allowed' - Enabled" : "2091" - "Smartcard Required' - Enabled" : "2092" - "Trusted For Delegation' - Enabled" : "2093" - "Not Delegated' - Enabled" : "2094" - "Use DES Key Only' - Enabled" : "2095" - "Don't Require Preauth' - Enabled" : "2096" - "Password Expired' - Enabled" : "2097" - "Trusted To Authenticate For Delegation' - Enabled" : "2098" - "Exclude Authorization Information' - Enabled" : "2099" - "Undefined UserAccountControl Bit 20' - Enabled" : "2100" - "Protect Kerberos Service Tickets with AES Keys' - Enabled" : "2101" - "Undefined UserAccountControl Bit 22' - Enabled" : "2102" - "Undefined UserAccountControl Bit 23' - Enabled" : "2103" - "Undefined UserAccountControl Bit 24' - Enabled" : "2104" - "Undefined UserAccountControl Bit 25' - Enabled" : "2105" - "Undefined UserAccountControl Bit 26' - Enabled" : "2106" - "Undefined UserAccountControl Bit 27' - Enabled" : "2107" - "Undefined UserAccountControl Bit 28' - Enabled" : "2108" - "Undefined UserAccountControl Bit 29' - Enabled" : "2109" - "Undefined UserAccountControl Bit 30' - Enabled" : "2110" - "Undefined UserAccountControl Bit 31' - Enabled" : "2111" - "An Error occured during Logon." : "2304" - "The specified user account has expired." : "2305" - "The NetLogon component is not active." : "2306" - "Account locked out." : "2307" - "The user has not been granted the requested logon type at this machine." : "2308" - "The specified account's password has expired." : "2309" - "Account currently disabled." : "2310" - "Account logon time restriction violation." : "2311" - "User not allowed to logon at this computer." : "2312" - "Unknown user name or bad password." : "2313" - "Domain sid inconsistent." : "2314" - "Smartcard logon is required and was not used." : "2315" - "Not Available." : "2432" - "Random number generator failure." : "2436" - "Random number generation failed FIPS-140 pre-hash check." : "2437" - "Failed to zero secret data." : "2438" - "Key failed pair wise consistency check." : "2439" - "Failed to unprotect persistent cryptographic key." : "2448" - "Key export checks failed." : "2449" - "Validation of public key failed." : "2450" - "Signature verification failed." : "2451" - "Open key file." : "2456" - "Delete key file." : "2457" - "Read persisted key from file." : "2458" - "Write persisted key to file." : "2459" - "Export of persistent cryptographic key." : "2464" - "Import of persistent cryptographic key." : "2465" - "Open Key." : "2480" - "Create Key." : "2481" - "Delete Key." : "2482" - "Encrypt." : "2483" - "Decrypt." : "2484" - "Sign hash." : "2485" - "Secret agreement." : "2486" - "Domain settings" : "2487" - "Local settings" : "2488" - "Add provider." : "2489" - "Remove provider." : "2490" - "Add context." : "2491" - "Remove context." : "2492" - "Add function." : "2493" - "Remove function." : "2494" - "Add function provider." : "2495" - "Remove function provider." : "2496" - "Add function property." : "2497" - "Remove function property." : "2498" - "Machine key." : "2499" - "User key." : "2500" - "Key Derivation." 
: "2501" - "Device Access Bit 0" : "4352" - "Device Access Bit 1" : "4353" - "Device Access Bit 2" : "4354" - "Device Access Bit 3" : "4355" - "Device Access Bit 4" : "4356" - "Device Access Bit 5" : "4357" - "Device Access Bit 6" : "4358" - "Device Access Bit 7" : "4359" - "Device Access Bit 8" : "4360" - "Undefined Access (no effect) Bit 9" : "4361" - "Undefined Access (no effect) Bit 10" : "4362" - "Undefined Access (no effect) Bit 11" : "4363" - "Undefined Access (no effect) Bit 12" : "4364" - "Undefined Access (no effect) Bit 13" : "4365" - "Undefined Access (no effect) Bit 14" : "4366" - "Undefined Access (no effect) Bit 15" : "4367" - "Query directory" : "4368" - "Traverse" : "4369" - "Create object in directory" : "4370" - "Create sub-directory" : "4371" - "Undefined Access (no effect) Bit 4" : "4372" - "Undefined Access (no effect) Bit 5" : "4373" - "Undefined Access (no effect) Bit 6" : "4374" - "Undefined Access (no effect) Bit 7" : "4375" - "Undefined Access (no effect) Bit 8" : "4376" - "Undefined Access (no effect) Bit 9" : "4377" - "Undefined Access (no effect) Bit 10" : "4378" - "Undefined Access (no effect) Bit 11" : "4379" - "Undefined Access (no effect) Bit 12" : "4380" - "Undefined Access (no effect) Bit 13" : "4381" - "Undefined Access (no effect) Bit 14" : "4382" - "Undefined Access (no effect) Bit 15" : "4383" - "Query event state" : "4384" - "Modify event state" : "4385" - "Undefined Access (no effect) Bit 2" : "4386" - "Undefined Access (no effect) Bit 3" : "4387" - "Undefined Access (no effect) Bit 4" : "4388" - "Undefined Access (no effect) Bit 5" : "4389" - "Undefined Access (no effect) Bit 6" : "4390" - "Undefined Access (no effect) Bit 7" : "4391" - "Undefined Access (no effect) Bit 8" : "4392" - "Undefined Access (no effect) Bit 9" : "4393" - "Undefined Access (no effect) Bit 10" : "4394" - "Undefined Access (no effect) Bit 11" : "4395" - "Undefined Access (no effect) Bit 12" : "4396" - "Undefined Access (no effect) Bit 13" : "4397" - "Undefined Access (no effect) Bit 14" : "4398" - "Undefined Access (no effect) Bit 15" : "4399" - "ReadData (or ListDirectory)" : "4416" - "WriteData (or AddFile)" : "4417" - "AppendData (or AddSubdirectory or CreatePipeInstance)" : "4418" - "ReadEA" : "4419" - "WriteEA" : "4420" - "Execute/Traverse" : "4421" - "DeleteChild" : "4422" - "ReadAttributes" : "4423" - "WriteAttributes" : "4424" - "Undefined Access (no effect) Bit 9" : "4425" - "Undefined Access (no effect) Bit 10" : "4426" - "Undefined Access (no effect) Bit 11" : "4427" - "Undefined Access (no effect) Bit 12" : "4428" - "Undefined Access (no effect) Bit 13" : "4429" - "Undefined Access (no effect) Bit 14" : "4430" - "Undefined Access (no effect) Bit 15" : "4431" - "Query key value" : "4432" - "Set key value" : "4433" - "Create sub-key" : "4434" - "Enumerate sub-keys" : "4435" - "Notify about changes to keys" : "4436" - "Create Link" : "4437" - "Undefined Access (no effect) Bit 6" : "4438" - "Undefined Access (no effect) Bit 7" : "4439" - "Enable 64(or 32) bit application to open 64 bit key" : "4440" - "Enable 64(or 32) bit application to open 32 bit key" : "4441" - "Undefined Access (no effect) Bit 10" : "4442" - "Undefined Access (no effect) Bit 11" : "4443" - "Undefined Access (no effect) Bit 12" : "4444" - "Undefined Access (no effect) Bit 13" : "4445" - "Undefined Access (no effect) Bit 14" : "4446" - "Undefined Access (no effect) Bit 15" : "4447" - "Query mutant state" : "4448" - "Undefined Access (no effect) Bit 1" : "4449" - "Undefined Access (no effect) Bit 
2" : "4450" - "Undefined Access (no effect) Bit 3" : "4451" - "Undefined Access (no effect) Bit 4" : "4452" - "Undefined Access (no effect) Bit 5" : "4453" - "Undefined Access (no effect) Bit 6" : "4454" - "Undefined Access (no effect) Bit 7" : "4455" - "Undefined Access (no effect) Bit 8" : "4456" - "Undefined Access (no effect) Bit 9" : "4457" - "Undefined Access (no effect) Bit 10" : "4458" - "Undefined Access (no effect) Bit 11" : "4459" - "Undefined Access (no effect) Bit 12" : "4460" - "Undefined Access (no effect) Bit 13" : "4461" - "Undefined Access (no effect) Bit 14" : "4462" - "Undefined Access (no effect) Bit 15" : "4463" - "Communicate using port" : "4464" - "Undefined Access (no effect) Bit 1" : "4465" - "Undefined Access (no effect) Bit 2" : "4466" - "Undefined Access (no effect) Bit 3" : "4467" - "Undefined Access (no effect) Bit 4" : "4468" - "Undefined Access (no effect) Bit 5" : "4469" - "Undefined Access (no effect) Bit 6" : "4470" - "Undefined Access (no effect) Bit 7" : "4471" - "Undefined Access (no effect) Bit 8" : "4472" - "Undefined Access (no effect) Bit 9" : "4473" - "Undefined Access (no effect) Bit 10" : "4474" - "Undefined Access (no effect) Bit 11" : "4475" - "Undefined Access (no effect) Bit 12" : "4476" - "Undefined Access (no effect) Bit 13" : "4477" - "Undefined Access (no effect) Bit 14" : "4478" - "Undefined Access (no effect) Bit 15" : "4479" - "Force process termination" : "4480" - "Create new thread in process" : "4481" - "Set process session ID" : "4482" - "Perform virtual memory operation" : "4483" - "Read from process memory" : "4484" - "Write to process memory" : "4485" - "Duplicate handle into or out of process" : "4486" - "Create a subprocess of process" : "4487" - "Set process quotas" : "4488" - "Set process information" : "4489" - "Query process information" : "4490" - "Set process termination port" : "4491" - "Undefined Access (no effect) Bit 12" : "4492" - "Undefined Access (no effect) Bit 13" : "4493" - "Undefined Access (no effect) Bit 14" : "4494" - "Undefined Access (no effect) Bit 15" : "4495" - "Control profile" : "4496" - "Undefined Access (no effect) Bit 1" : "4497" - "Undefined Access (no effect) Bit 2" : "4498" - "Undefined Access (no effect) Bit 3" : "4499" - "Undefined Access (no effect) Bit 4" : "4500" - "Undefined Access (no effect) Bit 5" : "4501" - "Undefined Access (no effect) Bit 6" : "4502" - "Undefined Access (no effect) Bit 7" : "4503" - "Undefined Access (no effect) Bit 8" : "4504" - "Undefined Access (no effect) Bit 9" : "4505" - "Undefined Access (no effect) Bit 10" : "4506" - "Undefined Access (no effect) Bit 11" : "4507" - "Undefined Access (no effect) Bit 12" : "4508" - "Undefined Access (no effect) Bit 13" : "4509" - "Undefined Access (no effect) Bit 14" : "4510" - "Undefined Access (no effect) Bit 15" : "4511" - "Query section state" : "4512" - "Map section for write" : "4513" - "Map section for read" : "4514" - "Map section for execute" : "4515" - "Extend size" : "4516" - "Undefined Access (no effect) Bit 5" : "4517" - "Undefined Access (no effect) Bit 6" : "4518" - "Undefined Access (no effect) Bit 7" : "4519" - "Undefined Access (no effect) Bit 8" : "4520" - "Undefined Access (no effect) Bit 9" : "4521" - "Undefined Access (no effect) Bit 10" : "4522" - "Undefined Access (no effect) Bit 11" : "4523" - "Undefined Access (no effect) Bit 12" : "4524" - "Undefined Access (no effect) Bit 13" : "4525" - "Undefined Access (no effect) Bit 14" : "4526" - "Undefined Access (no effect) Bit 15" : "4527" - "Query 
semaphore state" : "4528" - "Modify semaphore state" : "4529" - "Undefined Access (no effect) Bit 2" : "4530" - "Undefined Access (no effect) Bit 3" : "4531" - "Undefined Access (no effect) Bit 4" : "4532" - "Undefined Access (no effect) Bit 5" : "4533" - "Undefined Access (no effect) Bit 6" : "4534" - "Undefined Access (no effect) Bit 7" : "4535" - "Undefined Access (no effect) Bit 8" : "4536" - "Undefined Access (no effect) Bit 9" : "4537" - "Undefined Access (no effect) Bit 10" : "4538" - "Undefined Access (no effect) Bit 11" : "4539" - "Undefined Access (no effect) Bit 12" : "4540" - "Undefined Access (no effect) Bit 13" : "4541" - "Undefined Access (no effect) Bit 14" : "4542" - "Undefined Access (no effect) Bit 15" : "4543" - "Use symbolic link" : "4544" - "Undefined Access (no effect) Bit 1" : "4545" - "Undefined Access (no effect) Bit 2" : "4546" - "Undefined Access (no effect) Bit 3" : "4547" - "Undefined Access (no effect) Bit 4" : "4548" - "Undefined Access (no effect) Bit 5" : "4549" - "Undefined Access (no effect) Bit 6" : "4550" - "Undefined Access (no effect) Bit 7" : "4551" - "Undefined Access (no effect) Bit 8" : "4552" - "Undefined Access (no effect) Bit 9" : "4553" - "Undefined Access (no effect) Bit 10" : "4554" - "Undefined Access (no effect) Bit 11" : "4555" - "Undefined Access (no effect) Bit 12" : "4556" - "Undefined Access (no effect) Bit 13" : "4557" - "Undefined Access (no effect) Bit 14" : "4558" - "Undefined Access (no effect) Bit 15" : "4559" - "Force thread termination" : "4560" - "Suspend or resume thread" : "4561" - "Send an alert to thread" : "4562" - "Get thread context" : "4563" - "Set thread context" : "4564" - "Set thread information" : "4565" - "Query thread information" : "4566" - "Assign a token to the thread" : "4567" - "Cause thread to directly impersonate another thread" : "4568" - "Directly impersonate this thread" : "4569" - "Undefined Access (no effect) Bit 10" : "4570" - "Undefined Access (no effect) Bit 11" : "4571" - "Undefined Access (no effect) Bit 12" : "4572" - "Undefined Access (no effect) Bit 13" : "4573" - "Undefined Access (no effect) Bit 14" : "4574" - "Undefined Access (no effect) Bit 15" : "4575" - "Query timer state" : "4576" - "Modify timer state" : "4577" - "Undefined Access (no effect) Bit 2" : "4578" - "Undefined Access (no effect) Bit 3" : "4579" - "Undefined Access (no effect) Bit 4" : "4580" - "Undefined Access (no effect) Bit 5" : "4581" - "Undefined Access (no effect) Bit 6" : "4582" - "Undefined Access (no effect) Bit 8" : "4584" - "Undefined Access (no effect) Bit 9" : "4585" - "Undefined Access (no effect) Bit 10" : "4586" - "Undefined Access (no effect) Bit 11" : "4587" - "Undefined Access (no effect) Bit 12" : "4588" - "Undefined Access (no effect) Bit 13" : "4589" - "Undefined Access (no effect) Bit 14" : "4590" - "Undefined Access (no effect) Bit 15" : "4591" - "AssignAsPrimary" : "4592" - "Duplicate" : "4593" - "Impersonate" : "4594" - "Query" : "4595" - "QuerySource" : "4596" - "AdjustPrivileges" : "4597" - "AdjustGroups" : "4598" - "AdjustDefaultDacl" : "4599" - "AdjustSessionID" : "4600" - "Undefined Access (no effect) Bit 9" : "4601" - "Undefined Access (no effect) Bit 10" : "4602" - "Undefined Access (no effect) Bit 11" : "4603" - "Undefined Access (no effect) Bit 12" : "4604" - "Undefined Access (no effect) Bit 13" : "4605" - "Undefined Access (no effect) Bit 14" : "4606" - "Undefined Access (no effect) Bit 15" : "4607" - "Create instance of object type" : "4608" - "Undefined Access (no effect) Bit 1" : 
"4609" - "Undefined Access (no effect) Bit 2" : "4610" - "Undefined Access (no effect) Bit 3" : "4611" - "Undefined Access (no effect) Bit 4" : "4612" - "Undefined Access (no effect) Bit 5" : "4613" - "Undefined Access (no effect) Bit 6" : "4614" - "Undefined Access (no effect) Bit 7" : "4615" - "Undefined Access (no effect) Bit 8" : "4616" - "Undefined Access (no effect) Bit 9" : "4617" - "Undefined Access (no effect) Bit 10" : "4618" - "Undefined Access (no effect) Bit 11" : "4619" - "Undefined Access (no effect) Bit 12" : "4620" - "Undefined Access (no effect) Bit 13" : "4621" - "Undefined Access (no effect) Bit 14" : "4622" - "Undefined Access (no effect) Bit 15" : "4623" - "Query State" : "4864" - "Modify State" : "4865" - "Channel read message" : "5120" - "Channel write message" : "5121" - "Channel query information" : "5122" - "Channel set information" : "5123" - "Undefined Access (no effect) Bit 4" : "5124" - "Undefined Access (no effect) Bit 5" : "5125" - "Undefined Access (no effect) Bit 6" : "5126" - "Undefined Access (no effect) Bit 7" : "5127" - "Undefined Access (no effect) Bit 8" : "5128" - "Undefined Access (no effect) Bit 9" : "5129" - "Undefined Access (no effect) Bit 10" : "5130" - "Undefined Access (no effect) Bit 11" : "5131" - "Undefined Access (no effect) Bit 12" : "5132" - "Undefined Access (no effect) Bit 13" : "5133" - "Undefined Access (no effect) Bit 14" : "5134" - "Undefined Access (no effect) Bit 15" : "5135" - "Assign process" : "5136" - "Set Attributes" : "5137" - "Query Attributes" : "5138" - "Terminate Job" : "5139" - "Set Security Attributes" : "5140" - "Undefined Access (no effect) Bit 5" : "5141" - "Undefined Access (no effect) Bit 6" : "5142" - "Undefined Access (no effect) Bit 7" : "5143" - "Undefined Access (no effect) Bit 8" : "5144" - "Undefined Access (no effect) Bit 9" : "5145" - "Undefined Access (no effect) Bit 10" : "5146" - "Undefined Access (no effect) Bit 11" : "5147" - "Undefined Access (no effect) Bit 12" : "5148" - "Undefined Access (no effect) Bit 13" : "5149" - "Undefined Access (no effect) Bit 14" : "5150" - "Undefined Access (no effect) Bit 15" : "5151" - "ConnectToServer" : "5376" - "ShutdownServer" : "5377" - "InitializeServer" : "5378" - "CreateDomain" : "5379" - "EnumerateDomains" : "5380" - "LookupDomain" : "5381" - "Undefined Access (no effect) Bit 6" : "5382" - "Undefined Access (no effect) Bit 7" : "5383" - "Undefined Access (no effect) Bit 8" : "5384" - "Undefined Access (no effect) Bit 9" : "5385" - "Undefined Access (no effect) Bit 10" : "5386" - "Undefined Access (no effect) Bit 11" : "5387" - "Undefined Access (no effect) Bit 12" : "5388" - "Undefined Access (no effect) Bit 13" : "5389" - "Undefined Access (no effect) Bit 14" : "5390" - "Undefined Access (no effect) Bit 15" : "5391" - "ReadPasswordParameters" : "5392" - "WritePasswordParameters" : "5393" - "ReadOtherParameters" : "5394" - "WriteOtherParameters" : "5395" - "CreateUser" : "5396" - "CreateGlobalGroup" : "5397" - "CreateLocalGroup" : "5398" - "GetLocalGroupMembership" : "5399" - "ListAccounts" : "5400" - "LookupIDs" : "5401" - "AdministerServer" : "5402" - "Undefined Access (no effect) Bit 11" : "5403" - "Undefined Access (no effect) Bit 12" : "5404" - "Undefined Access (no effect) Bit 13" : "5405" - "Undefined Access (no effect) Bit 14" : "5406" - "Undefined Access (no effect) Bit 15" : "5407" - "ReadInformation" : "5408" - "WriteAccount" : "5409" - "AddMember" : "5410" - "RemoveMember" : "5411" - "ListMembers" : "5412" - "Undefined Access (no effect) Bit 
5" : "5413" - "Undefined Access (no effect) Bit 6" : "5414" - "Undefined Access (no effect) Bit 7" : "5415" - "Undefined Access (no effect) Bit 8" : "5416" - "Undefined Access (no effect) Bit 9" : "5417" - "Undefined Access (no effect) Bit 10" : "5418" - "Undefined Access (no effect) Bit 11" : "5419" - "Undefined Access (no effect) Bit 12" : "5420" - "Undefined Access (no effect) Bit 13" : "5421" - "Undefined Access (no effect) Bit 14" : "5422" - "Undefined Access (no effect) Bit 15" : "5423" - "AddMember" : "5424" - "RemoveMember" : "5425" - "ListMembers" : "5426" - "ReadInformation" : "5427" - "WriteAccount" : "5428" - "Undefined Access (no effect) Bit 5" : "5429" - "Undefined Access (no effect) Bit 6" : "5430" - "Undefined Access (no effect) Bit 7" : "5431" - "Undefined Access (no effect) Bit 8" : "5432" - "Undefined Access (no effect) Bit 9" : "5433" - "Undefined Access (no effect) Bit 10" : "5434" - "Undefined Access (no effect) Bit 11" : "5435" - "Undefined Access (no effect) Bit 12" : "5436" - "Undefined Access (no effect) Bit 13" : "5437" - "Undefined Access (no effect) Bit 14" : "5438" - "Undefined Access (no effect) Bit 15" : "5439" - "ReadGeneralInformation" : "5440" - "ReadPreferences" : "5441" - "WritePreferences" : "5442" - "ReadLogon" : "5443" - "ReadAccount" : "5444" - "WriteAccount" : "5445" - "ChangePassword (with knowledge of old password)" : "5446" - "SetPassword (without knowledge of old password)" : "5447" - "ListGroups" : "5448" - "ReadGroupMembership" : "5449" - "ChangeGroupMembership" : "5450" - "Undefined Access (no effect) Bit 11" : "5451" - "Undefined Access (no effect) Bit 12" : "5452" - "Undefined Access (no effect) Bit 13" : "5453" - "Undefined Access (no effect) Bit 14" : "5454" - "Undefined Access (no effect) Bit 15" : "5455" - "View non-sensitive policy information" : "5632" - "View system audit requirements" : "5633" - "Get sensitive policy information" : "5634" - "Modify domain trust relationships" : "5635" - "Create special accounts (for assignment of user rights)" : "5636" - "Create a secret object" : "5637" - "Create a privilege" : "5638" - "Set default quota limits" : "5639" - "Change system audit requirements" : "5640" - "Administer audit log attributes" : "5641" - "Enable/Disable LSA" : "5642" - "Lookup Names/SIDs" : "5643" - "Change secret value" : "5648" - "Query secret value" : "5649" - "Undefined Access (no effect) Bit 2" : "5650" - "Undefined Access (no effect) Bit 3" : "5651" - "Undefined Access (no effect) Bit 4" : "5652" - "Undefined Access (no effect) Bit 5" : "5653" - "Undefined Access (no effect) Bit 6" : "5654" - "Undefined Access (no effect) Bit 7" : "5655" - "Undefined Access (no effect) Bit 8" : "5656" - "Undefined Access (no effect) Bit 9" : "5657" - "Undefined Access (no effect) Bit 10" : "5658" - "Undefined Access (no effect) Bit 11" : "5659" - "Undefined Access (no effect) Bit 12" : "5660" - "Undefined Access (no effect) Bit 13" : "5661" - "Undefined Access (no effect) Bit 14" : "5662" - "Undefined Access (no effect) Bit 15" : "5663" - "Query trusted domain name/SID" : "5664" - "Retrieve the controllers in the trusted domain" : "5665" - "Change the controllers in the trusted domain" : "5666" - "Query the Posix ID offset assigned to the trusted domain" : "5667" - "Change the Posix ID offset assigned to the trusted domain" : "5668" - "Undefined Access (no effect) Bit 5" : "5669" - "Undefined Access (no effect) Bit 6" : "5670" - "Undefined Access (no effect) Bit 7" : "5671" - "Undefined Access (no effect) Bit 8" : "5672" - 
"Undefined Access (no effect) Bit 9" : "5673" - "Undefined Access (no effect) Bit 10" : "5674" - "Undefined Access (no effect) Bit 11" : "5675" - "Undefined Access (no effect) Bit 12" : "5676" - "Undefined Access (no effect) Bit 13" : "5677" - "Undefined Access (no effect) Bit 14" : "5678" - "Undefined Access (no effect) Bit 15" : "5679" - "Query account information" : "5680" - "Change privileges assigned to account" : "5681" - "Change quotas assigned to account" : "5682" - "Change logon capabilities assigned to account" : "5683" - "Change the Posix ID offset assigned to the accounted domain" : "5684" - "Undefined Access (no effect) Bit 5" : "5685" - "Undefined Access (no effect) Bit 6" : "5686" - "Undefined Access (no effect) Bit 7" : "5687" - "Undefined Access (no effect) Bit 8" : "5688" - "Undefined Access (no effect) Bit 9" : "5689" - "Undefined Access (no effect) Bit 10" : "5690" - "Undefined Access (no effect) Bit 11" : "5691" - "Undefined Access (no effect) Bit 12" : "5692" - "Undefined Access (no effect) Bit 13" : "5693" - "Undefined Access (no effect) Bit 14" : "5694" - "Undefined Access (no effect) Bit 15" : "5695" - "KeyedEvent Wait" : "5696" - "KeyedEvent Wake" : "5697" - "Undefined Access (no effect) Bit 2" : "5698" - "Undefined Access (no effect) Bit 3" : "5699" - "Undefined Access (no effect) Bit 4" : "5700" - "Undefined Access (no effect) Bit 5" : "5701" - "Undefined Access (no effect) Bit 6" : "5702" - "Undefined Access (no effect) Bit 7" : "5703" - "Undefined Access (no effect) Bit 8" : "5704" - "Undefined Access (no effect) Bit 9" : "5705" - "Undefined Access (no effect) Bit 10" : "5706" - "Undefined Access (no effect) Bit 11" : "5707" - "Undefined Access (no effect) Bit 12" : "5708" - "Undefined Access (no effect) Bit 13" : "5709" - "Undefined Access (no effect) Bit 14" : "5710" - "Undefined Access (no effect) Bit 15" : "5711" - "Enumerate desktops" : "6656" - "Read attributes" : "6657" - "Access Clipboard" : "6658" - "Create desktop" : "6659" - "Write attributes" : "6660" - "Access global atoms" : "6661" - "Exit windows" : "6662" - "Unused Access Flag" : "6663" - "Include this windowstation in enumerations" : "6664" - "Read screen" : "6665" - "Read Objects" : "6672" - "Create window" : "6673" - "Create menu" : "6674" - "Hook control" : "6675" - "Journal (record)" : "6676" - "Journal (playback)" : "6677" - "Include this desktop in enumerations" : "6678" - "Write objects" : "6679" - "Switch to this desktop" : "6680" - "Administer print server" : "6912" - "Enumerate printers" : "6913" - "Full Control" : "6930" - "Print" : "6931" - "Administer Document" : "6948" - "Connect to service controller" : "7168" - "Create a new service" : "7169" - "Enumerate services" : "7170" - "Lock service database for exclusive access" : "7171" - "Query service database lock state" : "7172" - "Set last-known-good state of service database" : "7173" - "Query service configuration information" : "7184" - "Set service configuration information" : "7185" - "Query status of service" : "7186" - "Enumerate dependencies of service" : "7187" - "Start the service" : "7188" - "Stop the service" : "7189" - "Pause or continue the service" : "7190" - "Query information from service" : "7191" - "Issue service-specific control commands" : "7192" - "DDE Share Read" : "7424" - "DDE Share Write" : "7425" - "DDE Share Initiate Static" : "7426" - "DDE Share Initiate Link" : "7427" - "DDE Share Request" : "7428" - "DDE Share Advise" : "7429" - "DDE Share Poke" : "7430" - "DDE Share Execute" : "7431" - "DDE Share 
Add Items" : "7432" - "DDE Share List Items" : "7433" - "Create Child" : "7680" - "Delete Child" : "7681" - "List Contents" : "7682" - "Write Self" : "7683" - "Read Property" : "7684" - "Write Property" : "7685" - "Delete Tree" : "7686" - "List Object" : "7687" - "Control Access" : "7688" - "Undefined Access (no effect) Bit 9" : "7689" - "Undefined Access (no effect) Bit 10" : "7690" - "Undefined Access (no effect) Bit 11" : "7691" - "Undefined Access (no effect) Bit 12" : "7692" - "Undefined Access (no effect) Bit 13" : "7693" - "Undefined Access (no effect) Bit 14" : "7694" - "Undefined Access (no effect) Bit 15" : "7695" - "Audit Set System Policy" : "7936" - "Audit Query System Policy" : "7937" - "Audit Set Per User Policy" : "7938" - "Audit Query Per User Policy" : "7939" - "Audit Enumerate Users" : "7940" - "Audit Set Options" : "7941" - "Audit Query Options" : "7942" - "Port sharing (read)" : "8064" - "Port sharing (write)" : "8065" - "Default credentials" : "8096" - "Credentials manager" : "8097" - "Fresh credentials" : "8098" - "Kerberos" : "8192" - "Preshared key" : "8193" - "Unknown authentication" : "8194" - "DES" : "8195" - "3DES" : "8196" - "MD5" : "8197" - "SHA1" : "8198" - "Local computer" : "8199" - "Remote computer" : "8200" - "No state" : "8201" - "Sent first (SA) payload" : "8202" - "Sent second (KE) payload" : "8203" - "Sent third (ID) payload" : "8204" - "Initiator" : "8205" - "Responder" : "8206" - "No state" : "8207" - "Sent first (SA) payload" : "8208" - "Sent final payload" : "8209" - "Complete" : "8210" - "Unknown" : "8211" - "Transport" : "8212" - "Tunnel" : "8213" - "IKE/AuthIP DoS prevention mode started" : "8214" - "IKE/AuthIP DoS prevention mode stopped" : "8215" - "Enabled" : "8216" - "Not enabled" : "8217" - "No state" : "8218" - "Sent first (EM attributes) payload" : "8219" - "Sent second (SSPI) payload" : "8220" - "Sent third (hash) payload" : "8221" - "IKEv1" : "8222" - "AuthIP" : "8223" - "Anonymous" : "8224" - "NTLM V2" : "8225" - "CGA" : "8226" - "Certificate" : "8227" - "SSL" : "8228" - "None" : "8229" - "DH group 1" : "8230" - "DH group 2" : "8231" - "DH group 14" : "8232" - "DH group ECP 256" : "8233" - "DH group ECP 384" : "8234" - "AES-128" : "8235" - "AES-192" : "8236" - "AES-256" : "8237" - "Certificate ECDSA P256" : "8238" - "Certificate ECDSA P384" : "8239" - "SSL ECDSA P256" : "8240" - "SSL ECDSA P384" : "8241" - "SHA 256" : "8242" - "SHA 384" : "8243" - "IKEv2" : "8244" - "EAP payload sent" : "8245" - "Authentication payload sent" : "8246" - "EAP" : "8247" - "DH group 24" : "8248" - "System" : "8272" - "Logon/Logoff" : "8273" - "Object Access" : "8274" - "Privilege Use" : "8275" - "Detailed Tracking" : "8276" - "Policy Change" : "8277" - "Account Management" : "8278" - "DS Access" : "8279" - "Account Logon" : "8280" - "Success removed" : "8448" - "Success Added" : "8449" - "Failure removed" : "8450" - "Failure Added" : "8451" - "Success include removed" : "8452" - "Success include added" : "8453" - "Success exclude removed" : "8454" - "Success exclude added" : "8455" - "Failure include removed" : "8456" - "Failure include added" : "8457" - "Failure exclude removed" : "8458" - "Failure exclude added" : "8459" - "Security State Change" : "12288" - "Security System Extension" : "12289" - "System Integrity" : "12290" - "IPsec Driver" : "12291" - "Other System Events" : "12292" - "Logon" : "12544" - "Logoff" : "12545" - "Account Lockout" : "12546" - "IPsec Main Mode" : "12547" - "Special Logon" : "12548" - "IPsec Quick Mode" : "12549" - "IPsec 
Extended Mode" : "12550" - "Other Logon/Logoff Events" : "12551" - "Network Policy Server" : "12552" - "User / Device Claims" : "12553" - "Group Membership" : "12554" - "File System" : "12800" - "Registry" : "12801" - "Kernel Object" : "12802" - "SAM" : "12803" - "Other Object Access Events" : "12804" - "Certification Services" : "12805" - "Application Generated" : "12806" - "Handle Manipulation" : "12807" - "File Share" : "12808" - "Filtering Platform Packet Drop" : "12809" - "Filtering Platform Connection" : "12810" - "Detailed File Share" : "12811" - "Removable Storage" : "12812" - "Central Policy Staging" : "12813" - "Sensitive Privilege Use" : "13056" - "Non Sensitive Privilege Use" : "13057" - "Other Privilege Use Events" : "13058" - "Process Creation" : "13312" - "Process Termination" : "13313" - "DPAPI Activity" : "13314" - "RPC Events" : "13315" - "Plug and Play Events" : "13316" - "Token Right Adjusted Events" : "13317" - "Audit Policy Change" : "13568" - "Authentication Policy Change" : "13569" - "Authorization Policy Change" : "13570" - "MPSSVC Rule-Level Policy Change" : "13571" - "Filtering Platform Policy Change" : "13572" - "Other Policy Change Events" : "13573" - "User Account Management" : "13824" - "Computer Account Management" : "13825" - "Security Group Management" : "13826" - "Distribution Group Management" : "13827" - "Application Group Management" : "13828" - "Other Account Management Events" : "13829" - "Directory Service Access" : "14080" - "Directory Service Changes" : "14081" - "Directory Service Replication" : "14082" - "Detailed Directory Service Replication" : "14083" - "Credential Validation" : "14336" - "Kerberos Service Ticket Operations" : "14337" - "Other Account Logon Events" : "14338" - "Kerberos Authentication Service" : "14339" - "Inbound" : "14592" - "Outbound" : "14593" - "Forward" : "14594" - "Bidirectional" : "14595" - "IP Packet" : "14596" - "Transport" : "14597" - "Forward" : "14598" - "Stream" : "14599" - "Datagram Data" : "14600" - "ICMP Error" : "14601" - "MAC 802.3" : "14602" - "MAC Native" : "14603" - "vSwitch" : "14604" - "Resource Assignment" : "14608" - "Listen" : "14609" - "Receive/Accept" : "14610" - "Connect" : "14611" - "Flow Established" : "14612" - "Resource Release" : "14614" - "Endpoint Closure" : "14615" - "Connect Redirect" : "14616" - "Bind Redirect" : "14617" - "Stream Packet" : "14624" - "ICMP Echo-Request" : "14640" - "vSwitch Ingress" : "14641" - "vSwitch Egress" : "14642" - "" : "14672" - "[NULL]" : "14673" - "Value Added" : "14674" - "Value Deleted" : "14675" - "Active Directory Domain Services" : "14676" - "Active Directory Lightweight Directory Services" : "14677" - "Yes" : "14678" - "No" : "14679" - "Value Added With Expiration Time" : "14680" - "Value Deleted With Expiration Time" : "14681" - "Value Auto Deleted With Expiration Time" : "14688" - "Add" : "16384" - "Delete" : "16385" - "Boot-time" : "16386" - "Persistent" : "16387" - "Not persistent" : "16388" - "Block" : "16389" - "Permit" : "16390" - "Callout" : "16391" - "MD5" : "16392" - "SHA-1" : "16393" - "SHA-256" : "16394" - "AES-GCM 128" : "16395" - "AES-GCM 192" : "16396" - "AES-GCM 256" : "16397" - "DES" : "16398" - "3DES" : "16399" - "AES-128" : "16400" - "AES-192" : "16401" - "AES-256" : "16402" - "Transport" : "16403" - "Tunnel" : "16404" - "Responder" : "16405" - "Initiator" : "16406" - "AES-GMAC 128" : "16407" - "AES-GMAC 192" : "16408" - "AES-GMAC 256" : "16409" - "AuthNoEncap Transport" : "16416" - "Enable WMI Account" : "16896" - "Execute Method" : 
"16897" - "Full Write" : "16898" - "Partial Write" : "16899" - "Provider Write" : "16900" - "Remote Access" : "16901" - "Subscribe" : "16902" - "Publish" : "16903" + "..." : ["1831"] + "3DES" : ["8196","16399"] + "64-bit Integer" : ["1820"] + "" : ["14672"] + "" : ["1800"] + "" : ["1794"] + "" : ["1793"] + "ACCESS_SYS_SEC" : ["1542"] + "AES-128" : ["16400","8235"] + "AES-192" : ["8236","16401"] + "AES-256" : ["16402","8237"] + "AES-GCM 128" : ["16395"] + "AES-GCM 192" : ["16396"] + "AES-GCM 256" : ["16397"] + "AES-GMAC 128" : ["16407"] + "AES-GMAC 192" : ["16408"] + "AES-GMAC 256" : ["16409"] + "Access Clipboard" : ["6658"] + "Access global atoms" : ["6661"] + "Account Disabled" : ["2080"] + "Account Enabled" : ["2048"] + "Account Locked" : ["2090"] + "Account Lockout" : ["12546"] + "Account Logon" : ["8280"] + "Account Management" : ["8278"] + "Account Unlocked" : ["2058"] + "Account currently disabled." : ["2310"] + "Account locked out." : ["2307"] + "Account logon time restriction violation." : ["2311"] + "Active Directory Domain Services" : ["14676"] + "Active Directory Lightweight Directory Services" : ["14677"] + "Add" : ["16384"] + "Add context." : ["2491"] + "Add function property." : ["2497"] + "Add function provider." : ["2495"] + "Add function." : ["2493"] + "Add provider." : ["2489"] + "AddMember" : ["5410","5424"] + "AdjustDefaultDacl" : ["4599"] + "AdjustGroups" : ["4598"] + "AdjustPrivileges" : ["4597"] + "AdjustSessionID" : ["4600"] + "Administer Document" : ["6948"] + "Administer audit log attributes" : ["5641"] + "Administer print server" : ["6912"] + "AdministerServer" : ["5402"] + "All" : ["1797"] + "An Error occured during Logon." : ["2304"] + "Anonymous" : ["8224"] + "AppendData (or AddSubdirectory or CreatePipeInstance)" : ["4418"] + "Application Generated" : ["12806"] + "Application Group Management" : ["13828"] + "Assign Primary Token Privilege" : ["1603"] + "Assign a token to the thread" : ["4567"] + "Assign process" : ["5136"] + "AssignAsPrimary" : ["4592"] + "Audit Enumerate Users" : ["7940"] + "Audit Policy Change" : ["13568"] + "Audit Policy query/set API Operation" : ["1799"] + "Audit Query Options" : ["7942"] + "Audit Query Per User Policy" : ["7939"] + "Audit Query System Policy" : ["7937"] + "Audit Set Options" : ["7941"] + "Audit Set Per User Policy" : ["7938"] + "Audit Set System Policy" : ["7936"] + "AuthIP" : ["8223"] + "AuthNoEncap Transport" : ["16416"] + "Authentication Policy Change" : ["13569"] + "Authentication payload sent" : ["8246"] + "Authorization Policy Change" : ["13570"] + "Auto" : ["1849"] + "Backup Privilege" : ["1617"] + "Bidirectional" : ["14595"] + "Bind Redirect" : ["14617"] + "Blob" : ["1822"] + "Block" : ["16389"] + "Boolean" : ["1824"] + "Boot-time" : ["16386"] + "CGA" : ["8226"] + "Callout" : ["16391"] + "Cause thread to directly impersonate another thread" : ["4568"] + "Central Policy Staging" : ["12813"] + "Certificate" : ["8227"] + "Certificate ECDSA P256" : ["8238"] + "Certificate ECDSA P384" : ["8239"] + "Certification Services" : ["12805"] + "Change Hardware Environment Privilege" : ["1622"] + "Change Notify (and Traverse) Privilege" : ["1623"] + "Change logon capabilities assigned to account" : ["5683"] + "Change privileges assigned to account" : ["5681"] + "Change quotas assigned to account" : ["5682"] + "Change secret value" : ["5648"] + "Change system audit requirements" : ["5640"] + "Change the Posix ID offset assigned to the accounted domain" : ["5684"] + "Change the Posix ID offset assigned to the trusted domain" : 
["5668"] + "Change the controllers in the trusted domain" : ["5666"] + "ChangeGroupMembership" : ["5450"] + "ChangePassword (with knowledge of old password)" : ["5446"] + "Channel query information" : ["5122"] + "Channel read message" : ["5120"] + "Channel set information" : ["5123"] + "Channel write message" : ["5121"] + "Communicate using port" : ["4464"] + "Complete" : ["8210"] + "Computer Account Management" : ["13825"] + "Connect" : ["14611"] + "Connect Redirect" : ["14616"] + "Connect to service controller" : ["7168"] + "ConnectToServer" : ["5376"] + "Control Access" : ["7688"] + "Control profile" : ["4496"] + "Create Child" : ["7680"] + "Create Key." : ["2481"] + "Create Link" : ["4437"] + "Create Pagefile Privilege" : ["1615"] + "Create Permanent Object Privilege" : ["1616"] + "Create a new service" : ["7169"] + "Create a privilege" : ["5638"] + "Create a secret object" : ["5637"] + "Create a subprocess of process" : ["4487"] + "Create desktop" : ["6659"] + "Create instance of object type" : ["4608"] + "Create menu" : ["6674"] + "Create new thread in process" : ["4481"] + "Create object in directory" : ["4370"] + "Create special accounts (for assignment of user rights)" : ["5636"] + "Create sub-directory" : ["4371"] + "Create sub-key" : ["4434"] + "Create window" : ["6673"] + "CreateDomain" : ["5379"] + "CreateGlobalGroup" : ["5397"] + "CreateLocalGroup" : ["5398"] + "CreateUser" : ["5396"] + "Credential Validation" : ["14336"] + "Credentials manager" : ["8097"] + "DDE Share Add Items" : ["7432"] + "DDE Share Advise" : ["7429"] + "DDE Share Execute" : ["7431"] + "DDE Share Initiate Link" : ["7427"] + "DDE Share Initiate Static" : ["7426"] + "DDE Share List Items" : ["7433"] + "DDE Share Poke" : ["7430"] + "DDE Share Read" : ["7424"] + "DDE Share Request" : ["7428"] + "DDE Share Write" : ["7425"] + "DELETE" : ["1537"] + "DES" : ["16398","8195"] + "DH group 1" : ["8230"] + "DH group 14" : ["8232"] + "DH group 2" : ["8231"] + "DH group 24" : ["8248"] + "DH group ECP 256" : ["8233"] + "DH group ECP 384" : ["8234"] + "DPAPI Activity" : ["13314"] + "DS Access" : ["8279"] + "Datagram Data" : ["14600"] + "Debug Privilege" : ["1620"] + "Decrypt." : ["2484"] + "Default" : ["1846"] + "Default credentials" : ["8096"] + "Delegation" : ["1840"] + "Delete" : ["16385"] + "Delete Child" : ["7681"] + "Delete Key." : ["2482"] + "Delete Tree" : ["7686"] + "Delete key file." : ["2457"] + "DeleteChild" : ["4422"] + "Denied by" : ["1802"] + "Denied by ACE on parent folder" : ["1812"] + "Denied by Empty DACL" : ["1807"] + "Denied by Integrity Policy check" : ["1803"] + "Denied by Process Trust Label ACE" : ["1841"] + "Detailed Directory Service Replication" : ["14083"] + "Detailed File Share" : ["12811"] + "Detailed Tracking" : ["8276"] + "Device Access Bit 0" : ["4352"] + "Device Access Bit 1" : ["4353"] + "Device Access Bit 2" : ["4354"] + "Device Access Bit 3" : ["4355"] + "Device Access Bit 4" : ["4356"] + "Device Access Bit 5" : ["4357"] + "Device Access Bit 6" : ["4358"] + "Device Access Bit 7" : ["4359"] + "Device Access Bit 8" : ["4360"] + "Directly impersonate this thread" : ["4569"] + "Directory Service Access" : ["14080"] + "Directory Service Changes" : ["14081"] + "Directory Service Replication" : ["14082"] + "Disabled" : ["1796"] + "DisallowMmConfig" : ["1847"] + "Distribution Group Management" : ["13827"] + "Domain settings" : ["2487"] + "Domain sid inconsistent." 
: ["2314"] + "Don't Expire Password' - Disabled" : ["2057"] + "Don't Expire Password' - Enabled" : ["2089"] + "Don't Require Preauth' - Disabled" : ["2064"] + "Don't Require Preauth' - Enabled" : ["2096"] + "Duplicate" : ["4593"] + "Duplicate handle into or out of process" : ["4486"] + "EAP" : ["8247"] + "EAP payload sent" : ["8245"] + "Enable 64(or 32) bit application to open 32 bit key" : ["4441"] + "Enable 64(or 32) bit application to open 64 bit key" : ["4440"] + "Enable WMI Account" : ["16896"] + "Enable/Disable LSA" : ["5642"] + "Enabled" : ["1795","8216"] + "Encrypt." : ["2483"] + "Encrypted Text Password Allowed' - Disabled" : ["2059"] + "Encrypted Text Password Allowed' - Enabled" : ["2091"] + "Endpoint Closure" : ["14615"] + "Enumerate dependencies of service" : ["7187"] + "Enumerate desktops" : ["6656"] + "Enumerate printers" : ["6913"] + "Enumerate services" : ["7170"] + "Enumerate sub-keys" : ["4435"] + "EnumerateDomains" : ["5380"] + "Exclude Authorization Information' - Disabled" : ["2067"] + "Exclude Authorization Information' - Enabled" : ["2099"] + "Execute Method" : ["16897"] + "Execute/Traverse" : ["4421"] + "Existing registry value modified" : ["1905"] + "Exit windows" : ["6662"] + "Export of persistent cryptographic key." : ["2464"] + "Extend size" : ["4516"] + "FALSE" : ["1826"] + "FQBN" : ["1821"] + "Failed to unprotect persistent cryptographic key." : ["2448"] + "Failed to zero secret data." : ["2438"] + "Failure Added" : ["8451"] + "Failure exclude added" : ["8459"] + "Failure exclude removed" : ["8458"] + "Failure include added" : ["8457"] + "Failure include removed" : ["8456"] + "Failure removed" : ["8450"] + "File Share" : ["12808"] + "File System" : ["12800"] + "Filtering Platform Connection" : ["12810"] + "Filtering Platform Packet Drop" : ["12809"] + "Filtering Platform Policy Change" : ["13572"] + "Flow Established" : ["14612"] + "Force process termination" : ["4480"] + "Force thread termination" : ["4560"] + "Forward" : ["14598","14594"] + "Fresh credentials" : ["8098"] + "Friday" : ["1925"] + "Full Control" : ["6930"] + "Full Write" : ["16898"] + "Get sensitive policy information" : ["5634"] + "Get thread context" : ["4563"] + "GetLocalGroupMembership" : ["5399"] + "Granted by" : ["1801"] + "Granted by ACE on parent folder" : ["1811"] + "Granted by Central Access Rule" : ["1813"] + "Granted by NULL DACL" : ["1806"] + "Granted by NULL Security Descriptor" : ["1808"] + "Granted by Ownership" : ["1804"] + "Granted by parent folder's Central Access Rule" : ["1815"] + "Group Membership" : ["12554"] + "Handle Manipulation" : ["12807"] + "Home Directory Required' - Disabled" : ["2049"] + "Home Directory Required' - Enabled" : ["2081"] + "Hook control" : ["6675"] + "ICMP Echo-Request" : ["14640"] + "ICMP Error" : ["14601"] + "IKE/AuthIP DoS prevention mode started" : ["8214"] + "IKE/AuthIP DoS prevention mode stopped" : ["8215"] + "IKEv1" : ["8222"] + "IKEv2" : ["8244"] + "IP Packet" : ["14596"] + "IPsec Driver" : ["12291"] + "IPsec Extended Mode" : ["12550"] + "IPsec Main Mode" : ["12547"] + "IPsec Quick Mode" : ["12549"] + "Identification" : ["1832"] + "Impersonate" : ["4594"] + "Impersonation" : ["1833"] + "Import of persistent cryptographic key." 
: ["2465"] + "Inbound" : ["14592"] + "Include this desktop in enumerations" : ["6678"] + "Include this windowstation in enumerations" : ["6664"] + "Increase Memory Quota Privilege" : ["1605"] + "Increment Base Priority Privilege" : ["1614"] + "InitializeServer" : ["5378"] + "Initiator" : ["8205","16406"] + "Interdomain Trust Account' - Disabled" : ["2054"] + "Interdomain Trust Account' - Enabled" : ["2086"] + "Invalid" : ["1827"] + "Issue service-specific control commands" : ["7192"] + "Journal (playback)" : ["6677"] + "Journal (record)" : ["6676"] + "Kerberos" : ["8192"] + "Kerberos Authentication Service" : ["14339"] + "Kerberos Service Ticket Operations" : ["14337"] + "Kernel Object" : ["12802"] + "Key Derivation." : ["2501"] + "Key export checks failed." : ["2449"] + "Key failed pair wise consistency check." : ["2439"] + "KeyedEvent Wait" : ["5696"] + "KeyedEvent Wake" : ["5697"] + "List Contents" : ["7682"] + "List Object" : ["7687"] + "ListAccounts" : ["5400"] + "ListGroups" : ["5448"] + "ListMembers" : ["5412","5426"] + "Listen" : ["14609"] + "Load/Unload Driver Privilege" : ["1610"] + "Local computer" : ["8199"] + "Local settings" : ["2488"] + "Lock Memory Privilege" : ["1604"] + "Lock service database for exclusive access" : ["7171"] + "Logoff" : ["12545"] + "Logon" : ["12544"] + "Logon/Logoff" : ["8273"] + "Lookup Names/SIDs" : ["5643"] + "LookupDomain" : ["5381"] + "LookupIDs" : ["5401"] + "MAC 802.3" : ["14602"] + "MAC Native" : ["14603"] + "MAX_ALLOWED" : ["1543"] + "MD5" : ["16392","8197"] + "MNS Logon Account' - Disabled" : ["2053"] + "MNS Logon Account' - Enabled" : ["2085"] + "MPSSVC Rule-Level Policy Change" : ["13571"] + "Machine key." : ["2499"] + "Map section for execute" : ["4515"] + "Map section for read" : ["4514"] + "Map section for write" : ["4513"] + "Modify State" : ["4865"] + "Modify domain trust relationships" : ["5635"] + "Modify event state" : ["4385"] + "Modify semaphore state" : ["4529"] + "Modify timer state" : ["4577"] + "Monday" : ["1921"] + "NOT Granted by Central Access Rule" : ["1814"] + "NOT Granted by parent folder's Central Access Rule" : ["1816"] + "NTLM V2" : ["8225"] + "Network Policy Server" : ["12552"] + "New registry value created" : ["1904"] + "No" : ["14679","1843"] + "No state" : ["8207","8218","8201"] + "Non Sensitive Privilege Use" : ["13057"] + "None" : ["1798","8229"] + "Normal Account' - Disabled" : ["2052"] + "Normal Account' - Enabled" : ["2084"] + "Not Available" : ["1845"] + "Not Available." : ["2432"] + "Not Delegated' - Disabled" : ["2062"] + "Not Delegated' - Enabled" : ["2094"] + "Not enabled" : ["8217"] + "Not granted" : ["1805"] + "Not granted due to missing" : ["1810"] + "Not granted to AppContainers" : ["1830"] + "Not persistent" : ["16388"] + "Not used" : ["1601"] + "Notify about changes to keys" : ["4436"] + "Object Access" : ["8274"] + "Off" : ["1848"] + "Open Key." : ["2480"] + "Open key file." 
: ["2456"] + "Other Account Logon Events" : ["14338"] + "Other Account Management Events" : ["13829"] + "Other Logon/Logoff Events" : ["12551"] + "Other Object Access Events" : ["12804"] + "Other Policy Change Events" : ["13573"] + "Other Privilege Use Events" : ["13058"] + "Other System Events" : ["12292"] + "Outbound" : ["14593"] + "Partial Write" : ["16899"] + "Password Expired' - Disabled" : ["2065"] + "Password Expired' - Enabled" : ["2097"] + "Password Not Required' - Disabled" : ["2050"] + "Password Not Required' - Enabled" : ["2082"] + "Pause or continue the service" : ["7190"] + "Perform virtual memory operation" : ["4483"] + "Permit" : ["16390"] + "Persistent" : ["16387"] + "Plug and Play Events" : ["13316"] + "Policy Change" : ["8277"] + "Port sharing (read)" : ["8064"] + "Port sharing (write)" : ["8065"] + "Preshared key" : ["8193"] + "Print" : ["6931"] + "Privilege Use" : ["8275"] + "Process Creation" : ["13312"] + "Process Termination" : ["13313"] + "Profile Single Process Privilege" : ["1613"] + "Profile System Privilege" : ["1611"] + "Protect Kerberos Service Tickets with AES Keys' - Disabled" : ["2069"] + "Protect Kerberos Service Tickets with AES Keys' - Enabled" : ["2101"] + "Provider Write" : ["16900"] + "Publish" : ["16903"] + "Query" : ["4595"] + "Query Attributes" : ["5138"] + "Query State" : ["4864"] + "Query account information" : ["5680"] + "Query directory" : ["4368"] + "Query event state" : ["4384"] + "Query information from service" : ["7191"] + "Query key value" : ["4432"] + "Query mutant state" : ["4448"] + "Query process information" : ["4490"] + "Query secret value" : ["5649"] + "Query section state" : ["4512"] + "Query semaphore state" : ["4528"] + "Query service configuration information" : ["7184"] + "Query service database lock state" : ["7172"] + "Query status of service" : ["7186"] + "Query the Posix ID offset assigned to the trusted domain" : ["5667"] + "Query thread information" : ["4566"] + "Query timer state" : ["4576"] + "Query trusted domain name/SID" : ["5664"] + "QuerySource" : ["4596"] + "READ_CONTROL" : ["1538"] + "REG_BINARY" : ["1875"] + "REG_DWORD" : ["1876"] + "REG_DWORD_BIG_ENDIAN" : ["1877"] + "REG_EXPAND_SZ" : ["1874"] + "REG_FULL_RESOURCE_DESCRIPTOR" : ["1881"] + "REG_LINK" : ["1878"] + "REG_MULTI_SZ (New lines are replaced with *. A * is replaced with **)" : ["1879"] + "REG_NONE" : ["1872"] + "REG_QWORD" : ["1883"] + "REG_RESOURCE_LIST" : ["1880"] + "REG_RESOURCE_REQUIREMENTS_LIST" : ["1882"] + "REG_SZ" : ["1873"] + "RPC Events" : ["13315"] + "Random number generation failed FIPS-140 pre-hash check." : ["2437"] + "Random number generator failure." : ["2436"] + "Read Objects" : ["6672"] + "Read Property" : ["7684"] + "Read attributes" : ["6657"] + "Read from process memory" : ["4484"] + "Read persisted key from file." : ["2458"] + "Read screen" : ["6665"] + "ReadAccount" : ["5444"] + "ReadAttributes" : ["4423"] + "ReadData (or ListDirectory)" : ["4416"] + "ReadEA" : ["4419"] + "ReadGeneralInformation" : ["5440"] + "ReadGroupMembership" : ["5449"] + "ReadInformation" : ["5427","5408"] + "ReadLogon" : ["5443"] + "ReadOtherParameters" : ["5394"] + "ReadPasswordParameters" : ["5392"] + "ReadPreferences" : ["5441"] + "Receive/Accept" : ["14610"] + "Registry" : ["12801"] + "Registry value deleted" : ["1906"] + "Remote Access" : ["16901"] + "Remote computer" : ["8200"] + "Remotely Shut System Down Privilege" : ["1624"] + "Removable Storage" : ["12812"] + "Remove context." : ["2492"] + "Remove function property." 
: ["2498"] + "Remove function provider." : ["2496"] + "Remove function." : ["2494"] + "Remove provider." : ["2490"] + "RemoveMember" : ["5425","5411"] + "Resource Assignment" : ["14608"] + "Resource Release" : ["14614"] + "Responder" : ["16405","8206"] + "Restore From Backup Privilege" : ["1618"] + "Retrieve the controllers in the trusted domain" : ["5665"] + "SAM" : ["12803"] + "SHA 256" : ["8242"] + "SHA 384" : ["8243"] + "SHA-1" : ["16393"] + "SHA-256" : ["16394"] + "SHA1" : ["8198"] + "SSL" : ["8228"] + "SSL ECDSA P256" : ["8240"] + "SSL ECDSA P384" : ["8241"] + "SYNCHRONIZE" : ["1541"] + "Saturday" : ["1926"] + "Secret agreement." : ["2486"] + "Security Group Management" : ["13826"] + "Security Privilege" : ["1608"] + "Security State Change" : ["12288"] + "Security System Extension" : ["12289"] + "Send an alert to thread" : ["4562"] + "Sensitive Privilege Use" : ["13056"] + "Sent final payload" : ["8209"] + "Sent first (EM attributes) payload" : ["8219"] + "Sent first (SA) payload" : ["8208","8202"] + "Sent second (KE) payload" : ["8203"] + "Sent second (SSPI) payload" : ["8220"] + "Sent third (ID) payload" : ["8204"] + "Sent third (hash) payload" : ["8221"] + "Server Trust Account' - Disabled" : ["2056"] + "Server Trust Account' - Enabled" : ["2088"] + "Set Attributes" : ["5137"] + "Set Security Attributes" : ["5140"] + "Set System Time Privilege" : ["1612"] + "Set default quota limits" : ["5639"] + "Set key value" : ["4433"] + "Set last-known-good state of service database" : ["7173"] + "Set process information" : ["4489"] + "Set process quotas" : ["4488"] + "Set process session ID" : ["4482"] + "Set process termination port" : ["4491"] + "Set service configuration information" : ["7185"] + "Set thread context" : ["4564"] + "Set thread information" : ["4565"] + "SetPassword (without knowledge of old password)" : ["5447"] + "Shutdown System Privilege" : ["1619"] + "ShutdownServer" : ["5377"] + "Sid" : ["1823"] + "Sign hash." : ["2485"] + "Signature verification failed." : ["2451"] + "Smartcard Required' - Disabled" : ["2060"] + "Smartcard Required' - Enabled" : ["2092"] + "Smartcard logon is required and was not used." : ["2315"] + "Special Logon" : ["12548"] + "Start the service" : ["7188"] + "Stop the service" : ["7189"] + "Stream" : ["14599"] + "Stream Packet" : ["14624"] + "String" : ["1818"] + "Subscribe" : ["16902"] + "Success Added" : ["8449"] + "Success exclude added" : ["8455"] + "Success exclude removed" : ["8454"] + "Success include added" : ["8453"] + "Success include removed" : ["8452"] + "Success removed" : ["8448"] + "Sunday" : ["1920"] + "Suspend or resume thread" : ["4561"] + "Switch to this desktop" : ["6680"] + "System" : ["1844","8272"] + "System Integrity" : ["12290"] + "TRUE" : ["1825"] + "Take Ownership Privilege" : ["1609"] + "Temp Duplicate Account' - Disabled" : ["2051"] + "Temp Duplicate Account' - Enabled" : ["2083"] + "Terminate Job" : ["5139"] + "The NetLogon component is not active." : ["2306"] + "The specified account's password has expired." : ["2309"] + "The specified user account has expired." : ["2305"] + "The user has not been granted the requested logon type at this machine." 
: ["2308"] + "Thursday" : ["1924"] + "Token Right Adjusted Events" : ["13317"] + "TokenElevationTypeDefault (1)" : ["1936"] + "TokenElevationTypeFull (2)" : ["1937"] + "TokenElevationTypeLimited (3)" : ["1938"] + "Transport" : ["14597","16403","8212"] + "Traverse" : ["4369"] + "Trusted Computer Base Privilege" : ["1607"] + "Trusted For Delegation' - Disabled" : ["2061"] + "Trusted For Delegation' - Enabled" : ["2093"] + "Trusted To Authenticate For Delegation' - Disabled" : ["2066"] + "Trusted To Authenticate For Delegation' - Enabled" : ["2098"] + "Tuesday" : ["1922"] + "Tunnel" : ["16404","8213"] + "Undefined Access (no effect) Bit 1" : ["4609","4545","4497","4465","4449"] + "Undefined Access (no effect) Bit 10" : ["4554","4618","4378","5418","4474","7690","5690","4442","4522","4458","4602","5658","5434","5146","5706","4426","5386","4362","4538","4570","4586","5674","4506","4394","5130"] + "Undefined Access (no effect) Bit 11" : ["4587","5435","5691","5675","4603","4379","5451","5387","5707","4619","7691","4395","4459","4427","4571","4363","4539","5403","4443","5147","4523","5131","4475","4555","4507","5419","5659"] + "Undefined Access (no effect) Bit 12" : ["5660","4364","4620","5708","4540","4428","4524","5148","5420","4508","5404","5452","4380","4460","4604","5436","4492","4396","4556","7692","5676","4588","4476","4572","4444","5132","5692","5388"] + "Undefined Access (no effect) Bit 13" : ["5149","5437","4477","5389","4525","4557","5421","4605","4541","4461","5677","5693","4509","4621","4589","4381","5405","4429","4445","4573","5661","4397","5709","4365","5453","7693","4493","5133"] + "Undefined Access (no effect) Bit 14" : ["4510","4366","4606","4462","4558","5694","4446","5710","5390","5438","4478","4398","4382","4590","5150","5454","5134","5678","7694","5662","4526","4622","5422","4574","4542","4494","4430","5406"] + "Undefined Access (no effect) Bit 15" : ["4399","5679","4447","5391","5407","5135","4559","4591","5663","5439","4511","4431","4495","5151","4607","7695","4623","4575","4543","4479","5455","4367","4383","5695","5423","5711","4527","4463"] + "Undefined Access (no effect) Bit 2" : ["4450","4498","4466","5698","4386","5650","4610","4578","4530","4546"] + "Undefined Access (no effect) Bit 3" : ["4451","5699","4579","5651","4467","4387","4547","4611","4531","4499"] + "Undefined Access (no effect) Bit 4" : ["4372","5652","5124","4468","4580","4548","4500","4452","4532","5700","4612","4388"] + "Undefined Access (no effect) Bit 5" : ["5669","5701","5653","4517","4453","4469","4501","5125","4549","4533","4581","5429","5685","4373","5413","4389","4613","5141"] + "Undefined Access (no effect) Bit 6" : ["5654","4534","4502","4390","5414","5382","4550","4582","4518","4614","4438","4454","4374","5126","4470","5430","5702","5670","5686","5142"] + "Undefined Access (no effect) Bit 7" : ["4519","4455","5143","4375","5703","4471","5383","5415","4391","5687","5431","5655","4551","5127","4503","4439","5671","279","4535","4615"] + "Undefined Access (no effect) Bit 8" : ["5144","4376","5656","4552","4472","4504","4456","5128","4392","4616","4536","4584","4520","5432","5384","5672","5416","5704","5688"] + "Undefined Access (no effect) Bit 9" : ["5433","5145","4361","4457","4601","4537","4585","4393","4521","5657","5673","4553","7689","5385","4425","4505","4377","5689","5417","5705","4617","5129","4473"] + "Undefined UserAccountControl Bit 20' - Disabled" : ["2068"] + "Undefined UserAccountControl Bit 20' - Enabled" : ["2100"] + "Undefined UserAccountControl Bit 22' - Disabled" : ["2070"] + 
"Undefined UserAccountControl Bit 22' - Enabled" : ["2102"] + "Undefined UserAccountControl Bit 23' - Disabled" : ["2071"] + "Undefined UserAccountControl Bit 23' - Enabled" : ["2103"] + "Undefined UserAccountControl Bit 24' - Disabled" : ["2072"] + "Undefined UserAccountControl Bit 24' - Enabled" : ["2104"] + "Undefined UserAccountControl Bit 25' - Disabled" : ["2073"] + "Undefined UserAccountControl Bit 25' - Enabled" : ["2105"] + "Undefined UserAccountControl Bit 26' - Disabled" : ["2074"] + "Undefined UserAccountControl Bit 26' - Enabled" : ["2106"] + "Undefined UserAccountControl Bit 27' - Disabled" : ["2075"] + "Undefined UserAccountControl Bit 27' - Enabled" : ["2107"] + "Undefined UserAccountControl Bit 28' - Disabled" : ["2076"] + "Undefined UserAccountControl Bit 28' - Enabled" : ["2108"] + "Undefined UserAccountControl Bit 29' - Disabled" : ["2077"] + "Undefined UserAccountControl Bit 29' - Enabled" : ["2109"] + "Undefined UserAccountControl Bit 30' - Disabled" : ["2078"] + "Undefined UserAccountControl Bit 30' - Enabled" : ["2110"] + "Undefined UserAccountControl Bit 31' - Disabled" : ["2079"] + "Undefined UserAccountControl Bit 31' - Enabled" : ["2111"] + "Unknown" : ["8211"] + "Unknown Type" : ["1817"] + "Unknown authentication" : ["8194"] + "Unknown or unchecked" : ["1809"] + "Unknown specific access (bit 0)" : ["1552"] + "Unknown specific access (bit 1)" : ["1553"] + "Unknown specific access (bit 10)" : ["1562"] + "Unknown specific access (bit 11)" : ["1563"] + "Unknown specific access (bit 12)" : ["1564"] + "Unknown specific access (bit 13)" : ["1565"] + "Unknown specific access (bit 14)" : ["1566"] + "Unknown specific access (bit 15)" : ["1567"] + "Unknown specific access (bit 2)" : ["1554"] + "Unknown specific access (bit 3)" : ["1555"] + "Unknown specific access (bit 4)" : ["1556"] + "Unknown specific access (bit 5)" : ["1557"] + "Unknown specific access (bit 6)" : ["1558"] + "Unknown specific access (bit 7)" : ["1559"] + "Unknown specific access (bit 8)" : ["1560"] + "Unknown specific access (bit 9)" : ["1561"] + "Unknown user name or bad password." : ["2313"] + "Unsigned 64-bit Integer" : ["1819"] + "Unsolicited Input Privilege" : ["1606"] + "Unused Access Flag" : ["6663"] + "Unused message ID" : ["1536"] + "Use DES Key Only' - Disabled" : ["2063"] + "Use DES Key Only' - Enabled" : ["2095"] + "Use symbolic link" : ["4544"] + "User / Device Claims" : ["12553"] + "User Account Management" : ["13824"] + "User key." : ["2500"] + "User not allowed to logon at this computer." : ["2312"] + "Validation of public key failed." : ["2450"] + "Value Added" : ["14674"] + "Value Added With Expiration Time" : ["14680"] + "Value Auto Deleted With Expiration Time" : ["14688"] + "Value Deleted" : ["14675"] + "Value Deleted With Expiration Time" : ["14681"] + "View non-sensitive policy information" : ["5632"] + "View or Change Audit Log Privilege" : ["1621"] + "View system audit requirements" : ["5633"] + "WRITE_DAC" : ["1539"] + "WRITE_OWNER" : ["1540"] + "Wednesday" : ["1923"] + "Workstation Trust Account' - Disabled" : ["2055"] + "Workstation Trust Account' - Enabled" : ["2087"] + "Write Property" : ["7685"] + "Write Self" : ["7683"] + "Write attributes" : ["6660"] + "Write objects" : ["6679"] + "Write persisted key to file." 
: ["2459"] + "Write to process memory" : ["4485"] + "WriteAccount" : ["5409","5445","5428"] + "WriteAttributes" : ["4424"] + "WriteData (or AddFile)" : ["4417"] + "WriteEA" : ["4420"] + "WriteOtherParameters" : ["5395"] + "WritePasswordParameters" : ["5393"] + "WritePreferences" : ["5442"] + "Yes" : ["1842","14678"] + "[NULL]" : ["14673"] + "a Security Descriptor too long to display" : ["1829"] + "an ACE too long to display" : ["1828"] + "vSwitch" : ["14604"] + "vSwitch Egress" : ["14642"] + "vSwitch Ingress" : ["14641"] AccessMaskDescriptions: "0x00000001": Create Child "0x00000002": Delete Child @@ -3132,26 +2832,25 @@ processors: f.add(s.substring(last)); return f; } - - if (ctx?.winlog?.event_data?.FailureReason != null) { + if (ctx.winlog?.event_data?.FailureReason != null) { def code = ctx.winlog.event_data.FailureReason.replace("%%",""); def desc = params.descriptions[code]; if (desc == null) { desc = code; } if (desc != null) { - if (ctx?.winlog?.logon == null ) { + if (ctx.winlog?.logon == null ) { HashMap hm = new HashMap(); ctx.winlog.put("logon", hm); } - if (ctx?.winlog?.logon?.failure == null) { + if (ctx.winlog?.logon?.failure == null) { HashMap hm = new HashMap(); ctx.winlog.logon.put("failure", hm); } ctx.winlog.logon.failure.put("reason", desc); } } - if (ctx?.winlog?.event_data?.AuditPolicyChanges != null) { + if (ctx.winlog?.event_data?.AuditPolicyChanges != null) { ArrayList results = new ArrayList(); for (elem in ctx.winlog.event_data.AuditPolicyChanges.splitOnToken(",")) { def code = elem.replace("%%","").trim(); @@ -3165,21 +2864,40 @@ processors: ctx.winlog.event_data.put("AuditPolicyChangesDescription", results); } } - if (ctx?.winlog?.event_data?.AccessList != null) { + if (ctx.winlog?.event_data?.AccessList != null) { + ArrayList codes = new ArrayList(); ArrayList results = new ArrayList(); for (elem in split(ctx.winlog.event_data.AccessList)) { def code = elem.replace("%%","").trim(); + if (code != "") { + codes.add(code); + } if (params.descriptions.containsKey(code)) { results.add(params.descriptions[code]); } else { results.add(code); } } + if (codes.length > 0) { + ctx.winlog.event_data.AccessList = codes; + } if (results.length > 0) { ctx.winlog.event_data.put("AccessListDescription", results); } } - if (ctx?.winlog?.event_data?.AccessMask != null) { + if (ctx.winlog?.event_data?.Direction != null) { + def code = ctx.winlog.event_data.Direction.replace("%%","").trim(); + if (params.descriptions.containsKey(code)) { + ctx.winlog.event_data.put("DirectionDescription", params.descriptions[code]); + } + } + if (ctx.winlog?.event_data?.LayerName != null) { + def code = ctx.winlog.event_data.LayerName.replace("%%","").trim(); + if (params.descriptions.containsKey(code)) { + ctx.winlog.event_data.put("LayerNameDescription", params.descriptions[code]); + } + } + if (ctx.winlog?.event_data?.AccessMask != null) { ArrayList list = new ArrayList(); long accessMask; for (elem in split(ctx.winlog.event_data.AccessMask)) { @@ -3191,7 +2909,8 @@ processors: list.add(params.descriptions[code]); } else { list.add(code); - code = params.reversed_descriptions[code]; + if (params.reversed_descriptions.containsKey(code)) + code = params.reversed_descriptions[code][0]; } try { def longCode = Long.decode(code).longValue(); @@ -3251,30 +2970,30 @@ processors: "0xc0000371": "The local account store does not contain secret material for the specified account" "0x0": "Status OK." 
source: |- - if (ctx?.winlog?.event_data?.Status == null || - ctx?.event?.code == null || + if (ctx.winlog?.event_data?.Status == null || + ctx.event?.code == null || !["4625", "4776"].contains(ctx.event.code)) { return; } if (params.containsKey(ctx.winlog.event_data.Status)) { - if (ctx?.winlog?.logon == null ) { + if (ctx.winlog?.logon == null ) { HashMap hm = new HashMap(); ctx.winlog.put("logon", hm); } - if (ctx?.winlog?.logon?.failure == null) { + if (ctx.winlog?.logon?.failure == null) { HashMap hm = new HashMap(); ctx.winlog.logon.put("failure", hm); } ctx.winlog.logon.failure.put("status", params[ctx.winlog.event_data.Status]); } - if (ctx?.winlog?.event_data?.SubStatus == null || !params.containsKey(ctx.winlog.event_data.SubStatus)) { + if (ctx.winlog?.event_data?.SubStatus == null || !params.containsKey(ctx.winlog.event_data.SubStatus)) { return; } - if (ctx?.winlog?.logon == null ) { + if (ctx.winlog?.logon == null ) { HashMap hm = new HashMap(); ctx.winlog.put("logon", hm); } - if (ctx?.winlog?.logon?.failure == null) { + if (ctx.winlog?.logon?.failure == null) { HashMap hm = new HashMap(); ctx.winlog.logon.put("failure", hm); } @@ -3292,7 +3011,7 @@ processors: "3": "TRUST_TYPE_MIT" "4": "TRUST_TYPE_DCE" source: |- - if (ctx?.winlog?.event_data?.TdoType == null) { + if (ctx.winlog?.event_data?.TdoType == null) { return; } if (!params.containsKey(ctx.winlog.event_data.TdoType)) { @@ -3312,7 +3031,7 @@ processors: "2": "TRUST_DIRECTION_OUTBOUND" "3": "TRUST_DIRECTION_BIDIRECTIONAL" source: |- - if (ctx?.winlog?.event_data?.TdoDirection == null) { + if (ctx.winlog?.event_data?.TdoDirection == null) { return; } if (!params.containsKey(ctx.winlog.event_data.TdoDirection)) { @@ -3339,7 +3058,7 @@ processors: "512": "TRUST_ATTRIBUTE_CROSS_ORGANIZATION_NO_TGT_DELEGATION" "1024": "TRUST_ATTRIBUTE_PIM_TRUST" source: |- - if (ctx?.winlog?.event_data?.TdoAttributes == null) { + if (ctx.winlog?.event_data?.TdoAttributes == null) { return; } if (!params.containsKey(ctx.winlog.event_data.TdoAttributes)) { @@ -3352,21 +3071,21 @@ processors: tag: Add Session Events description: Add Session Events source: |- - if (ctx?.event?.code == null || + if (ctx.event?.code == null || !["4778", "4779"].contains(ctx.event.code)) { return; } //AccountName to user.name and related.user - if (ctx?.winlog?.event_data?.AccountName != null) { - if (ctx?.user == null) { + if (ctx.winlog?.event_data?.AccountName != null) { + if (ctx.user == null) { HashMap hm = new HashMap(); ctx.put("user", hm); } - if (ctx?.related == null) { + if (ctx.related == null) { HashMap hm = new HashMap(); ctx.put("related", hm); } - if (ctx?.related?.user == null) { + if (ctx.related?.user == null) { ArrayList al = new ArrayList(); ctx.related.put("user", al); } @@ -3377,8 +3096,8 @@ processors: } //AccountDomain to user.domain - if (ctx?.winlog?.event_data?.AccountDomain != null) { - if (ctx?.user == null) { + if (ctx.winlog?.event_data?.AccountDomain != null) { + if (ctx.user == null) { HashMap hm = new HashMap(); ctx.put("user", hm); } @@ -3386,22 +3105,22 @@ processors: } //ClientAddress to source.ip and related.ip - if (ctx?.winlog?.event_data?.ClientAddress != null && + if (ctx.winlog?.event_data?.ClientAddress != null && ctx.winlog.event_data.ClientAddress != "-" && ctx.winlog.event_data.ClientAddress != "Unknown") { // Correct invalid IP address "LOCAL" if (ctx?.winlog?.event_data?.ClientAddress == "LOCAL") { ctx.winlog.event_data.ClientAddress="127.0.0.1"; } - if (ctx?.source == null) { + if (ctx.source == null) { HashMap hm 
= new HashMap(); ctx.put("source", hm); } - if (ctx?.related == null) { + if (ctx.related == null) { HashMap hm = new HashMap(); ctx.put("related", hm); } - if (ctx?.related?.ip == null) { + if (ctx.related?.ip == null) { ArrayList al = new ArrayList(); ctx.related.put("ip", al); } @@ -3412,8 +3131,8 @@ processors: } //ClientName to source.domain - if (ctx?.winlog?.event_data?.ClientName != null) { - if (ctx?.source == null) { + if (ctx.winlog?.event_data?.ClientName != null) { + if (ctx.source == null) { HashMap hm = new HashMap(); ctx.put("source", hm); } @@ -3421,8 +3140,8 @@ processors: } //LogonID to winlog.logon.id - if (ctx?.winlog?.event_data?.LogonID != null) { - if (ctx?.winlog?.logon == null) { + if (ctx.winlog?.event_data?.LogonID != null) { + if (ctx.winlog?.logon == null) { HashMap hm = new HashMap(); ctx.winlog.put("logon", hm); } @@ -3435,27 +3154,27 @@ processors: tag: Copy Target User description: Copy Target User source: |- - if (ctx?.event?.code == null || + if (ctx.event?.code == null || !["4624", "4625", "4634", "4647", "4648", "4768", "4769", "4770", "4771", "4776", "4964"].contains(ctx.event.code)) { return; } - def targetUserId = ctx?.winlog?.event_data?.TargetUserSid; + def targetUserId = ctx.winlog?.event_data?.TargetUserSid; if (targetUserId == null) { - targetUserId = ctx?.winlog?.event_data?.TargetSid; + targetUserId = ctx.winlog?.event_data?.TargetSid; } //TargetUserSid to user.id or user.target.id if (targetUserId != null) { - if (ctx?.user == null) { + if (ctx.user == null) { HashMap hm = new HashMap(); ctx.put("user", hm); } - if (ctx?.user?.id == null) { + if (ctx.user?.id == null) { ctx.user.put("id", targetUserId); } else { - if (ctx?.user?.target == null) { + if (ctx.user?.target == null) { HashMap hm = new HashMap(); ctx.user.put("target", hm); } @@ -3464,26 +3183,26 @@ processors: } //TargetUserName to related.user and user.name or user.target.name - if (ctx?.winlog?.event_data?.TargetUserName != null) { + if (ctx.winlog?.event_data?.TargetUserName != null) { def tun = ctx.winlog.event_data.TargetUserName.splitOnToken("@"); - if (ctx?.user == null) { + if (ctx.user == null) { HashMap hm = new HashMap(); ctx.put("user", hm); } - if (ctx?.user?.name == null) { + if (ctx.user?.name == null) { ctx.user.put("name", tun[0]); } else { - if (ctx?.user?.target == null) { + if (ctx.user?.target == null) { HashMap hm = new HashMap(); ctx.user.put("target", hm); } ctx.user.target.put("name", tun[0]); } - if (ctx?.related == null) { + if (ctx.related == null) { HashMap hm = new HashMap(); ctx.put("related", hm); } - if (ctx?.related?.user == null) { + if (ctx.related?.user == null) { ArrayList al = new ArrayList(); ctx.related.put("user", al); } @@ -3492,50 +3211,58 @@ processors: } } //TargetUserDomain to user.domain or user.target.domain - if (ctx?.winlog?.event_data?.TargetDomainName != null) { - if (ctx?.user == null) { + if (ctx.winlog?.event_data?.TargetDomainName != null) { + if (ctx.user == null) { HashMap hm = new HashMap(); ctx.put("user", hm); } - if (ctx?.user?.domain == null) { + if (ctx.user?.domain == null) { ctx.user.put("domain", ctx.winlog.event_data.TargetDomainName); } else { - if (ctx?.user?.target == null){ + if (ctx.user?.target == null){ HashMap hm = new HashMap(); ctx.user.put("target", hm); } ctx.user.target.put("domain", ctx.winlog.event_data.TargetDomainName); } } +# split member name into parts based on comma ignoring escaped commas +# https://learn.microsoft.com/en-us/previous-versions/windows/desktop/ldap/distinguished-names + - 
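Per the comment above and the linked distinguished-name rules, a member name such as CN=Doe\, John,OU=Users must split only on commas that are not escaped with a backslash, which is what the split processor that follows implements via a negative-lookbehind separator. A small Painless sketch of the same split, assuming script regexes are enabled:

    // "\," inside a component must survive the split.
    def dn = 'CN=Doe\\, John,OU=Users,DC=example,DC=com';
    def parts = /(?<!\\),/.split(dn);
    // parts == ['CN=Doe\, John', 'OU=Users', 'DC=example', 'DC=com']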
split: + if: ctx.winlog?.event_data?.MemberName != null + field: winlog.event_data.MemberName + target_field: _temp.MemberNameParts + separator: "(?<!\\\\)," - (ctx?.process?.executable != null && ctx.process.executable.length() > 1) || - (ctx?.process?.parent?.executable != null && ctx.process.parent.executable.length() > 1) + (ctx.process?.executable != null && ctx.process.executable.length() > 1) || + (ctx.process?.parent?.executable != null && ctx.process.parent.executable.length() > 1) source: |- def getProcessName(def path) { def idx = path.lastIndexOf("\\"); @@ -506,16 +568,16 @@ processors: return ""; } - def cmd = ctx?.process?.executable; - if (cmd != null && cmd != "" && ctx?.process?.name == null) { + def cmd = ctx.process?.executable; + if (cmd != null && cmd != "" && ctx.process?.name == null) { def name = getProcessName(cmd); if (name != "") { ctx.process.name = name; } } - def parentCmd = ctx?.process?.parent?.executable; - if (parentCmd != null && parentCmd != "" && ctx?.process?.parent?.name == null) { + def parentCmd = ctx.process?.parent?.executable; + if (parentCmd != null && parentCmd != "" && ctx.process?.parent?.name == null) { def name = getProcessName(parentCmd); if (name != "") { ctx.process.parent.name = name; @@ -528,8 +590,8 @@ processors: field: _temp.hashes target_field: file.hash if: |- - ctx?._temp?.hashes != null && - ["6", "7", "15"].contains(ctx.event.code) + ctx._temp?.hashes != null && + ["6", "7", "15", "26", "29"].contains(ctx.event.code) - rename: field: file.hash.imphash target_field: file.pe.imphash @@ -540,21 +602,25 @@ target_field: file.path ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.TargetFilename != null && ctx.winlog?.event_data?.TargetFilename != "" - rename: field: winlog.event_data.Device target_field: file.path ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.Device != null && ctx.winlog?.event_data?.Device != "" - rename: field: winlog.event_data.PipeName target_field: file.name ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.PipeName != null && ctx.winlog?.event_data?.PipeName != "" - rename: field: winlog.event_data.ImageLoaded target_field: file.path ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.ImageLoaded != null && ctx.winlog?.event_data?.ImageLoaded != "" - set: field: file.code_signature.subject_name copy_from: winlog.event_data.Signature @@ -570,7 +636,7 @@ target_field: file.pe.original_file_name ignore_missing: true ignore_failure: true - if: ctx.event.code == "7" + if: ctx.event.code == "7" && ctx.winlog?.event_data?.OriginalFileName != null && ctx.winlog?.event_data?.OriginalFileName != "" - set: field: file.pe.company copy_from: winlog.event_data.Company @@ -598,21 +664,21 @@ - set: field: file.code_signature.signed value: true - if: ctx?.winlog?.event_data?.Signed == true + if: ctx.winlog?.event_data?.Signed != null && ctx.winlog.event_data.Signed == true - set: field: file.code_signature.valid value: true - if: ctx?.winlog?.event_data?.SignatureStatus == "Valid" + if: ctx.winlog?.event_data?.SignatureStatus != null && ctx.winlog?.event_data?.SignatureStatus == "Valid" - script: description: Adds file information. 
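The getProcessName helper above reduces an executable path to its basename by slicing after the last backslash; the same helper fills both process.name and process.parent.name when they are missing. A self-contained Painless sketch with a hypothetical path:

    String getProcessName(String path) {
        int idx = path.lastIndexOf("\\");
        // Anything after the final backslash is the executable's file name.
        return idx > -1 ? path.substring(idx + 1) : "";
    }
    def name = getProcessName('C:\\Windows\\System32\\svchost.exe'); // "svchost.exe"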
lang: painless - if: ctx?.file?.path != null && ctx.file.path.length() > 1 + if: ctx.file?.path != null && ctx.file.path.length() > 1 source: |- def path = ctx.file.path; def idx = path.lastIndexOf("\\"); if (idx > -1) { - if (ctx?.file == null) { + if (ctx.file == null) { ctx.file = new HashMap(); } ctx.file.name = path.substring(idx+1); @@ -631,18 +697,19 @@ processors: target_field: network.transport ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.Protocol != null && ctx.winlog?.event_data?.Protocol != "" - rename: field: winlog.event_data.DestinationPortName target_field: network.protocol ignore_missing: true ignore_failure: true - if: ctx.event.code != "22" + if: ctx.event.code != "22" && ctx.winlog?.event_data?.DestinationPortName != null && ctx.winlog?.event_data?.DestinationPortName != "" - rename: field: winlog.event_data.SourcePortName target_field: network.protocol ignore_missing: true ignore_failure: true - if: ctx.event.code != "22" + if: ctx.event.code != "22" && ctx.winlog?.event_data?.SourcePortName != null && ctx.winlog?.event_data?.SourcePortName != "" - set: field: network.protocol value: dns @@ -653,61 +720,68 @@ processors: type: ip ignore_failure: true ignore_missing: true + if: ctx.winlog?.event_data?.SourceIp != null && ctx.winlog?.event_data?.SourceIp != "" - rename: field: winlog.event_data.SourceHostname target_field: source.domain ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.SourceHostname != null && ctx.winlog?.event_data?.SourceHostname != "" - convert: field: winlog.event_data.SourcePort target_field: source.port type: long ignore_failure: true ignore_missing: true + if: ctx.winlog?.event_data?.SourcePort != null && ctx.winlog?.event_data?.SourcePort != "" - convert: field: winlog.event_data.DestinationIp target_field: destination.ip type: ip ignore_failure: true ignore_missing: true + if: ctx.winlog?.event_data?.DestinationIp != null && ctx.winlog?.event_data?.DestinationIp != "" - rename: field: winlog.event_data.DestinationHostname target_field: destination.domain ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.DestinationHostname != null && ctx.winlog?.event_data?.DestinationHostname != "" - convert: field: winlog.event_data.DestinationPort target_field: destination.port type: long ignore_failure: true ignore_missing: true + if: ctx.winlog?.event_data?.DestinationPort != null && ctx.winlog?.event_data?.DestinationPort != "" - rename: field: winlog.event_data.QueryName target_field: dns.question.name ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.QueryName != null && ctx.winlog?.event_data?.QueryName != "" - set: field: network.direction value: egress - if: ctx?.winlog?.event_data?.Initiated == "true" + if: ctx.winlog?.event_data?.Initiated != null && ctx.winlog?.event_data?.Initiated == "true" - set: field: network.direction value: ingress - if: ctx?.winlog?.event_data?.Initiated == "false" + if: ctx.winlog?.event_data?.Initiated != null && ctx.winlog?.event_data?.Initiated == "false" - set: field: network.type value: ipv4 - if: ctx?.winlog?.event_data?.SourceIsIpv6 == "false" + if: ctx.winlog?.event_data?.SourceIsIpv6 != null && ctx.winlog?.event_data?.SourceIsIpv6 == "false" - set: field: network.type value: ipv6 - if: ctx?.winlog?.event_data?.SourceIsIpv6 == "true" + if: ctx.winlog?.event_data?.SourceIsIpv6 != null && ctx.winlog?.event_data?.SourceIsIpv6 == "true" - script: description: | Splits the QueryResults field that contains the DNS responses. 
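        Each answer is the numeric resource record type ("type: N" per the params map
        below) followed by the returned names or addresses, separated by semicolons.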
Example: "type: 5 f2.taboola.map.fastly.net;::ffff:151.101.66.2;::ffff:151.101.130.2;::ffff:151.101.194.2;::ffff:151.101.2.2;" lang: painless - if: ctx?.winlog?.event_data?.QueryResults != null + if: ctx.winlog?.event_data?.QueryResults != null && ctx.winlog?.event_data?.QueryResults != "" params: "1": "A" "2": "NS" @@ -797,7 +871,6 @@ processors: ]); } } else { - answer = answer.replace("::ffff:", ""); ips.add(answer); } } @@ -809,11 +882,29 @@ processors: ctx.dns.resolved_ip = ips; } if (relatedHosts.length > 0) { - if (ctx?.related == null) { + if (ctx.related == null) { ctx.related = new HashMap(); } ctx.related.hosts = relatedHosts; } + - foreach: + field: dns.answers + if: ctx.dns?.answers instanceof List + ignore_failure: true + processor: + gsub: + field: _ingest._value + pattern: '^\[?::ffff:([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(?:\](?::[0-9]+)?)?$' + replacement: '$1' + - foreach: + field: dns.resolved_ip + if: ctx.dns?.resolved_ip instanceof List + ignore_failure: true + processor: + gsub: + field: _ingest._value + pattern: '^\[?::ffff:([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)(?:\](?::[0-9]+)?)?$' + replacement: '$1' - foreach: field: dns.resolved_ip ignore_missing: true @@ -827,7 +918,7 @@ processors: - script: description: Convert V4MAPPED addresses. lang: painless - if: ctx?.dns?.resolved_ip != null + if: ctx.dns?.resolved_ip != null source: |- if (ctx.dns.answers == null) { ctx.dns.answers = new ArrayList(); @@ -858,7 +949,7 @@ processors: field: related.hosts value: "{{dns.question.name}}" allow_duplicates: false - if: ctx?.dns?.question?.name != null && ctx?.dns?.question?.name != "" + if: ctx.dns?.question?.name != null && ctx.dns?.question?.name != "" - remove: description: Remove dns.question.domain because it is not part of ECS and is redundant with dns.question.name. field: dns.question.domain @@ -888,19 +979,42 @@ processors: field: winlog.event_data.User target_field: "_temp.user_parts" separator: '\\' - if: ctx?.winlog?.event_data?.User != null + if: ctx.winlog?.event_data?.User != null - set: field: user.domain value: "{{_temp.user_parts.0}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 + if: ctx._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 - set: field: user.name value: "{{_temp.user_parts.1}}" ignore_failure: true ignore_empty_value: true - if: ctx?._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 + if: ctx._temp?.user_parts != null && ctx._temp.user_parts.size() == 2 + # Get user details from the translate_sid processor enrichment + # if they are available and we don't already have them. 
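+      # The enrichment exposes the resolved account under winlog.event_data._MemberUserName,
+      # _MemberDomain and _MemberAccountType; the processors below promote those values
+      # to the ECS user.name, user.domain and user.roles fields.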
+ - rename: + field: winlog.event_data._MemberUserName + target_field: user.name + ignore_failure: true + ignore_missing: true + - rename: + field: winlog.event_data._MemberDomain + target_field: user.domain + ignore_failure: true + ignore_missing: true + - append: + value: '{{{winlog.event_data._MemberAccountType}}}' + field: user.roles + ignore_failure: true + allow_duplicates: false + if: ctx.winlog?.event_data?._MemberAccountType != null + - remove: + field: winlog.event_data._MemberAccountType + ignore_missing: true + ignore_failure: true + if: ctx.user?.roles != null && ctx.winlog?.event_data?._MemberAccountType != null && ctx.user.roles.contains(ctx.winlog.event_data._MemberAccountType) ## Sysmon fields @@ -909,6 +1023,7 @@ processors: target_field: sysmon.dns.status ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.QueryStatus != null && ctx.winlog?.event_data?.QueryStatus != "" - script: description: Translate DNS Query status. lang: painless @@ -1110,7 +1225,7 @@ processors: "10054": "WSAECONNRESET" "10055": "WSAENOBUFS" "10060": "WSAETIMEDOUT" - if: ctx?.sysmon?.dns?.status != null && ctx?.sysmon?.dns?.status != "" + if: ctx.sysmon?.dns?.status != null && ctx.sysmon?.dns?.status != "" source: |- def status = params[ctx.sysmon.dns.status]; if (status != null) { @@ -1122,12 +1237,18 @@ processors: type: boolean ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.Archived != null && ctx.winlog?.event_data?.Archived != "" - convert: field: winlog.event_data.IsExecutable target_field: sysmon.file.is_executable type: boolean ignore_missing: true ignore_failure: true + if: ctx.winlog?.event_data?.IsExecutable != null && ctx.winlog?.event_data?.IsExecutable != "" + - convert: + field: error.code + type: string + ignore_missing: true ## Related fields @@ -1136,19 +1257,19 @@ processors: value: "{{user.name}}" ignore_failure: true allow_duplicates: false - if: ctx?.user?.name != null && ctx.user.name != "" + if: ctx.user?.name != null && ctx.user.name != "" - append: field: related.ip value: "{{source.ip}}" ignore_failure: true allow_duplicates: false - if: ctx?.source?.ip != null && ctx.source.ip != "" + if: ctx.source?.ip != null && ctx.source.ip != "" - append: field: related.ip value: "{{destination.ip}}" ignore_failure: true allow_duplicates: false - if: ctx?.destination?.ip != null && ctx.destination.ip != "" + if: ctx.destination?.ip != null && ctx.destination.ip != "" ## Registry fields @@ -1156,7 +1277,8 @@ processors: description: Set registry fields. 
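      # Abbreviates the hive name in TargetObject via the params map below
      # (for example HKEY_CLASSES_ROOT -> HKCR) before building the registry.* fields,
      # and decodes 0x-prefixed Details values as numeric registry data.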
     lang: painless
     if: |-
-      ctx?.winlog?.event_data?.TargetObject != null && ["12", "13", "14"].contains(ctx.event.code)
+      ctx.winlog?.event_data?.TargetObject != null && ctx.winlog?.event_data?.TargetObject != "" &&
+      ["12", "13", "14"].contains(ctx.event.code)
     params:
       HKEY_CLASSES_ROOT: "HKCR"
       HKCR: "HKCR"
@@ -1193,7 +1315,7 @@ processors:
       def value = pathTokens[pathTokens.length - 1];
       ctx.registry.value = value;

-      def data = ctx?.winlog?.event_data?.Details;
+      def data = ctx.winlog?.event_data?.Details;
       if (data != null && data != "") {
         def prefixLen = 2; // to remove 0x prefix
         def dataValue = "";
@@ -1244,6 +1366,13 @@ processors:
       ];
     }

+
+## Conformity
+  - rename:
+      field: winlog.event_data.TargetProcessGuid
+      target_field: winlog.event_data.TargetProcessGUID
+      if: ctx.winlog?.event_data?.TargetProcessGuid != null
+
 ## Cleanup

   - remove:
@@ -1277,15 +1406,22 @@ processors:
       - winlog.level
      ignore_failure: true
      ignore_missing: true
+  - script:
+      description: Remove all empty values from event_data.
+      lang: painless
+      source: ctx.winlog?.event_data?.entrySet().removeIf(entry -> entry.getValue() == null || entry.getValue().equals("") || entry.getValue().equals("-"));
+      if: ctx.winlog?.event_data != null
   - remove:
       description: Remove empty event data.
       field: winlog.event_data
      ignore_missing: true
      ignore_failure: true
-    if: ctx?.winlog?.event_data != null && ctx.winlog.event_data.size() == 0
+    if: ctx.winlog?.event_data != null && ctx.winlog.event_data.size() == 0

on_failure:
  - set:
-      field: "error.message"
-      value: |-
-        Processor "{{ _ingest.on_failure_processor_type }}" with tag "{{ _ingest.on_failure_processor_tag }}" in pipeline "{{ _ingest.on_failure_pipeline }}" failed with message "{{ _ingest.on_failure_message }}"
+      field: event.kind
+      value: pipeline_error
+  - append:
+      field: error.message
+      value: "{{{ _ingest.on_failure_message }}}"

From 923b7e1412b2434e50feea3bc2c7ce82372d0876 Mon Sep 17 00:00:00 2001
From: Julien Lind
Date: Wed, 19 Feb 2025 11:03:08 +0100
Subject: [PATCH 02/41] update the ownership to comply with the current O11y
 org (#42764)

---
 .github/CODEOWNERS | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index feccb80bf282..1c12144fdffd 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -76,7 +76,7 @@ CHANGELOG*
 /licenses/ @elastic/elastic-agent-data-plane
 /metricbeat/ @elastic/elastic-agent-data-plane
 /metricbeat/docs/ # Listed without an owner to avoid maintaining doc ownership for each input and module.
-/metricbeat/helper/kubernetes @elastic/obs-cloudnative-monitoring +/metricbeat/helper/kubernetes @elastic/obs-ds-hosted-services /metricbeat/module/aerospike @elastic/obs-infraobs-integrations /metricbeat/module/apache @elastic/obs-infraobs-integrations /metricbeat/module/beat/ @elastic/stack-monitoring @@ -91,15 +91,15 @@ CHANGELOG* /metricbeat/module/jolokia @elastic/obs-infraobs-integrations /metricbeat/module/kafka @elastic/obs-infraobs-integrations /metricbeat/module/kibana/ @elastic/stack-monitoring -/metricbeat/module/kubernetes/ @elastic/obs-cloudnative-monitoring +/metricbeat/module/kubernetes/ @elastic/obs-ds-hosted-services /metricbeat/module/logstash/ @elastic/stack-monitoring /metricbeat/module/memcached @elastic/obs-infraobs-integrations /metricbeat/module/mongodb @elastic/obs-infraobs-integrations /metricbeat/module/mysql @elastic/obs-infraobs-integrations -/metricbeat/module/nats/ @elastic/obs-cloudnative-monitoring +/metricbeat/module/nats/ @elastic/obs-ds-hosted-services /metricbeat/module/nginx @elastic/obs-infraobs-integrations /metricbeat/module/php_fpm @elastic/obs-infraobs-integrations -/metricbeat/module/prometheus/ @elastic/obs-cloudnative-monitoring +/metricbeat/module/prometheus/ @elastic/obs-ds-hosted-services /metricbeat/module/postgresql @elastic/obs-infraobs-integrations /metricbeat/module/rabbitmq @elastic/obs-infraobs-integrations /metricbeat/module/redis @elastic/obs-infraobs-integrations @@ -201,7 +201,7 @@ CHANGELOG* /x-pack/metricbeat/module/azure/billing @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/cloudfoundry @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/cockroachdb @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/containerd/ @elastic/obs-cloudnative-monitoring +/x-pack/metricbeat/module/containerd/ @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/coredns @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/enterprisesearch @elastic/app-search-team /x-pack/metricbeat/module/gcp @elastic/obs-ds-hosted-services @elastic/obs-infraobs-integrations @@ -222,16 +222,16 @@ CHANGELOG* /x-pack/metricbeat/module/gcp/storage @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/ibmmq @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/iis @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/istio/ @elastic/obs-cloudnative-monitoring +/x-pack/metricbeat/module/istio/ @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/mssql @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/openai @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/oracle @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/panw @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/prometheus/ @elastic/obs-cloudnative-monitoring +/x-pack/metricbeat/module/prometheus/ @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/redisenterprise @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/sql @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/statsd @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/stan/ @elastic/obs-cloudnative-monitoring +/x-pack/metricbeat/module/stan/ @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/tomcat @elastic/obs-infraobs-integrations /x-pack/osquerybeat/ @elastic/sec-deployment-and-devices /x-pack/packetbeat/ @elastic/sec-linux-platform From 08793313337047379aaae17b4064b56a524734df Mon Sep 17 00:00:00 2001 From: Gabriel Pop <94497545+gpop63@users.noreply.github.com> Date: Wed, 19 Feb 2025 13:31:39 +0200 
Subject: [PATCH 03/41] add meraki codeowner entry (#42776) --- .github/CODEOWNERS | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 1c12144fdffd..896f67f8f98e 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -224,6 +224,7 @@ CHANGELOG* /x-pack/metricbeat/module/iis @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/istio/ @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/mssql @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/meraki @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/openai @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/oracle @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/panw @elastic/obs-infraobs-integrations From c61c0fe44c387bb4559febd02be8d178f4d12ffa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20=C5=9Awi=C4=85tek?= Date: Wed, 19 Feb 2025 14:07:03 +0100 Subject: [PATCH 04/41] Refactor kubernetes bearer token authentication (#42714) Instead of doing retries on 401 errors, use a mechanism from client-go which simply reloads the token periodically in the background. Also, don't stop logging errors after the first 401. These errors, if present, need to be addressed by the cluster operator, so we should make them more prominent. --- CHANGELOG.next.asciidoc | 1 + metricbeat/helper/http.go | 54 +++--------- metricbeat/helper/http_test.go | 88 +++++-------------- .../module/kubernetes/apiserver/metricset.go | 47 +++------- .../controllermanager/controllermanager.go | 49 +++-------- metricbeat/module/kubernetes/kubernetes.go | 18 ---- .../module/kubernetes/scheduler/scheduler.go | 50 +++-------- 7 files changed, 67 insertions(+), 240 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 134ba65ff8fc..21a6b6f294ba 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -480,6 +480,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] - Collect more fields from ES node/stats metrics and only those that are necessary {pull}42421[42421] - Add new metricset wmi for the windows module. {pull}42017[42017] - Update beat module with apm-server tail sampling monitoring metrics fields {pull}42569[42569] +- Log every 401 response from Kubernetes API Server {pull}42714[42714] *Metricbeat* - Add benchmark module {pull}41801[41801] diff --git a/metricbeat/helper/http.go b/metricbeat/helper/http.go index d90213d4f1f8..e6b82bca285b 100644 --- a/metricbeat/helper/http.go +++ b/metricbeat/helper/http.go @@ -20,15 +20,17 @@ package helper import ( "bufio" "bytes" + "context" "encoding/json" "fmt" "io" "net/http" - "os" "github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent-libs/useragent" + "k8s.io/client-go/transport" + "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/beats/v7/metricbeat/helper/dialer" "github.com/elastic/beats/v7/metricbeat/mb" @@ -69,14 +71,6 @@ func NewHTTPFromConfig(config Config, hostData mb.HostData) (*HTTP, error) { headers.Set(k, v) } - if config.BearerTokenFile != "" { - header, err := getAuthHeaderFromToken(config.BearerTokenFile) - if err != nil { - return nil, err - } - headers.Set("Authorization", header) - } - // Ensure backward compatibility builder := hostData.Transport if builder == nil { @@ -97,6 +91,15 @@ func NewHTTPFromConfig(config Config, hostData mb.HostData) (*HTTP, error) { return nil, err } + // Apply the token refreshing roundtripper. 
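+	// client-go's NewBearerAuthWithRefreshRoundTripper re-reads the token file
+	// periodically in the background, so rotated service account tokens keep
+	// working without any retry-on-401 handling in the callers.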
We can't do this in a transport option because we need to handle the + // error it can return at creation + if config.BearerTokenFile != "" { + client.Transport, err = transport.NewBearerAuthWithRefreshRoundTripper("", config.BearerTokenFile, client.Transport) + } + if err != nil { + return nil, err + } + return &HTTP{ hostData: hostData, bearerFile: config.BearerTokenFile, @@ -118,7 +121,7 @@ func (h *HTTP) FetchResponse() (*http.Response, error) { reader = bytes.NewReader(h.body) } - req, err := http.NewRequest(h.method, h.uri, reader) + req, err := http.NewRequestWithContext(context.Background(), h.method, h.uri, reader) // TODO: get context from caller if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %w", err) } @@ -212,34 +215,3 @@ func (h *HTTP) FetchJSON() (map[string]interface{}, error) { return data, nil } - -func (h *HTTP) RefreshAuthorizationHeader() (bool, error) { - if h.bearerFile != "" { - header, err := getAuthHeaderFromToken(h.bearerFile) - if err != nil { - return false, err - } - h.headers.Set("Authorization", header) - return true, nil - } - return false, nil -} - -// getAuthHeaderFromToken reads a bearer authorization token from the given file -func getAuthHeaderFromToken(path string) (string, error) { - var token string - - b, err := os.ReadFile(path) - if err != nil { - return "", fmt.Errorf("reading bearer token file: %w", err) - } - - if len(b) != 0 { - if b[len(b)-1] == '\n' { - b = b[0 : len(b)-1] - } - token = fmt.Sprintf("Bearer %s", string(b)) - } - - return token, nil -} diff --git a/metricbeat/helper/http_test.go b/metricbeat/helper/http_test.go index 3666dc564aeb..c494244ad523 100644 --- a/metricbeat/helper/http_test.go +++ b/metricbeat/helper/http_test.go @@ -38,51 +38,6 @@ import ( "github.com/elastic/beats/v7/metricbeat/mb/parse" ) -func TestGetAuthHeaderFromToken(t *testing.T) { - tests := []struct { - Name, Content, Expected string - }{ - { - "Test a token is read", - "testtoken", - "Bearer testtoken", - }, - { - "Test a token is trimmed", - "testtoken\n", - "Bearer testtoken", - }, - } - - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { - content := []byte(test.Content) - tmpfile, err := os.CreateTemp("", "token") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpfile.Name()) - - if _, err := tmpfile.Write(content); err != nil { - t.Fatal(err) - } - if err := tmpfile.Close(); err != nil { - t.Fatal(err) - } - - header, err := getAuthHeaderFromToken(tmpfile.Name()) - assert.NoError(t, err) - assert.Equal(t, test.Expected, header) - }) - } -} - -func TestGetAuthHeaderFromTokenNoFile(t *testing.T) { - header, err := getAuthHeaderFromToken("nonexistingfile") - assert.Equal(t, "", header) - assert.Error(t, err) -} - func TestTimeout(t *testing.T) { c := make(chan struct{}) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -301,35 +256,34 @@ func TestRefreshAuthorizationHeader(t *testing.T) { bearerFileName := "token" bearerFilePath := filepath.Join(path, bearerFileName) - getAuth := func(helper *HTTP) string { - for k, v := range helper.headers { - if k == "Authorization" { - return v[0] - } - } - return "" - } + var authToken string + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + authToken = r.Header.Get("Authorization") + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() firstToken := "token-1" err := os.WriteFile(bearerFilePath, []byte(firstToken), 0644) assert.NoError(t, err) - helper := &HTTP{bearerFile: 
bearerFilePath, headers: make(http.Header)} - updated, err := helper.RefreshAuthorizationHeader() - assert.NoError(t, err) - assert.True(t, updated) - expected := fmt.Sprintf("Bearer %s", firstToken) - assert.Equal(t, expected, getAuth(helper)) + cfg := defaultConfig() + cfg.BearerTokenFile = bearerFilePath + hostData := mb.HostData{ + URI: ts.URL, + SanitizedURI: ts.URL, + } - secondToken := "token-2" - err = os.WriteFile(bearerFilePath, []byte(secondToken), 0644) - assert.NoError(t, err) + h, err := NewHTTPFromConfig(cfg, hostData) + require.NoError(t, err) - updated, err = helper.RefreshAuthorizationHeader() - assert.NoError(t, err) - assert.True(t, updated) - expected = fmt.Sprintf("Bearer %s", secondToken) - assert.Equal(t, expected, getAuth(helper)) + res, err := h.FetchResponse() + require.NoError(t, err) + res.Body.Close() + + assert.Equal(t, http.StatusOK, res.StatusCode) + assert.Contains(t, authToken, firstToken) } func checkTimeout(t *testing.T, h *HTTP) { diff --git a/metricbeat/module/kubernetes/apiserver/metricset.go b/metricbeat/module/kubernetes/apiserver/metricset.go index 5457093e5536..9191db36dbc6 100644 --- a/metricbeat/module/kubernetes/apiserver/metricset.go +++ b/metricbeat/module/kubernetes/apiserver/metricset.go @@ -19,11 +19,7 @@ package apiserver import ( "fmt" - "net/http" - "strings" - "time" - "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/helper/prometheus" "github.com/elastic/beats/v7/metricbeat/mb" k8smod "github.com/elastic/beats/v7/metricbeat/module/kubernetes" @@ -34,7 +30,6 @@ import ( // Metricset for apiserver is a prometheus based metricset type Metricset struct { mb.BaseMetricSet - http *helper.HTTP prometheusClient prometheus.Prometheus prometheusMappings *prometheus.MetricsMapping clusterMeta mapstr.M @@ -54,13 +49,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, fmt.Errorf("must be child of kubernetes module") } - http, err := pc.GetHttp() - if err != nil { - return nil, fmt.Errorf("the http connection is not valid") - } ms := &Metricset{ BaseMetricSet: base, - http: http, prometheusClient: pc, prometheusMappings: mapping, clusterMeta: util.AddClusterECSMeta(base), @@ -73,36 +63,19 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch gathers information from the apiserver and reports events with this information. 
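// Token refresh is handled transparently by the HTTP transport, so no
// 401-retry logic is needed here.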
func (m *Metricset) Fetch(reporter mb.ReporterV2) error { events, err := m.prometheusClient.GetProcessedMetrics(m.prometheusMappings) - errorString := fmt.Sprintf("%s", err) - errorUnauthorisedMsg := fmt.Sprintf("unexpected status code %d", http.StatusUnauthorized) - if err != nil && strings.Contains(errorString, errorUnauthorisedMsg) { - count := 2 // We retry twice to refresh the Authorisation token in case of http.StatusUnauthorize = 401 Error - for count > 0 { - if _, errAuth := m.http.RefreshAuthorizationHeader(); errAuth == nil { - events, err = m.prometheusClient.GetProcessedMetrics(m.prometheusMappings) - } - if err != nil { - time.Sleep(m.mod.Config().Period) - count-- - } else { - break - } - } - } // We need to check for err again in case error is not 401 or RefreshAuthorizationHeader has failed if err != nil { return fmt.Errorf("error getting metrics: %w", err) - } else { - for _, e := range events { - event := mb.TransformMapStrToEvent("kubernetes", e, nil) - if len(m.clusterMeta) != 0 { - event.RootFields.DeepUpdate(m.clusterMeta) - } - isOpen := reporter.Event(event) - if !isOpen { - return nil - } + } + for _, e := range events { + event := mb.TransformMapStrToEvent("kubernetes", e, nil) + if len(m.clusterMeta) != 0 { + event.RootFields.DeepUpdate(m.clusterMeta) + } + isOpen := reporter.Event(event) + if !isOpen { + return nil } - return nil } + return nil } diff --git a/metricbeat/module/kubernetes/controllermanager/controllermanager.go b/metricbeat/module/kubernetes/controllermanager/controllermanager.go index 6c7b1c8ae528..3b875e0081a3 100644 --- a/metricbeat/module/kubernetes/controllermanager/controllermanager.go +++ b/metricbeat/module/kubernetes/controllermanager/controllermanager.go @@ -19,11 +19,7 @@ package controllermanager import ( "fmt" - "net/http" - "strings" - "time" - "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/helper/prometheus" "github.com/elastic/beats/v7/metricbeat/mb" k8smod "github.com/elastic/beats/v7/metricbeat/module/kubernetes" @@ -79,7 +75,6 @@ func init() { // MetricSet implements the mb.PushMetricSet interface, and therefore does not rely on polling. type MetricSet struct { mb.BaseMetricSet - http *helper.HTTP prometheusClient prometheus.Prometheus prometheusMappings *prometheus.MetricsMapping clusterMeta mapstr.M @@ -100,13 +95,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, fmt.Errorf("must be child of kubernetes module") } - http, err := pc.GetHttp() - if err != nil { - return nil, fmt.Errorf("the http connection is not valid") - } ms := &MetricSet{ BaseMetricSet: base, - http: http, prometheusClient: pc, prometheusMappings: mapping, clusterMeta: util.AddClusterECSMeta(base), @@ -118,37 +108,20 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch gathers information from the apiserver and reports events with this information. 
func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { events, err := m.prometheusClient.GetProcessedMetrics(m.prometheusMappings) - errorString := fmt.Sprintf("%s", err) - errorUnauthorisedMsg := fmt.Sprintf("unexpected status code %d", http.StatusUnauthorized) - if err != nil && strings.Contains(errorString, errorUnauthorisedMsg) { - count := 2 // We retry twice to refresh the Authorisation token in case of http.StatusUnauthorize = 401 Error - for count > 0 { - if _, errAuth := m.http.RefreshAuthorizationHeader(); errAuth == nil { - events, err = m.prometheusClient.GetProcessedMetrics(m.prometheusMappings) - } - if err != nil { - time.Sleep(m.mod.Config().Period) - count-- - } else { - break - } - } - } - // We need to check for err again in case error is not 401 or RefreshAuthorizationHeader has failed if err != nil { return fmt.Errorf("error getting metrics: %w", err) - } else { - for _, e := range events { - event := mb.TransformMapStrToEvent("kubernetes", e, nil) - if len(m.clusterMeta) != 0 { - event.RootFields.DeepUpdate(m.clusterMeta) - } - isOpen := reporter.Event(event) - if !isOpen { - return nil - } + } + + for _, e := range events { + event := mb.TransformMapStrToEvent("kubernetes", e, nil) + if len(m.clusterMeta) != 0 { + event.RootFields.DeepUpdate(m.clusterMeta) + } + isOpen := reporter.Event(event) + if !isOpen { + return nil } - return nil } + return nil } diff --git a/metricbeat/module/kubernetes/kubernetes.go b/metricbeat/module/kubernetes/kubernetes.go index 238b8ec21d46..5725ad817e17 100644 --- a/metricbeat/module/kubernetes/kubernetes.go +++ b/metricbeat/module/kubernetes/kubernetes.go @@ -19,8 +19,6 @@ package kubernetes import ( "fmt" - httpnet "net/http" - "strings" "sync" "time" @@ -149,27 +147,11 @@ func (m *module) GetKubeletStats(http *helper.HTTP) ([]byte, error) { // (https://github.com/elastic/beats/pull/25640#discussion_r633395213) statsCache := m.kubeletStatsCache.getCacheMapEntry(m.cacheHash) - // Check if the last time we tried to make a request to the Kubelet API ended in a 401 Unauthorized error. - // If this is the case, we should not keep making requests. - errorUnauthorisedMsg := fmt.Sprintf("HTTP error %d", httpnet.StatusUnauthorized) - if statsCache.lastFetchErr != nil && strings.Contains(statsCache.lastFetchErr.Error(), errorUnauthorisedMsg) { - return statsCache.sharedStats, statsCache.lastFetchErr - } - // If this is the first request, or it has passed more time than config.period, we should // make a request to the Kubelet API again to get the last metrics' values. if statsCache.lastFetchTimestamp.IsZero() || now.Sub(statsCache.lastFetchTimestamp) > m.Config().Period { statsCache.sharedStats, statsCache.lastFetchErr = http.FetchContent() - // If we got an unauthorized error from our HTTP request, it is possible the token has expired. - // We should update the Authorization header in that case. We only try this for the first time - // we get HTTP 401 to avoid getting in a loop in case the cause of the error is something different. 
- if statsCache.lastFetchErr != nil && strings.Contains(statsCache.lastFetchErr.Error(), errorUnauthorisedMsg) { - if _, err := http.RefreshAuthorizationHeader(); err == nil { - statsCache.sharedStats, statsCache.lastFetchErr = http.FetchContent() - } - } - statsCache.lastFetchTimestamp = now } diff --git a/metricbeat/module/kubernetes/scheduler/scheduler.go b/metricbeat/module/kubernetes/scheduler/scheduler.go index 1b563ad000af..29dbba1e9a35 100644 --- a/metricbeat/module/kubernetes/scheduler/scheduler.go +++ b/metricbeat/module/kubernetes/scheduler/scheduler.go @@ -19,11 +19,7 @@ package scheduler import ( "fmt" - "net/http" - "strings" - "time" - "github.com/elastic/beats/v7/metricbeat/helper" "github.com/elastic/beats/v7/metricbeat/helper/prometheus" "github.com/elastic/beats/v7/metricbeat/mb" k8smod "github.com/elastic/beats/v7/metricbeat/module/kubernetes" @@ -83,7 +79,6 @@ func init() { // MetricSet implements the mb.PushMetricSet interface, and therefore does not rely on polling. type MetricSet struct { mb.BaseMetricSet - http *helper.HTTP prometheusClient prometheus.Prometheus prometheusMappings *prometheus.MetricsMapping clusterMeta mapstr.M @@ -104,13 +99,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, fmt.Errorf("must be child of kubernetes module") } - http, err := pc.GetHttp() - if err != nil { - return nil, fmt.Errorf("the http connection is not valid") - } ms := &MetricSet{ BaseMetricSet: base, - http: http, prometheusClient: pc, prometheusMappings: mapping, clusterMeta: util.AddClusterECSMeta(base), @@ -122,37 +112,19 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch gathers information from the apiserver and reports events with this information. func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { events, err := m.prometheusClient.GetProcessedMetrics(m.prometheusMappings) - errorString := fmt.Sprintf("%s", err) - errorUnauthorisedMsg := fmt.Sprintf("unexpected status code %d", http.StatusUnauthorized) - if err != nil && strings.Contains(errorString, errorUnauthorisedMsg) { - count := 2 // We retry twice to refresh the Authorisation token in case of http.StatusUnauthorize = 401 Error - for count > 0 { - if _, errAuth := m.http.RefreshAuthorizationHeader(); errAuth == nil { - events, err = m.prometheusClient.GetProcessedMetrics(m.prometheusMappings) - } - if err != nil { - time.Sleep(m.mod.Config().Period) - count-- - } else { - break - } - } - } - // We need to check for err again in case error is not 401 or RefreshAuthorizationHeader has failed if err != nil { return fmt.Errorf("error getting metrics: %w", err) - } else { - for _, e := range events { - event := mb.TransformMapStrToEvent("kubernetes", e, nil) - if len(m.clusterMeta) != 0 { - event.RootFields.DeepUpdate(m.clusterMeta) - } - isOpen := reporter.Event(event) - if !isOpen { - return nil - } + } + for _, e := range events { + event := mb.TransformMapStrToEvent("kubernetes", e, nil) + if len(m.clusterMeta) != 0 { + event.RootFields.DeepUpdate(m.clusterMeta) + } + isOpen := reporter.Event(event) + if !isOpen { + return nil } - - return nil } + + return nil } From 2200a1640a21a639086a4fd4623c71127159355d Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Wed, 19 Feb 2025 14:38:31 +0100 Subject: [PATCH 05/41] skip flaky test (#42780) skip flaky test: TestFilebeat/Filebeat_crashes_due_to_incorrect_config --- filebeat/testing/integration/sample_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/filebeat/testing/integration/sample_test.go 
b/filebeat/testing/integration/sample_test.go index 01330be7c40e..cd8093231f60 100644 --- a/filebeat/testing/integration/sample_test.go +++ b/filebeat/testing/integration/sample_test.go @@ -113,6 +113,8 @@ output.console: }) t.Run("Filebeat crashes due to incorrect config", func(t *testing.T) { + t.Skip("Flaky test: https://github.com/elastic/beats/issues/42778") + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() From d05a0706e4e1321ad8b18c8aada433e696afd500 Mon Sep 17 00:00:00 2001 From: jonathan molinatto Date: Wed, 19 Feb 2025 09:07:52 -0500 Subject: [PATCH 06/41] [Netflow] Migrate from log to the logp logging library The rest of beats is using github.com/elastic/elastic-agent-libs/logp, so make Netflow use it, too. Cleaned up the test code as well --- .../input/netflow/decoder/config/config.go | 37 ++++++++----------- .../filebeat/input/netflow/decoder/decoder.go | 6 ++- .../decoder/examples/go-netflow-example.go | 25 +++++++------ .../input/netflow/decoder/ipfix/ipfix.go | 4 +- .../input/netflow/decoder/ipfix/ipfix_test.go | 16 +++++--- .../netflow/decoder/protocol/registry_test.go | 12 ++++-- .../input/netflow/decoder/test/helper.go | 5 --- .../filebeat/input/netflow/decoder/v1/v1.go | 14 ++++--- .../input/netflow/decoder/v1/v1_test.go | 11 ++++-- .../filebeat/input/netflow/decoder/v5/v5.go | 7 ++-- .../input/netflow/decoder/v5/v5_test.go | 12 ++++-- .../filebeat/input/netflow/decoder/v6/v6.go | 8 ++-- .../input/netflow/decoder/v6/v6_test.go | 12 ++++-- .../filebeat/input/netflow/decoder/v7/v7.go | 7 ++-- .../input/netflow/decoder/v7/v7_test.go | 8 ++-- .../filebeat/input/netflow/decoder/v8/v8.go | 14 ++++--- .../input/netflow/decoder/v8/v8_test.go | 12 ++++-- .../input/netflow/decoder/v9/decoder.go | 13 ++++--- .../input/netflow/decoder/v9/session.go | 17 +++++---- .../input/netflow/decoder/v9/session_test.go | 19 ++++++---- .../filebeat/input/netflow/decoder/v9/v9.go | 27 ++++++++------ .../input/netflow/decoder/v9/v9_test.go | 34 ++++++++++------- x-pack/filebeat/input/netflow/input.go | 23 +----------- x-pack/filebeat/input/netflow/netflow_test.go | 14 ++++--- 24 files changed, 193 insertions(+), 164 deletions(-) diff --git a/x-pack/filebeat/input/netflow/decoder/config/config.go b/x-pack/filebeat/input/netflow/decoder/config/config.go index 5297f3c4a31e..8b51336596ea 100644 --- a/x-pack/filebeat/input/netflow/decoder/config/config.go +++ b/x-pack/filebeat/input/netflow/decoder/config/config.go @@ -5,10 +5,10 @@ package config import ( - "io" "time" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" + "github.com/elastic/elastic-agent-libs/logp" ) type ActiveSessionsMetric interface { @@ -19,7 +19,7 @@ type ActiveSessionsMetric interface { // Config stores the configuration used by the NetFlow Collector. type Config struct { protocols []string - logOutput io.Writer + logOutput *logp.Logger expiration time.Duration detectReset bool fields fields.FieldDict @@ -28,21 +28,22 @@ type Config struct { activeSessionsMetric ActiveSessionsMetric } -var defaultCfg = Config{ - protocols: []string{}, - logOutput: io.Discard, - expiration: time.Hour, - detectReset: true, - sharedTemplates: false, - withCache: false, -} - // Defaults returns a configuration object with defaults settings: // - no protocols are enabled. -// - log output is discarded +// - log output is set to the logger that is passed in. // - session expiration is checked once every hour. -func Defaults() Config { - return defaultCfg +// - resets are detected. 
+// - templates are not shared. +// - cache is disabled. +func Defaults(logger *logp.Logger) Config { + return Config{ + protocols: []string{}, + logOutput: logger, + expiration: time.Hour, + detectReset: true, + sharedTemplates: false, + withCache: false, + } } // WithProtocols modifies an existing configuration object to enable the @@ -52,12 +53,6 @@ func (c *Config) WithProtocols(protos ...string) *Config { return c } -// WithLogOutput sets the output io.Writer for logging. -func (c *Config) WithLogOutput(output io.Writer) *Config { - c.logOutput = output - return c -} - // WithExpiration configures the expiration timeout for sessions and templates. // A value of zero disables expiration. func (c *Config) WithExpiration(timeout time.Duration) *Config { @@ -121,7 +116,7 @@ func (c *Config) Protocols() []string { } // LogOutput returns the io.Writer where logs are to be written. -func (c *Config) LogOutput() io.Writer { +func (c *Config) LogOutput() *logp.Logger { return c.logOutput } diff --git a/x-pack/filebeat/input/netflow/decoder/decoder.go b/x-pack/filebeat/input/netflow/decoder/decoder.go index 0e1ef3df92cc..37b00e5b3565 100644 --- a/x-pack/filebeat/input/netflow/decoder/decoder.go +++ b/x-pack/filebeat/input/netflow/decoder/decoder.go @@ -14,6 +14,8 @@ import ( "net" "sync" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" @@ -93,8 +95,8 @@ func (p *Decoder) Read(buf *bytes.Buffer, source net.Addr) (records []record.Rec } // NewConfig returns a new configuration structure to be passed to NewDecoder. -func NewConfig() *config.Config { - cfg := config.Defaults() +func NewConfig(logger *logp.Logger) *config.Config { + cfg := config.Defaults(logger) return &cfg } diff --git a/x-pack/filebeat/input/netflow/decoder/examples/go-netflow-example.go b/x-pack/filebeat/input/netflow/decoder/examples/go-netflow-example.go index c86e97e5d460..2d88eb0bebfc 100644 --- a/x-pack/filebeat/input/netflow/decoder/examples/go-netflow-example.go +++ b/x-pack/filebeat/input/netflow/decoder/examples/go-netflow-example.go @@ -8,43 +8,44 @@ import ( "bytes" "encoding/json" "fmt" - "log" "net" - "os" + + "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder" ) func main() { - decoder, err := decoder.NewDecoder(decoder.NewConfig(). - WithLogOutput(os.Stderr). + logger := logp.L().Named("netflow") + + decoder, err := decoder.NewDecoder(decoder.NewConfig(logger). 
WithProtocols("v1", "v5", "v9", "ipfix")) if err != nil { - log.Fatal("Failed creating decoder:", err) + logger.Fatal("Failed creating decoder:", err) } addr, err := net.ResolveUDPAddr("udp", ":2055") if err != nil { - log.Fatal("Failed to resolve address:", err) + logger.Fatal("Failed to resolve address:", err) } server, err := net.ListenUDP("udp", addr) if err != nil { - log.Fatalf("Failed to listen on %v: %v", addr, err) + logger.Fatalf("Failed to listen on %v: %v", addr, err) } defer server.Close() if err = server.SetReadBuffer(1 << 16); err != nil { - log.Fatalf("Failed to set read buffer size for socket: %v", err) + logger.Fatalf("Failed to set read buffer size for socket: %v", err) } - log.Println("Listening on ", server.LocalAddr()) + logger.Debug("Listening on ", server.LocalAddr()) buf := make([]byte, 8192) decBuf := new(bytes.Buffer) for { size, remote, err := server.ReadFromUDP(buf) if err != nil { - log.Println("Error reading from socket:", err) + logger.Debug("Error reading from socket:", err) continue } @@ -52,7 +53,7 @@ func main() { decBuf.Write(buf[:size]) records, err := decoder.Read(decBuf, remote) if err != nil { - log.Printf("warn: Failed reading records from %v: %v\n", remote, err) + logger.Debugf("warn: Failed reading records from %v: %v\n", remote, err) } for _, r := range records { @@ -63,7 +64,7 @@ func main() { "data": r.Fields, }) if err != nil { - log.Fatal(err) + logger.Fatal(err) } fmt.Println(string(evt)) } diff --git a/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix.go b/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix.go index b8799c2d3919..c3f3b0669b81 100644 --- a/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix.go +++ b/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix.go @@ -5,8 +5,6 @@ package ipfix import ( - "log" - "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" v9 "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/v9" @@ -29,7 +27,7 @@ func init() { } func New(config config.Config) protocol.Protocol { - logger := log.New(config.LogOutput(), LogPrefix, 0) + logger := config.LogOutput().Named(LogPrefix) decoder := DecoderIPFIX{ DecoderV9: v9.DecoderV9{Logger: logger, Fields: config.Fields()}, } diff --git a/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix_test.go b/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix_test.go index afed80638c1e..d4b0445d419b 100644 --- a/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix_test.go +++ b/x-pack/filebeat/input/netflow/decoder/ipfix/ipfix_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" @@ -19,6 +21,10 @@ import ( v9 "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/v9" ) +func init() { + logp.TestingSetup() +} + func TestMessageWithOptions(t *testing.T) { rawString := "" + "000a01e45bf435e1000000a500000000000200480400001000080004000c0004" + @@ -67,7 +73,7 @@ func TestMessageWithOptions(t *testing.T) { "version": uint64(10), }, } - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) flows, err := proto.OnPacket(bytes.NewBuffer(raw), test.MakeAddress(t, "127.0.0.1:1234")) assert.NoError(t, err) if assert.Len(t, flows, 7) { @@ -84,7 +90,7 @@ func 
TestOptionTemplates(t *testing.T) { key := v9.MakeSessionKey(addr, 1234, false) t.Run("Single options template", func(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) flows, err := proto.OnPacket(test.MakePacket([]uint16{ // Header // Version, Length, Ts, SeqNo, Source @@ -113,7 +119,7 @@ func TestOptionTemplates(t *testing.T) { }) t.Run("Multiple options template", func(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) raw := test.MakePacket([]uint16{ // Header // Version, Count, Ts, SeqNo, Source @@ -151,7 +157,7 @@ func TestOptionTemplates(t *testing.T) { }) t.Run("records discarded", func(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) raw := test.MakePacket([]uint16{ // Header // Version, Count, Ts, SeqNo, Source @@ -193,7 +199,7 @@ func TestOptionTemplates(t *testing.T) { func TestCustomFields(t *testing.T) { addr := test.MakeAddress(t, "127.0.0.1:12345") - conf := config.Defaults() + conf := config.Defaults(logp.L()) conf.WithCustomFields(fields.FieldDict{ fields.Key{EnterpriseID: 0x12345678, FieldID: 33}: &fields.Field{Name: "customField", Decoder: fields.String}, }) diff --git a/x-pack/filebeat/input/netflow/decoder/protocol/registry_test.go b/x-pack/filebeat/input/netflow/decoder/protocol/registry_test.go index b78fe875486f..8b2eb75f63c6 100644 --- a/x-pack/filebeat/input/netflow/decoder/protocol/registry_test.go +++ b/x-pack/filebeat/input/netflow/decoder/protocol/registry_test.go @@ -11,10 +11,16 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" ) +func init() { + logp.TestingSetup() +} + type testProto int func (testProto) Version() uint16 { @@ -61,7 +67,7 @@ func TestRegistry_Get(t *testing.T) { assert.NoError(t, err) gen, err := registry.Get("my_proto") assert.NoError(t, err) - assert.Equal(t, testProto(0), gen(config.Defaults())) + assert.Equal(t, testProto(0), gen(config.Defaults(logp.L()))) }) t.Run("two protocols", func(t *testing.T) { registry := ProtocolRegistry{} @@ -71,10 +77,10 @@ func TestRegistry_Get(t *testing.T) { assert.NoError(t, err) gen, err := registry.Get("my_proto") assert.NoError(t, err) - assert.Equal(t, testProto(1), gen(config.Defaults())) + assert.Equal(t, testProto(1), gen(config.Defaults(logp.L()))) gen, err = registry.Get("other_proto") assert.NoError(t, err) - assert.Equal(t, testProto(2), gen(config.Defaults())) + assert.Equal(t, testProto(2), gen(config.Defaults(logp.L()))) }) t.Run("not registered", func(t *testing.T) { registry := ProtocolRegistry{} diff --git a/x-pack/filebeat/input/netflow/decoder/test/helper.go b/x-pack/filebeat/input/netflow/decoder/test/helper.go index f62d03fa87ad..6041d2e6c80a 100644 --- a/x-pack/filebeat/input/netflow/decoder/test/helper.go +++ b/x-pack/filebeat/input/netflow/decoder/test/helper.go @@ -20,11 +20,6 @@ type TestLogWriter struct { testing.TB } -func (t TestLogWriter) Write(buf []byte) (int, error) { - t.Log(string(buf)) - return len(buf), nil -} - func MakeAddress(t testing.TB, ipPortPair string) net.Addr { ip, portS, err := net.SplitHostPort(ipPortPair) if err != nil { diff --git a/x-pack/filebeat/input/netflow/decoder/v1/v1.go b/x-pack/filebeat/input/netflow/decoder/v1/v1.go index e023341c4ad4..e7ccc6659ba3 100644 --- a/x-pack/filebeat/input/netflow/decoder/v1/v1.go +++ 
b/x-pack/filebeat/input/netflow/decoder/v1/v1.go @@ -9,7 +9,6 @@ import ( "encoding/binary" "fmt" "io" - "log" "net" "time" @@ -18,6 +17,7 @@ import ( "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" + "github.com/elastic/elastic-agent-libs/logp" ) const ( @@ -52,21 +52,23 @@ var templateV1 = template.Template{ type ReadHeaderFn func(*bytes.Buffer, net.Addr) (int, time.Time, record.Map, error) type NetflowProtocol struct { - logger *log.Logger + logger *logp.Logger flowTemplate *template.Template version uint16 readHeader ReadHeaderFn } func init() { - protocol.Registry.Register(ProtocolName, New) + if err := protocol.Registry.Register(ProtocolName, New); err != nil { + panic(err) + } } func New(config config.Config) protocol.Protocol { - return NewProtocol(ProtocolID, &templateV1, readV1Header, log.New(config.LogOutput(), LogPrefix, 0)) + return NewProtocol(ProtocolID, &templateV1, readV1Header, config.LogOutput().Named(LogPrefix)) } -func NewProtocol(version uint16, template *template.Template, readHeader ReadHeaderFn, logger *log.Logger) protocol.Protocol { +func NewProtocol(version uint16, template *template.Template, readHeader ReadHeaderFn, logger *logp.Logger) protocol.Protocol { return &NetflowProtocol{ logger: logger, flowTemplate: template, @@ -90,7 +92,7 @@ func (NetflowProtocol) Stop() error { func (p *NetflowProtocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows []record.Record, err error) { numFlows, timestamp, metadata, err := p.readHeader(buf, source) if err != nil { - p.logger.Printf("Failed parsing packet: %v", err) + p.logger.Debugf("Failed parsing packet: %v", err) return nil, fmt.Errorf("error reading netflow header: %w", err) } flows, err = p.flowTemplate.Apply(buf, numFlows) diff --git a/x-pack/filebeat/input/netflow/decoder/v1/v1_test.go b/x-pack/filebeat/input/netflow/decoder/v1/v1_test.go index 8887298c06d0..e88a168fbf56 100644 --- a/x-pack/filebeat/input/netflow/decoder/v1/v1_test.go +++ b/x-pack/filebeat/input/netflow/decoder/v1/v1_test.go @@ -17,10 +17,15 @@ import ( "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" template2 "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/test" + "github.com/elastic/elastic-agent-libs/logp" ) +func init() { + logp.TestingSetup() +} + func TestNetflowProtocol_New(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) assert.Nil(t, proto.Start()) assert.Equal(t, uint16(1), proto.Version()) @@ -28,7 +33,7 @@ func TestNetflowProtocol_New(t *testing.T) { } func TestNetflowProtocol_OnPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00010002000000015bf689f605946fb0" + "acd910e5c0a8017b00000000000000000000000e00002cfa" + @@ -105,7 +110,7 @@ func TestNetflowProtocol_OnPacket(t *testing.T) { } func TestNetflowProtocol_BadPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00010002000000015bf689f605" raw, err := hex.DecodeString(rawS) diff --git a/x-pack/filebeat/input/netflow/decoder/v5/v5.go b/x-pack/filebeat/input/netflow/decoder/v5/v5.go index 74d4adbb70e0..d96091c8ca97 100644 --- a/x-pack/filebeat/input/netflow/decoder/v5/v5.go +++ b/x-pack/filebeat/input/netflow/decoder/v5/v5.go 
@@ -8,7 +8,6 @@ import ( "bytes" "encoding/binary" "io" - "log" "net" "time" @@ -54,11 +53,13 @@ var templateV5 = template.Template{ } func init() { - protocol.Registry.Register(ProtocolName, New) + if err := protocol.Registry.Register(ProtocolName, New); err != nil { + panic(err) + } } func New(config config.Config) protocol.Protocol { - return v1.NewProtocol(ProtocolID, &templateV5, ReadV5Header, log.New(config.LogOutput(), LogPrefix, 0)) + return v1.NewProtocol(ProtocolID, &templateV5, ReadV5Header, config.LogOutput().Named(LogPrefix)) } type PacketHeader struct { diff --git a/x-pack/filebeat/input/netflow/decoder/v5/v5_test.go b/x-pack/filebeat/input/netflow/decoder/v5/v5_test.go index 9494d482f6d3..821e6248e953 100644 --- a/x-pack/filebeat/input/netflow/decoder/v5/v5_test.go +++ b/x-pack/filebeat/input/netflow/decoder/v5/v5_test.go @@ -13,14 +13,20 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/test" ) +func init() { + logp.TestingSetup() +} + func TestNetflowProtocol_New(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) assert.Nil(t, proto.Start()) assert.Equal(t, uint16(5), proto.Version()) @@ -28,7 +34,7 @@ func TestNetflowProtocol_New(t *testing.T) { } func TestNetflowProtocol_OnPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00050002000000015bf68d8b35fcb9780000000000000000" + "acd910e5c0a8017b00000000000000000000000e00002cfa" + @@ -119,7 +125,7 @@ func TestNetflowProtocol_OnPacket(t *testing.T) { } func TestNetflowProtocol_BadPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00050002000000015bf689f605" raw, err := hex.DecodeString(rawS) diff --git a/x-pack/filebeat/input/netflow/decoder/v6/v6.go b/x-pack/filebeat/input/netflow/decoder/v6/v6.go index a5d1bc339e97..5949af7960be 100644 --- a/x-pack/filebeat/input/netflow/decoder/v6/v6.go +++ b/x-pack/filebeat/input/netflow/decoder/v6/v6.go @@ -5,8 +5,6 @@ package v6 import ( - "log" - "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" @@ -49,9 +47,11 @@ var templateV6 = template.Template{ } func init() { - protocol.Registry.Register(ProtocolName, New) + if err := protocol.Registry.Register(ProtocolName, New); err != nil { + panic(err) + } } func New(config config.Config) protocol.Protocol { - return v1.NewProtocol(ProtocolID, &templateV6, v5.ReadV5Header, log.New(config.LogOutput(), LogPrefix, 0)) + return v1.NewProtocol(ProtocolID, &templateV6, v5.ReadV5Header, config.LogOutput().Named(LogPrefix)) } diff --git a/x-pack/filebeat/input/netflow/decoder/v6/v6_test.go b/x-pack/filebeat/input/netflow/decoder/v6/v6_test.go index af46896289e2..5703fe523434 100644 --- a/x-pack/filebeat/input/netflow/decoder/v6/v6_test.go +++ b/x-pack/filebeat/input/netflow/decoder/v6/v6_test.go @@ -13,14 +13,20 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" 
"github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/test" ) +func init() { + logp.TestingSetup() +} + func TestNetflowProtocol_New(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) assert.Nil(t, proto.Start()) assert.Equal(t, uint16(6), proto.Version()) @@ -28,7 +34,7 @@ func TestNetflowProtocol_New(t *testing.T) { } func TestNetflowProtocol_OnPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00060002000000015bf68d8b35fcb9780000000000000000" + "acd910e5c0a8017b00000000000000000000000e00002cfa" + @@ -121,7 +127,7 @@ func TestNetflowProtocol_OnPacket(t *testing.T) { } func TestNetflowProtocol_BadPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00060002000000015bf689f605" raw, err := hex.DecodeString(rawS) diff --git a/x-pack/filebeat/input/netflow/decoder/v7/v7.go b/x-pack/filebeat/input/netflow/decoder/v7/v7.go index 62cbdc56a065..d851d9420784 100644 --- a/x-pack/filebeat/input/netflow/decoder/v7/v7.go +++ b/x-pack/filebeat/input/netflow/decoder/v7/v7.go @@ -8,7 +8,6 @@ import ( "bytes" "encoding/binary" "io" - "log" "net" "time" @@ -55,11 +54,13 @@ var v7template = template.Template{ } func init() { - protocol.Registry.Register(ProtocolName, New) + if err := protocol.Registry.Register(ProtocolName, New); err != nil { + panic(err) + } } func New(config config.Config) protocol.Protocol { - return v1.NewProtocol(ProtocolID, &v7template, ReadV7Header, log.New(config.LogOutput(), LogPrefix, 0)) + return v1.NewProtocol(ProtocolID, &v7template, ReadV7Header, config.LogOutput().Named(LogPrefix)) } type PacketHeader struct { diff --git a/x-pack/filebeat/input/netflow/decoder/v7/v7_test.go b/x-pack/filebeat/input/netflow/decoder/v7/v7_test.go index cafdbc36b563..66481394dbc5 100644 --- a/x-pack/filebeat/input/netflow/decoder/v7/v7_test.go +++ b/x-pack/filebeat/input/netflow/decoder/v7/v7_test.go @@ -13,6 +13,8 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" @@ -20,7 +22,7 @@ import ( ) func TestNetflowProtocol_New(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) assert.Nil(t, proto.Start()) assert.Equal(t, uint16(7), proto.Version()) @@ -28,7 +30,7 @@ func TestNetflowProtocol_New(t *testing.T) { } func TestNetflowProtocol_OnPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00070002000000015bf68d8b35fcb9780000000000000000" + "acd910e5c0a8017b00000000000000000000000e00002cfa" + @@ -119,7 +121,7 @@ func TestNetflowProtocol_OnPacket(t *testing.T) { } func TestNetflowProtocol_BadPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00060002000000015bf689f605" raw, err := hex.DecodeString(rawS) diff --git a/x-pack/filebeat/input/netflow/decoder/v8/v8.go b/x-pack/filebeat/input/netflow/decoder/v8/v8.go index 9fa88ea1c686..06b3ac6c292c 100644 --- a/x-pack/filebeat/input/netflow/decoder/v8/v8.go +++ b/x-pack/filebeat/input/netflow/decoder/v8/v8.go @@ -9,7 +9,6 @@ 
import ( "encoding/binary" "fmt" "io" - "log" "net" "time" @@ -18,6 +17,7 @@ import ( "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" + "github.com/elastic/elastic-agent-libs/logp" ) const ( @@ -300,16 +300,18 @@ var templates = map[AggType]*template.Template{ } type NetflowV8Protocol struct { - logger *log.Logger + logger *logp.Logger } func init() { - protocol.Registry.Register(ProtocolName, New) + if err := protocol.Registry.Register(ProtocolName, New); err != nil { + panic(err) + } } func New(config config.Config) protocol.Protocol { return &NetflowV8Protocol{ - logger: log.New(config.LogOutput(), LogPrefix, 0), + logger: config.LogOutput().Named(LogPrefix), } } @@ -320,12 +322,12 @@ func (NetflowV8Protocol) Version() uint16 { func (p *NetflowV8Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows []record.Record, err error) { header, err := ReadPacketHeader(buf) if err != nil { - p.logger.Printf("Failed parsing packet: %v", err) + p.logger.Debugf("Failed parsing packet: %v", err) return nil, fmt.Errorf("error reading V8 header: %w", err) } template, found := templates[header.Aggregation] if !found { - p.logger.Printf("Packet from %s uses an unknown V8 aggregation: %d", source, header.Aggregation) + p.logger.Debugf("Packet from %s uses an unknown V8 aggregation: %d", source, header.Aggregation) return nil, fmt.Errorf("unsupported V8 aggregation: %d", header.Aggregation) } metadata := header.GetMetadata(source) diff --git a/x-pack/filebeat/input/netflow/decoder/v8/v8_test.go b/x-pack/filebeat/input/netflow/decoder/v8/v8_test.go index 81ad2437cae9..0624b76db47d 100644 --- a/x-pack/filebeat/input/netflow/decoder/v8/v8_test.go +++ b/x-pack/filebeat/input/netflow/decoder/v8/v8_test.go @@ -14,12 +14,18 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" template2 "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/test" ) +func init() { + logp.TestingSetup() +} + func TestTemplates(t *testing.T) { for code, template := range templates { if !template2.ValidateTemplate(t, template) { @@ -29,7 +35,7 @@ func TestTemplates(t *testing.T) { } func TestNetflowProtocol_New(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) assert.Nil(t, proto.Start()) assert.Equal(t, uint16(8), proto.Version()) @@ -37,7 +43,7 @@ func TestNetflowProtocol_New(t *testing.T) { } func TestNetflowProtocol_BadPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) rawS := "00080002000000015bf689f605" raw, err := hex.DecodeString(rawS) @@ -50,7 +56,7 @@ func TestNetflowProtocol_BadPacket(t *testing.T) { } func TestNetflowV8Protocol_OnPacket(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) address := test.MakeAddress(t, "127.0.0.1:11111") captureTime, err := time.Parse(time.RFC3339Nano, "2018-11-22T20:53:03.987654321Z") if !assert.NoError(t, err) { diff --git a/x-pack/filebeat/input/netflow/decoder/v9/decoder.go b/x-pack/filebeat/input/netflow/decoder/v9/decoder.go index bd34b424d2f3..d283bab506be 100644 --- 
a/x-pack/filebeat/input/netflow/decoder/v9/decoder.go +++ b/x-pack/filebeat/input/netflow/decoder/v9/decoder.go @@ -10,10 +10,11 @@ import ( "errors" "fmt" "io" - "log" "net" "time" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" @@ -29,18 +30,18 @@ type Decoder interface { ReadSetHeader(*bytes.Buffer) (SetHeader, error) ReadTemplateSet(setID uint16, buf *bytes.Buffer) ([]*template.Template, error) ReadFieldDefinition(*bytes.Buffer) (field fields.Key, length uint16, err error) - GetLogger() *log.Logger + GetLogger() *logp.Logger GetFields() fields.FieldDict } type DecoderV9 struct { - Logger *log.Logger + Logger *logp.Logger Fields fields.FieldDict } var _ Decoder = (*DecoderV9)(nil) -func (d DecoderV9) GetLogger() *log.Logger { +func (d DecoderV9) GetLogger() *logp.Logger { return d.Logger } @@ -124,10 +125,10 @@ func ReadFields(d Decoder, buf *bytes.Buffer, count int) (record template.Templa if length == template.VariableLength || min <= field.Length && field.Length <= max { field.Info = fieldInfo } else if logger != nil { - logger.Printf("Size of field %s in template is out of bounds (size=%d, min=%d, max=%d)", fieldInfo.Name, field.Length, min, max) + logger.Debugf("Size of field %s in template is out of bounds (size=%d, min=%d, max=%d)", fieldInfo.Name, field.Length, min, max) } } else if logger != nil { - logger.Printf("Field %v in template not found", key) + logger.Debugf("Field %v in template not found", key) } record.Fields[i] = field } diff --git a/x-pack/filebeat/input/netflow/decoder/v9/session.go b/x-pack/filebeat/input/netflow/decoder/v9/session.go index e72fa1ab80a6..4a31b16937d8 100644 --- a/x-pack/filebeat/input/netflow/decoder/v9/session.go +++ b/x-pack/filebeat/input/netflow/decoder/v9/session.go @@ -5,12 +5,13 @@ package v9 import ( - "log" "net" "sync" "sync/atomic" "time" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" ) @@ -45,12 +46,12 @@ type SessionState struct { mutex sync.RWMutex Templates map[TemplateKey]*TemplateWrapper lastSequence uint32 - logger *log.Logger + logger *logp.Logger Delete atomic.Bool } // NewSession creates a new session. -func NewSession(logger *log.Logger) *SessionState { +func NewSession(logger *logp.Logger) *SessionState { return &SessionState{ logger: logger, Templates: make(map[TemplateKey]*TemplateWrapper), @@ -59,7 +60,7 @@ func NewSession(logger *log.Logger) *SessionState { // AddTemplate adds the passed template. 
func (s *SessionState) AddTemplate(t *template.Template) { - s.logger.Printf("state %p addTemplate %d %p", s, t.ID, t) + s.logger.Debugf("state %p addTemplate %d %p", s, t.ID, t) s.mutex.Lock() defer s.mutex.Unlock() s.Templates[TemplateKey(t.ID)] = &TemplateWrapper{Template: t} @@ -94,7 +95,7 @@ func (s *SessionState) ExpireTemplates() (alive int, removed int) { total = len(s.Templates) for _, id := range toDelete { if template, found := s.Templates[id]; found && template.Delete.Load() { - s.logger.Printf("expired template %v", id) + s.logger.Debugf("expired template %v", id) delete(s.Templates, id) removed++ } @@ -125,12 +126,12 @@ func isValidSequence(current, next uint32) bool { type SessionMap struct { mutex sync.RWMutex Sessions map[SessionKey]*SessionState - logger *log.Logger + logger *logp.Logger metric config.ActiveSessionsMetric } // NewSessionMap returns a new SessionMap. -func NewSessionMap(logger *log.Logger, metric config.ActiveSessionsMetric) SessionMap { +func NewSessionMap(logger *logp.Logger, metric config.ActiveSessionsMetric) SessionMap { return SessionMap{ logger: logger, Sessions: make(map[SessionKey]*SessionState), @@ -216,7 +217,7 @@ func (m *SessionMap) CleanupLoop(interval time.Duration, done <-chan struct{}) { case <-t.C: aliveS, removedS, aliveT, removedT := m.cleanup() if removedS > 0 || removedT > 0 { - m.logger.Printf("Expired %d sessions (%d remain) / %d templates (%d remain)", removedS, aliveS, removedT, aliveT) + m.logger.Debugf("Expired %d sessions (%d remain) / %d templates (%d remain)", removedS, aliveS, removedT, aliveT) } } } diff --git a/x-pack/filebeat/input/netflow/decoder/v9/session_test.go b/x-pack/filebeat/input/netflow/decoder/v9/session_test.go index 8c10b2b98e94..88d38284be8d 100644 --- a/x-pack/filebeat/input/netflow/decoder/v9/session_test.go +++ b/x-pack/filebeat/input/netflow/decoder/v9/session_test.go @@ -5,26 +5,29 @@ package v9 import ( - "io" - "log" "math" "sync" "testing" "time" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/template" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/test" ) -var logger = log.New(io.Discard, "", 0) +func init() { + logp.TestingSetup() +} func makeSessionKey(t testing.TB, ipPortPair string, domain uint32) SessionKey { return MakeSessionKey(test.MakeAddress(t, ipPortPair), domain, false) } func TestSessionMap_GetOrCreate(t *testing.T) { + var logger = logp.NewLogger("session_map") t.Run("consistent behavior", func(t *testing.T) { sm := NewSessionMap(logger, nil) @@ -101,7 +104,7 @@ func testTemplate(id uint16) *template.Template { } func TestSessionState(t *testing.T) { - logger := log.New(io.Discard, "", 0) + var logger = logp.NewLogger("session_state") t.Run("create and get", func(t *testing.T) { s := NewSession(logger) t1 := testTemplate(1) @@ -133,7 +136,7 @@ func TestSessionState(t *testing.T) { } func TestSessionMap_Cleanup(t *testing.T) { - sm := NewSessionMap(logger, nil) + sm := NewSessionMap(logp.L(), nil) // Session is created k1 := makeSessionKey(t, "127.0.0.1:1234", 1) @@ -180,7 +183,7 @@ func TestSessionMap_Cleanup(t *testing.T) { func TestSessionMap_CleanupLoop(t *testing.T) { timeout := time.Millisecond * 100 - sm := NewSessionMap(log.New(io.Discard, "", 0), nil) + sm := NewSessionMap(logp.NewLogger(""), nil) key := makeSessionKey(t, "127.0.0.1:1", 42) s := sm.GetOrCreate(key) @@ -201,7 +204,7 @@ func TestSessionMap_CleanupLoop(t *testing.T) { } func 
TestTemplateExpiration(t *testing.T) { - s := NewSession(logger) + s := NewSession(logp.L()) assert.Nil(t, s.GetTemplate(256)) assert.Nil(t, s.GetTemplate(257)) s.AddTemplate(testTemplate(256)) @@ -263,7 +266,7 @@ func TestSessionCheckReset(t *testing.T) { }, } { t.Run(testCase.title, func(t *testing.T) { - s := NewSession(logger) + s := NewSession(logp.L()) s.lastSequence = testCase.current prev, isReset := s.CheckReset(testCase.next) assert.Equal(t, prev, testCase.current) diff --git a/x-pack/filebeat/input/netflow/decoder/v9/v9.go b/x-pack/filebeat/input/netflow/decoder/v9/v9.go index 4e67dde701f3..611a9dcba256 100644 --- a/x-pack/filebeat/input/netflow/decoder/v9/v9.go +++ b/x-pack/filebeat/input/netflow/decoder/v9/v9.go @@ -8,10 +8,11 @@ import ( "bytes" "context" "fmt" - "log" "net" "time" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/protocol" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/record" @@ -30,7 +31,7 @@ type NetflowV9Protocol struct { ctx context.Context cancel context.CancelFunc decoder Decoder - logger *log.Logger + logger *logp.Logger Session SessionMap timeout time.Duration cache *pendingTemplatesCache @@ -39,15 +40,17 @@ type NetflowV9Protocol struct { } func init() { - _ = protocol.Registry.Register(ProtocolName, New) + if err := protocol.Registry.Register(ProtocolName, New); err != nil { + panic(err) + } } func New(config config.Config) protocol.Protocol { - logger := log.New(config.LogOutput(), LogPrefix, 0) + logger := config.LogOutput().Named(LogPrefix) return NewProtocolWithDecoder(DecoderV9{Logger: logger, Fields: config.Fields()}, config, logger) } -func NewProtocolWithDecoder(decoder Decoder, config config.Config, logger *log.Logger) *NetflowV9Protocol { +func NewProtocolWithDecoder(decoder Decoder, config config.Config, logger *logp.Logger) *NetflowV9Protocol { ctx, cancel := context.WithCancel(context.Background()) pd := &NetflowV9Protocol{ ctx: ctx, @@ -94,7 +97,7 @@ func (p *NetflowV9Protocol) Stop() error { func (p *NetflowV9Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows []record.Record, err error) { header, payload, numFlowSets, err := p.decoder.ReadPacketHeader(buf) if err != nil { - p.logger.Printf("Unable to read V9 header: %v", err) + p.logger.Debugf("Unable to read V9 header: %v", err) return nil, fmt.Errorf("error reading header: %w", err) } buf = payload @@ -104,10 +107,10 @@ func (p *NetflowV9Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows session := p.Session.GetOrCreate(sessionKey) remote := source.String() - p.logger.Printf("Packet from:%s src:%d seq:%d", remote, header.SourceID, header.SequenceNo) + p.logger.Debugf("Packet from:%s src:%d seq:%d", remote, header.SourceID, header.SequenceNo) if p.detectReset { if prev, reset := session.CheckReset(header.SequenceNo); reset { - p.logger.Printf("Session %s reset (sequence=%d last=%d)", remote, header.SequenceNo, prev) + p.logger.Debugf("Session %s reset (sequence=%d last=%d)", remote, header.SequenceNo, prev) } } @@ -117,15 +120,15 @@ func (p *NetflowV9Protocol) OnPacket(buf *bytes.Buffer, source net.Addr) (flows break } if buf.Len() < set.BodyLength() { - p.logger.Printf("FlowSet ID %+v overflows packet from %s", set, source) + p.logger.Debugf("FlowSet ID %+v overflows packet from %s", set, source) break } body := bytes.NewBuffer(buf.Next(set.BodyLength())) - p.logger.Printf("FlowSet ID %d length %d", 
set.SetID, set.BodyLength()) + p.logger.Debugf("FlowSet ID %d length %d", set.SetID, set.BodyLength()) f, err := p.parseSet(set.SetID, sessionKey, session, body) if err != nil { - p.logger.Printf("Error parsing set %d: %v", set.SetID, err) + p.logger.Debugf("Error parsing set %d: %v", set.SetID, err) return nil, fmt.Errorf("error parsing set: %w", err) } flows = append(flows, f...) @@ -152,7 +155,7 @@ func (p *NetflowV9Protocol) parseSet( if p.cache != nil { p.cache.Add(key, buf) } else { - p.logger.Printf("No template for ID %d", setID) + p.logger.Debugf("No template for ID %d", setID) } return nil, nil } diff --git a/x-pack/filebeat/input/netflow/decoder/v9/v9_test.go b/x-pack/filebeat/input/netflow/decoder/v9/v9_test.go index 67212c1e4084..c2fc1c2fa14e 100644 --- a/x-pack/filebeat/input/netflow/decoder/v9/v9_test.go +++ b/x-pack/filebeat/input/netflow/decoder/v9/v9_test.go @@ -10,17 +10,23 @@ import ( "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/config" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/fields" "github.com/elastic/beats/v7/x-pack/filebeat/input/netflow/decoder/test" ) +func init() { + logp.TestingSetup() +} + func TestNetflowV9Protocol_ID(t *testing.T) { - assert.Equal(t, ProtocolID, New(config.Defaults()).Version()) + assert.Equal(t, ProtocolID, New(config.Defaults(logp.L())).Version()) } func TestNetflowProtocol_New(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) assert.Nil(t, proto.Start()) assert.Equal(t, uint16(9), proto.Version()) @@ -33,7 +39,7 @@ func TestOptionTemplates(t *testing.T) { key := MakeSessionKey(addr, sourceID, false) t.Run("Single options template", func(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) flows, err := proto.OnPacket(test.MakePacket([]uint16{ // Header // Version, Count, Uptime, Ts, SeqNo, Source @@ -62,7 +68,7 @@ func TestOptionTemplates(t *testing.T) { }) t.Run("Multiple options template", func(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) raw := test.MakePacket([]uint16{ // Header // Version, Count, Uptime, Ts, SeqNo, Source @@ -99,7 +105,7 @@ func TestOptionTemplates(t *testing.T) { }) t.Run("records discarded", func(t *testing.T) { - proto := New(config.Defaults()) + proto := New(config.Defaults(logp.L())) raw := test.MakePacket([]uint16{ // Header // Version, Count, Uptime, Ts, SeqNo, Source @@ -161,8 +167,8 @@ func TestSessionReset(t *testing.T) { 3, 3, } t.Run("Reset disabled", func(t *testing.T) { - cfg := config.Defaults() - cfg.WithSequenceResetEnabled(false).WithLogOutput(test.TestLogWriter{TB: t}) + cfg := config.Defaults(logp.NewLogger("v9_test")) + cfg.WithSequenceResetEnabled(false) proto := New(cfg) flows, err := proto.OnPacket(test.MakePacket(templatePacket), addr) assert.NoError(t, err) @@ -172,8 +178,8 @@ func TestSessionReset(t *testing.T) { assert.Len(t, flows, 1) }) t.Run("Reset enabled", func(t *testing.T) { - cfg := config.Defaults() - cfg.WithSequenceResetEnabled(true).WithLogOutput(test.TestLogWriter{TB: t}) + cfg := config.Defaults(logp.NewLogger("v9_test")) + cfg.WithSequenceResetEnabled(true) proto := New(cfg) flows, err := proto.OnPacket(test.MakePacket(templatePacket), addr) assert.NoError(t, err) @@ -192,8 +198,8 @@ func TestSessionReset(t *testing.T) { tmp[9] = uint16(sourceID & 0xffff) return test.MakePacket(tmp) } - cfg := config.Defaults() - 
cfg.WithSequenceResetEnabled(true).WithLogOutput(test.TestLogWriter{TB: t}) + cfg := config.Defaults(logp.NewLogger("v9_test")) + cfg.WithSequenceResetEnabled(true) proto := New(cfg) flows, err := proto.OnPacket(mkPack(templatePacket, 1, 1000), addr) assert.NoError(t, err) @@ -213,7 +219,7 @@ func TestSessionReset(t *testing.T) { func TestCustomFields(t *testing.T) { addr := test.MakeAddress(t, "127.0.0.1:12345") - conf := config.Defaults() + conf := config.Defaults(logp.L()) conf.WithCustomFields(fields.FieldDict{ fields.Key{FieldID: 33333}: &fields.Field{Name: "customField", Decoder: fields.String}, }) @@ -276,7 +282,7 @@ func TestSharedTemplates(t *testing.T) { } t.Run("Template sharing enabled", func(t *testing.T) { - cfg := config.Defaults() + cfg := config.Defaults(logp.L()) cfg.WithSharedTemplates(true) proto := New(cfg) flows, err := proto.OnPacket(test.MakePacket(templatePacket), templateAddr) @@ -288,7 +294,7 @@ func TestSharedTemplates(t *testing.T) { }) t.Run("Template sharing disabled", func(t *testing.T) { - cfg := config.Defaults() + cfg := config.Defaults(logp.L()) cfg.WithSharedTemplates(false) proto := New(cfg) flows, err := proto.OnPacket(test.MakePacket(templatePacket), templateAddr) diff --git a/x-pack/filebeat/input/netflow/input.go b/x-pack/filebeat/input/netflow/input.go index bb4046b74a91..a415f994a9b5 100644 --- a/x-pack/filebeat/input/netflow/input.go +++ b/x-pack/filebeat/input/netflow/input.go @@ -129,10 +129,9 @@ func (n *netflowInput) Run(env v2.Context, connector beat.PipelineConnector) err n.metrics = newInputMetrics(n.udpMetrics.Registry()) var err error - n.decoder, err = decoder.NewDecoder(decoder.NewConfig(). + n.decoder, err = decoder.NewDecoder(decoder.NewConfig(n.logger). WithProtocols(n.cfg.Protocols...). WithExpiration(n.cfg.ExpirationTimeout). - WithLogOutput(&logDebugWrapper{Logger: n.logger}). WithCustomFields(n.customFields...). WithSequenceResetEnabled(n.cfg.DetectSequenceReset). WithSharedTemplates(n.cfg.ShareTemplates). @@ -236,26 +235,6 @@ func (n *netflowInput) Run(env v2.Context, connector beat.PipelineConnector) err return nil } -// An adapter so that logp.Logger can be used as a log.Logger. -type logDebugWrapper struct { - sync.Mutex - Logger *logp.Logger - buf []byte -} - -// Write writes messages to the log. -func (w *logDebugWrapper) Write(p []byte) (n int, err error) { - w.Lock() - defer w.Unlock() - n = len(p) - w.buf = append(w.buf, p...) - for endl := bytes.IndexByte(w.buf, '\n'); endl != -1; endl = bytes.IndexByte(w.buf, '\n') { - w.Logger.Debug(string(w.buf[:endl])) - w.buf = w.buf[endl+1:] - } - return n, nil -} - // stop stops the netflow input func (n *netflowInput) stop() { n.mtx.Lock() diff --git a/x-pack/filebeat/input/netflow/netflow_test.go b/x-pack/filebeat/input/netflow/netflow_test.go index 65383df4a98b..5bc4763ea7e6 100644 --- a/x-pack/filebeat/input/netflow/netflow_test.go +++ b/x-pack/filebeat/input/netflow/netflow_test.go @@ -49,6 +49,10 @@ const ( datSourceIP = "192.0.2.1" ) +func init() { + logp.TestingSetup() +} + // DatTests specifies the .dat files associated with test cases. type DatTests struct { Tests map[string]TestCase `yaml:"tests"` @@ -289,11 +293,10 @@ func readDatTests(t testing.TB) *DatTests { func getFlowsFromDat(t testing.TB, name string, testCase TestCase) TestResult { t.Helper() - config := decoder.NewConfig(). + config := decoder.NewConfig(logp.NewLogger("netflow_test")). WithProtocols(protocol.Registry.All()...). WithSequenceResetEnabled(false). - WithExpiration(0). 
-		WithLogOutput(test.TestLogWriter{TB: t})
+		WithExpiration(0)
 
 	for _, fieldFile := range testCase.Fields {
 		fields, err := LoadFieldDefinitionsFromFile(filepath.Join(fieldsDir, fieldFile))
@@ -351,12 +354,11 @@ func getFlowsFromPCAP(t testing.TB, name, pcapFile string) TestResult {
 	r, err := pcapgo.NewReader(f)
 	require.NoError(t, err)
 
-	config := decoder.NewConfig().
+	config := decoder.NewConfig(logp.NewLogger("netflow_test")).
 		WithProtocols(protocol.Registry.All()...).
 		WithSequenceResetEnabled(false).
 		WithExpiration(0).
-		WithCache(strings.HasSuffix(pcapFile, ".reversed.pcap")).
-		WithLogOutput(test.TestLogWriter{TB: t})
+		WithCache(strings.HasSuffix(pcapFile, ".reversed.pcap"))
 
 	decoder, err := decoder.NewDecoder(config)
 	if !assert.NoError(t, err) {

From d6ff82bb031716d537b8de99cf4125b3eb8a3267 Mon Sep 17 00:00:00 2001
From: "Alex K." <8418476+fearful-symmetry@users.noreply.github.com>
Date: Wed, 19 Feb 2025 08:08:04 -0800
Subject: [PATCH 07/41] Handle leak of process info in `hostfs` provider for
 `add_session_metadata` (#42398)

* handle leak in hostfs provider for sessionmd
* add metrics, clean up
* fix tests
* add process reaper for dropped exit events
* remove test code
* linter
* more testing, fix mock provider
* fix error checks
* clean up, add session maps to reaper, expand metrics
* fix tests
* fix tests
* format
* docs
---
 .../sessionmd/add_session_metadata.go         |  17 +-
 .../sessionmd/add_session_metadata_test.go    |   7 +-
 .../auditbeat/processors/sessionmd/config.go  |  19 +-
 .../docs/add_session_metadata.asciidoc        |  24 +-
 .../processors/sessionmd/processdb/db.go      | 119 +++++++---
 .../processors/sessionmd/processdb/db_test.go | 188 ++++++++++++++++
 .../sessionmd/processdb/entry_leader_test.go  |  95 +++++---
 .../processors/sessionmd/processdb/metrics.go |  56 +++++
 .../processors/sessionmd/processdb/reaper.go  | 207 +++++++++++-------
 .../processors/sessionmd/procfs/mock.go       |  16 ++
 .../processors/sessionmd/procfs/procfs.go     |   6 +
 .../provider/procfsprovider/procfsprovider.go |   1 +
 .../procfsprovider/procfsprovider_test.go     | 100 ++++++++-
 .../processors/sessionmd/types/events.go      |   7 +-
 14 files changed, 714 insertions(+), 148 deletions(-)
 create mode 100644 x-pack/auditbeat/processors/sessionmd/processdb/metrics.go

diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go
index 93fed7096b33..ed6701e18064 100644
--- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go
+++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"reflect"
 	"strconv"
+	"sync/atomic"
 
 	"github.com/elastic/beats/v7/libbeat/beat"
 	"github.com/elastic/beats/v7/libbeat/processors"
@@ -23,6 +24,7 @@ import (
 	cfg "github.com/elastic/elastic-agent-libs/config"
 	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/mapstr"
+	"github.com/elastic/elastic-agent-libs/monitoring"
 )
 
 const (
@@ -37,6 +39,9 @@ func InitializeModule() {
 	processors.RegisterPlugin(processorName, New)
 }
 
+// instanceID assigns a unique ID to every instance of the metrics handler for the procfs DB
+var instanceID atomic.Uint32
+
 type addSessionMetadata struct {
 	ctx    context.Context
 	cancel context.CancelFunc
@@ -56,9 +61,17 @@ func New(cfg *cfg.C) (beat.Processor, error) {
 
 	logger := logp.NewLogger(logName)
 
+	id := int(instanceID.Add(1))
+	regName := "processor.add_session_metadata.processdb"
+	// if more than one instance of the DB is running, start to increment the metrics keys.
+	if id > 1 {
+		regName = fmt.Sprintf("%s.%d", regName, id)
+	}
+	metricsReg := monitoring.Default.NewRegistry(regName)
+
 	ctx, cancel := context.WithCancel(context.Background())
 	reader := procfs.NewProcfsReader(*logger)
-	db, err := processdb.NewDB(reader, *logger)
+	db, err := processdb.NewDB(ctx, metricsReg, reader, logger, c.DBReaperPeriod, c.ReapProcesses)
 	if err != nil {
 		cancel()
 		return nil, fmt.Errorf("failed to create DB: %w", err)
 	}
@@ -182,7 +195,7 @@ func (p *addSessionMetadata) enrich(ev *beat.Event) (*beat.Event, error) {
 		fullProcess, err = p.db.GetProcess(pid)
 		if err != nil {
 			e := fmt.Errorf("pid %v not found in db: %w", pid, err)
-			p.logger.Debugw("PID not found in provider", "pid", pid, "error", err)
+			p.logger.Debugf("PID %d not found in provider: %s", pid, err)
 			return nil, e
 		}
 	}
diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go
index a993737611bd..422af4c935c2 100644
--- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go
+++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go
@@ -7,7 +7,9 @@
 package sessionmd
 
 import (
+	"context"
 	"testing"
+	"time"
 
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/require"
@@ -18,6 +20,7 @@ import (
 	"github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types"
 	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/mapstr"
+	"github.com/elastic/elastic-agent-libs/monitoring"
 )
 
 var (
@@ -337,10 +340,12 @@ var (
 )
 
 func TestEnrich(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
+	defer cancel()
 	for _, tt := range enrichTests {
 		t.Run(tt.testName, func(t *testing.T) {
 			reader := procfs.NewMockReader()
-			db, err := processdb.NewDB(reader, *logger)
+			db, err := processdb.NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false)
 			require.Nil(t, err)
 			for _, ev := range tt.mockProcesses {
diff --git a/x-pack/auditbeat/processors/sessionmd/config.go b/x-pack/auditbeat/processors/sessionmd/config.go
index 1aaa354c97d4..8bedc80bbdcb 100644
--- a/x-pack/auditbeat/processors/sessionmd/config.go
+++ b/x-pack/auditbeat/processors/sessionmd/config.go
@@ -6,15 +6,28 @@
 
 package sessionmd
 
+import "time"
+
 // Config for add_session_metadata processor.
 type config struct {
-	Backend  string `config:"backend"`
+	// Backend specifies the data source for the processor. Possible values are `auto`, `procfs`, and `kernel_tracing`
+	Backend string `config:"backend"`
+	// PIDField specifies the event field used to locate the process ID
 	PIDField string `config:"pid_field"`
+	// DBReaperPeriod specifies how often the backing process DB should remove orphaned and exited events.
+	// Only valid for the `procfs` backend, or if `auto` falls back to `procfs`
+	DBReaperPeriod time.Duration `config:"db_reaper_period"`
+	// ReapProcesses, if enabled, will tell the process DB reaper thread to also remove orphaned process exec events, in addition to orphaned exit events and completed process events.
+	// This can result in data loss if auditbeat is running in an environment where it can't properly talk to procfs, but it can also reduce the memory footprint of auditbeat.
+	// Only valid for the `procfs` backend.
+	ReapProcesses bool `config:"reap_processes"`
 }
 
 func defaultConfig() config {
 	return config{
-		Backend:  "auto",
-		PIDField: "process.pid",
+		Backend:        "auto",
+		PIDField:       "process.pid",
+		DBReaperPeriod: time.Second * 30,
+		ReapProcesses:  false,
 	}
 }
diff --git a/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc b/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc
index 8c9314d054ff..7e5b2a3c54a2 100644
--- a/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc
+++ b/x-pack/auditbeat/processors/sessionmd/docs/add_session_metadata.asciidoc
@@ -70,7 +70,7 @@ auditbeat.modules:
 - module: auditd
   processors:
   - add_session_metadata:
-      backend: "auto"
+      backend: "auto"
 -------------------------------------
 +
 . Add audit rules in the modules configuration section of `auditbeat.yml` or the
@@ -96,3 +96,25 @@ auditbeat.modules:
 -------------------------------------
 sudo systemctl restart auditbeat
 -------------------------------------
+
+===== Configuring the Process Database
+
+When using the `procfs` backend, `add_session_metadata` will use an in-memory database to store and match events as they arrive at the processor.
+This processor has a number of additional config values:
+
+[source,yaml]
+-------------------------------------
+auditbeat.modules:
+- module: auditd
+  processors:
+  - add_session_metadata:
+      backend: "procfs"
+      reap_processes: false
+      db_reaper_period: 30s
+-------------------------------------
+
+* `reap_processes` tells the database to remove orphan `execve` and `execveat` process events for which no matching `exit_group` event is found.
+  This may result in incomplete data, but will reduce memory usage under high load. The default is `false`.
+* `db_reaper_period` specifies the time interval of the reaper process that will regularly remove exited and orphaned processes from the database.
+  Setting this value lower may result in incomplete data, but will reduce memory pressure. Setting this to a higher value may help on systems with high load, but will increase memory usage.
+  The default is `30s`.
\ No newline at end of file
diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/db.go b/x-pack/auditbeat/processors/sessionmd/processdb/db.go
index e9e53eb965ac..f8f7b2fd0bc6 100644
--- a/x-pack/auditbeat/processors/sessionmd/processdb/db.go
+++ b/x-pack/auditbeat/processors/sessionmd/processdb/db.go
@@ -7,7 +7,7 @@
 package processdb
 
 import (
-	"container/heap"
+	"context"
 	"encoding/base64"
 	"errors"
 	"fmt"
@@ -26,6 +26,7 @@ import (
 	"github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/timeutils"
 	"github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types"
 	"github.com/elastic/elastic-agent-libs/logp"
+	"github.com/elastic/elastic-agent-libs/monitoring"
 )
 
 type EntryType string
@@ -68,9 +69,12 @@ type Process struct {
 	CTTY     tty.TTYDev
 	Argv     []string
 	Cwd      string
-	Env      map[string]string
 	Filename string
 	ExitCode int32
+
+	// procfsLookupFail is true if procfs couldn't find a matching PID in /proc.
+	procfsLookupFail bool
+	insertTime       time.Time
 }
 
 var (
@@ -175,22 +179,59 @@ type DB struct {
 	entryLeaderRelationships map[uint32]uint32
 	procfs                   procfs.Reader
 	stopChan                 chan struct{}
-	removalCandidates        rcHeap
-}
-
-func NewDB(reader procfs.Reader, logger logp.Logger) (*DB, error) {
+	// map of processes that we can remove during the next reaper run, if the exit event is older than `removalCandidateTimeout`
+	removalMap map[uint32]removalCandidate
+	ctx        context.Context
+
+	// used for metrics reporting
+	stats *Stats
+
+	// knobs for the reaper thread follow
+
+	// how often the reaper checks for expired or orphaned events.
+	// A negative value disables the reaper.
+	reaperPeriod time.Duration
+	// Tells the reaper to remove orphaned process exec events.
+	// If true, exec events for which no /proc entry can be found will be removed after their insertion time has passed `orphanTimeout`.
+	// If disabled, the reaper will only remove exec events if they are matched with an exit event.
+	reapProcesses bool
+	// The duration after which we'll reap an orphaned process exec event for which no /proc data exists. Measured from the time the event is inserted.
+	processReapAfter time.Duration
+}
+
+// NewDB creates a new DB for tracking processes.
+//
+// - metrics: monitoring registry for exporting DB metrics
+// - reader: handler for /proc data and events.
+// - reaperPeriod: tells the reaper to update its tracking of dead and orphaned processes at every `reaperPeriod` interval.
+// - reapProcesses: optionally tell the reaper to also reap orphan processes from the DB, if no matching exit event can be found.
+//   May result in data loss if the DB is under load and events do not arrive in a timely fashion.
+func NewDB(ctx context.Context, metrics *monitoring.Registry, reader procfs.Reader, logger *logp.Logger, reaperPeriod time.Duration, reapProcesses bool) (*DB, error) {
 	once.Do(initialize)
 	if initError != nil {
 		return &DB{}, initError
 	}
 	db := DB{
-		logger:                   logp.NewLogger("processdb"),
+		logger:                   logger.Named("processdb"),
 		processes:                make(map[uint32]Process),
 		entryLeaders:             make(map[uint32]EntryType),
 		entryLeaderRelationships: make(map[uint32]uint32),
 		procfs:                   reader,
 		stopChan:                 make(chan struct{}),
-		removalCandidates:        make(rcHeap, 0),
+		removalMap:               make(map[uint32]removalCandidate),
+		reaperPeriod:             reaperPeriod,
+		stats:                    NewStats(metrics),
+		reapProcesses:            reapProcesses,
+		processReapAfter:         time.Minute * 10,
+		ctx:                      ctx,
+	}
+
+	if db.reaperPeriod > 0 {
+		logger.Infof("starting processDB reaper with interval %s", db.reaperPeriod)
+	}
+
+	if db.reapProcesses {
+		logger.Info("WARNING: reaping orphaned processes. May result in data loss.")
 	}
 	db.startReaper()
 	return &db, nil
@@ -260,18 +301,34 @@ func (db *DB) insertProcess(process Process) {
 	}
 }
 
+// InsertExec adds an exec event
 func (db *DB) InsertExec(exec types.ProcessExecEvent) {
 	db.mutex.Lock()
 	defer db.mutex.Unlock()
 
 	proc := Process{
-		PIDs:     pidInfoFromProto(exec.PIDs),
-		Creds:    credInfoFromProto(exec.Creds),
-		CTTY:     ttyDevFromProto(exec.CTTY),
-		Argv:     exec.Argv,
-		Cwd:      exec.CWD,
-		Env:      exec.Env,
-		Filename: exec.Filename,
+		PIDs:             pidInfoFromProto(exec.PIDs),
+		Creds:            credInfoFromProto(exec.Creds),
+		CTTY:             ttyDevFromProto(exec.CTTY),
+		Argv:             exec.Argv,
+		Cwd:              exec.CWD,
+		Filename:         exec.Filename,
+		procfsLookupFail: exec.ProcfsLookupFail,
+		insertTime:       time.Now(),
+	}
+	if proc.procfsLookupFail {
+		db.stats.procfsLookupFail.Add(1)
+	}
+
+	// check to see if an orphaned exit event maps to this exec event.
+ // the out-of-order problem where we get the exit before the exec usually happens under load. + // if we don't track orphaned processes like this, we'll never scrub them from the DB. + if evt, ok := db.removalMap[proc.PIDs.Tgid]; ok { + proc.ExitCode = evt.exitCode + db.stats.resolvedOrphanExits.Add(1) + db.logger.Debugf("resolved orphan exit for pid %d", proc.PIDs.Tgid) + evt.startTime = proc.PIDs.StartTimeNS + db.removalMap[proc.PIDs.Tgid] = evt } db.processes[exec.PIDs.Tgid] = proc @@ -286,7 +343,7 @@ func (db *DB) createEntryLeader(pid uint32, entryType EntryType) { db.logger.Debugf("created entry leader %d: %s, name: %s", pid, string(entryType), db.processes[pid].Filename) } -// pid returned is a pointer type because its possible for no +// pid returned is a pointer type because it is possible no matching PID is found. func (db *DB) evaluateEntryLeader(p Process) *uint32 { pid := p.PIDs.Tgid @@ -387,6 +444,7 @@ func (db *DB) evaluateEntryLeader(p Process) *uint32 { return nil } +// InsertSetsid adds a set SID event func (db *DB) InsertSetsid(setsid types.ProcessSetsidEvent) { db.mutex.Lock() defer db.mutex.Unlock() @@ -401,23 +459,28 @@ func (db *DB) InsertSetsid(setsid types.ProcessSetsidEvent) { } } +// InsertExit adds a process exit event func (db *DB) InsertExit(exit types.ProcessExitEvent) { db.mutex.Lock() defer db.mutex.Unlock() - pid := exit.PIDs.Tgid + newRemoval := removalCandidate{ + pid: pid, + exitTime: time.Now(), + exitCode: exit.ExitCode, + } + process, ok := db.processes[pid] if !ok { - db.logger.Debugf("could not insert exit, pid %v not found in db", pid) - return + newRemoval.orphanTime = time.Now() + db.logger.Debugf("pid %v for exit event not found in db, adding as orphan", pid) + } else { + // If we already have the process, add our exit info + process.ExitCode = exit.ExitCode + db.processes[pid] = process + newRemoval.startTime = process.PIDs.StartTimeNS } - process.ExitCode = exit.ExitCode - db.processes[pid] = process - heap.Push(&db.removalCandidates, removalCandidate{ - pid: pid, - startTime: process.PIDs.StartTimeNS, - exitTime: time.Now(), - }) + db.removalMap[pid] = newRemoval } func fullProcessFromDBProcess(p Process) types.Process { @@ -610,8 +673,10 @@ func (db *DB) GetProcess(pid uint32) (types.Process, error) { process, ok := db.processes[pid] if !ok { + db.stats.failedToFindProcessCount.Add(1) return types.Process{}, errors.New("process not found") } + db.stats.servedProcessCount.Add(1) ret := fullProcessFromDBProcess(process) @@ -651,6 +716,7 @@ func (db *DB) GetProcess(pid uint32) (types.Process, error) { } } else { db.logger.Debugf("failed to find entry leader for %d (%s)", pid, db.processes[pid].Filename) + db.stats.entryLeaderLookupFail.Add(1) } db.setEntityID(&ret) @@ -695,7 +761,6 @@ func (db *DB) ScrapeProcfs() []uint32 { CTTY: ttyDevFromProto(procInfo.CTTY), Argv: procInfo.Argv, Cwd: procInfo.Cwd, - Env: procInfo.Env, Filename: procInfo.Filename, } diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/db_test.go b/x-pack/auditbeat/processors/sessionmd/processdb/db_test.go index 5cd1eed1ffcb..ff6a943e61ba 100644 --- a/x-pack/auditbeat/processors/sessionmd/processdb/db_test.go +++ b/x-pack/auditbeat/processors/sessionmd/processdb/db_test.go @@ -7,19 +7,207 @@ package processdb import ( + "context" "testing" + "time" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/auditbeat/helper/tty" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/procfs" + 
"github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/timeutils" + "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/monitoring" ) var logger = logp.NewLogger("processdb") +var testAlwaysTimeout = func(_, _ time.Time) bool { + return false +} + +var testNeverTimeout = func(_, _ time.Time) bool { + return true +} + func TestGetTTYType(t *testing.T) { require.Equal(t, tty.TTYConsole, tty.GetTTYType(4, 0)) require.Equal(t, tty.Pts, tty.GetTTYType(136, 0)) require.Equal(t, tty.TTY, tty.GetTTYType(4, 64)) require.Equal(t, tty.TTYUnknown, tty.GetTTYType(1000, 1000)) } + +func TestProcessOrphanResolve(t *testing.T) { + // test to make sure that if we get an exit event before a exec event, we still match up the two + + // uncomment if you want some logs + //_ = logp.DevelopmentSetup() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + reader := procfs.NewProcfsReader(*logger) + testDB, err := NewDB(ctx, monitoring.NewRegistry(), reader, logp.L(), -1, false) + require.NoError(t, err) + removalFuncTimeoutWaiting = testAlwaysTimeout + + pid1 := types.PIDInfo{Tgid: 10, StartTimeNS: 19} + pid2 := types.PIDInfo{Tgid: 11, StartTimeNS: 25} + + exitCode1 := int32(24) + exitCode2 := int32(30) + + exit1 := types.ProcessExitEvent{PIDs: pid1, ExitCode: exitCode1} + exit2 := types.ProcessExitEvent{PIDs: pid2, ExitCode: exitCode2} + + exec1 := types.ProcessExecEvent{PIDs: pid1} + exec2 := types.ProcessExecEvent{PIDs: pid2} + + testDB.InsertExit(exit1) + testDB.InsertExit(exit2) + + testDB.InsertExec(exec1) + testDB.InsertExec(exec2) + + res1, err := testDB.GetProcess(pid1.Tgid) + require.NoError(t, err) + require.Equal(t, exitCode1, res1.ExitCode) + require.Equal(t, timeutils.TimeFromNsSinceBoot(timeutils.ReduceTimestampPrecision(pid1.StartTimeNS)), res1.Start) + + res2, err := testDB.GetProcess(pid2.Tgid) + require.NoError(t, err) + require.Equal(t, exitCode2, res2.ExitCode) + require.Equal(t, timeutils.TimeFromNsSinceBoot(timeutils.ReduceTimestampPrecision(pid2.StartTimeNS)), res2.Start) + // verify that the pid is removed once we run a pass of the reaper + require.Len(t, testDB.processes, 2) + require.Len(t, testDB.removalMap, 2) + testDB.reapProcs() + require.Len(t, testDB.processes, 0) + require.Len(t, testDB.removalMap, 0) +} + +func TestReapExitOrphans(t *testing.T) { + // test to make sure that orphaned exit events are still cleaned up + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + reader := procfs.NewProcfsReader(*logger) + testDB, err := NewDB(ctx, monitoring.NewRegistry(), reader, logp.L(), -1, false) + require.NoError(t, err) + removalFuncTimeoutWaiting = testAlwaysTimeout + orphanFuncTimeoutWaiting = testAlwaysTimeout + + testDB.InsertExit(types.ProcessExitEvent{PIDs: types.PIDInfo{Tgid: 10, StartTimeNS: 19}, ExitCode: 0}) + testDB.InsertExit(types.ProcessExitEvent{PIDs: types.PIDInfo{Tgid: 11, StartTimeNS: 20}, ExitCode: 0}) + testDB.InsertExit(types.ProcessExitEvent{PIDs: types.PIDInfo{Tgid: 12, StartTimeNS: 25}, ExitCode: 0}) + + require.Len(t, testDB.removalMap, 3) + + testDB.reapProcs() + + require.Len(t, testDB.removalMap, 0) +} + +func TestReapProcesses(t *testing.T) { + reader := procfs.NewProcfsReader(*logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testDB, err := NewDB(ctx, monitoring.NewRegistry(), reader, logp.L(), -1, true) + require.NoError(t, err) + testDB.processReapAfter 
= time.Duration(0) + removalFuncTimeoutWaiting = testNeverTimeout + + pid1 := types.PIDInfo{Tgid: 10, StartTimeNS: 19} + pid2 := types.PIDInfo{Tgid: 11, StartTimeNS: 25} + pid3 := types.PIDInfo{Tgid: 13, StartTimeNS: 40} + pid4 := types.PIDInfo{Tgid: 14, StartTimeNS: 50} + + exec1 := types.ProcessExecEvent{PIDs: pid1, ProcfsLookupFail: true} + exec2 := types.ProcessExecEvent{PIDs: pid2, ProcfsLookupFail: true} + exec3 := types.ProcessExecEvent{PIDs: pid3, ProcfsLookupFail: true} + // if we got a procfs lookup, don't reap + exec4 := types.ProcessExecEvent{PIDs: pid4, ProcfsLookupFail: false} + + testDB.InsertExec(exec1) + testDB.InsertExec(exec2) + testDB.InsertExec(exec3) + testDB.InsertExec(exec4) + + // if a process has a corresponding exit, do not reap + testDB.InsertExit(types.ProcessExitEvent{PIDs: pid3, ExitCode: 0}) + + testDB.reapProcs() + + // make sure processes are removed + require.NotContains(t, testDB.processes, pid1.Tgid) + require.NotContains(t, testDB.processes, pid2.Tgid) + require.Contains(t, testDB.processes, pid3.Tgid) + require.Contains(t, testDB.processes, pid4.Tgid) +} + +func TestReapProcessesWithProcFS(t *testing.T) { + mockReader := procfs.NewMockReader() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testDB, err := NewDB(ctx, monitoring.NewRegistry(), mockReader, logp.L(), -1, false) + require.NoError(t, err) + testDB.reapProcesses = true + testDB.processReapAfter = time.Duration(0) + removalFuncTimeoutWaiting = testNeverTimeout + orphanFuncTimeoutWaiting = testAlwaysTimeout + + // insert procfs entries for two of the pids + mockReader.AddEntry(10, procfs.ProcessInfo{}) + mockReader.AddEntry(11, procfs.ProcessInfo{}) + + pid1 := types.PIDInfo{Tgid: 10, StartTimeNS: 19} + pid2 := types.PIDInfo{Tgid: 11, StartTimeNS: 25} + pid3 := types.PIDInfo{Tgid: 13, StartTimeNS: 40} + + exec1 := types.ProcessExecEvent{PIDs: pid1, ProcfsLookupFail: false} + exec2 := types.ProcessExecEvent{PIDs: pid2, ProcfsLookupFail: false} + exec3 := types.ProcessExecEvent{PIDs: pid3, ProcfsLookupFail: false} + + testDB.InsertExec(exec1) + testDB.InsertExec(exec2) + testDB.InsertExec(exec3) + + testDB.reapProcs() + // after one iteration, 3 should be marked as `LookupFail`, others should be fine + require.True(t, testDB.processes[pid3.Tgid].procfsLookupFail) + require.False(t, testDB.processes[pid2.Tgid].procfsLookupFail) + require.False(t, testDB.processes[pid1.Tgid].procfsLookupFail) + + // after a second reap, they should be removed + testDB.reapProcs() + + require.NotContains(t, testDB.processes, pid3.Tgid) + require.Contains(t, testDB.processes, pid1.Tgid) + require.Contains(t, testDB.processes, pid2.Tgid) +} + +func TestReapingProcessesOrphanResolvedRace(t *testing.T) { + // test to make sure that if we resolve a process in between mutex holds, we won't prematurely reap it + mockReader := procfs.NewMockReader() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testDB, err := NewDB(ctx, monitoring.NewRegistry(), mockReader, logp.L(), -1, false) + require.NoError(t, err) + testDB.reapProcesses = true + testDB.processReapAfter = time.Duration(0) + removalFuncTimeoutWaiting = testNeverTimeout + orphanFuncTimeoutWaiting = testAlwaysTimeout + + // insert procfs entries for two of the pids + pid1 := types.PIDInfo{Tgid: 10, StartTimeNS: 19} + exec1 := types.ProcessExecEvent{PIDs: pid1, ProcfsLookupFail: false} + testDB.InsertExec(exec1) + + testDB.reapProcs() + // should now be marked as lookup fail + require.True(t, 
testDB.processes[pid1.Tgid].procfsLookupFail) + + // now we get our exit + testDB.InsertExit(types.ProcessExitEvent{PIDs: pid1}) + testDB.reapProcs() + // process should still exist + require.Contains(t, testDB.processes, pid1.Tgid) +} diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go b/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go index a3315d61b4c4..57dc717b4078 100644 --- a/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go +++ b/x-pack/auditbeat/processors/sessionmd/processdb/entry_leader_test.go @@ -7,14 +7,17 @@ package processdb import ( + "context" "path" "testing" + "time" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/auditbeat/helper/tty" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/procfs" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" + "github.com/elastic/elastic-agent-libs/monitoring" ) const ( @@ -80,7 +83,7 @@ func requireProcess(t *testing.T, db *DB, pid uint32, processPath string) { } } -func requireParent(t *testing.T, db *DB, pid uint32, ppid uint32) { +func requireParent(t *testing.T, db *DB, pid, ppid uint32) { t.Helper() process, err := db.GetProcess(pid) require.Nil(t, err) @@ -94,7 +97,7 @@ func requireParentUnset(t *testing.T, process types.Process) { require.Nil(t, process.Parent.Start) } -func requireSessionLeader(t *testing.T, db *DB, pid uint32, sid uint32) { +func requireSessionLeader(t *testing.T, db *DB, pid, sid uint32) { t.Helper() process, err := db.GetProcess(pid) require.Nil(t, err) @@ -110,7 +113,7 @@ func requireSessionLeaderUnset(t *testing.T, process types.Process) { require.Nil(t, process.SessionLeader.Start) } -func requireGroupLeader(t *testing.T, db *DB, pid uint32, pgid uint32) { +func requireGroupLeader(t *testing.T, db *DB, pid, pgid uint32) { t.Helper() process, err := db.GetProcess(pid) require.Nil(t, err) @@ -119,7 +122,7 @@ func requireGroupLeader(t *testing.T, db *DB, pid uint32, pgid uint32) { require.Equal(t, pid == pgid, *process.GroupLeader.SameAsProcess) } -func requireEntryLeader(t *testing.T, db *DB, pid uint32, entryPID uint32, expectedEntryType EntryType) { +func requireEntryLeader(t *testing.T, db *DB, pid, entryPID uint32, expectedEntryType EntryType) { t.Helper() process, err := db.GetProcess(pid) require.Nil(t, err) @@ -190,7 +193,9 @@ func populateProcfsWithInit(reader *procfs.MockReader) { func TestSingleProcessSessionLeaderEntryTypeTerminal(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -214,7 +219,9 @@ func TestSingleProcessSessionLeaderEntryTypeTerminal(t *testing.T) { func TestSingleProcessSessionLeaderLoginProcess(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -243,7 +250,9 @@ func TestSingleProcessSessionLeaderLoginProcess(t *testing.T) { func TestSingleProcessSessionLeaderChildOfInit(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, 
logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -273,7 +282,9 @@ func TestSingleProcessSessionLeaderChildOfInit(t *testing.T) { func TestSingleProcessSessionLeaderChildOfSsmSessionWorker(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -309,7 +320,9 @@ func TestSingleProcessSessionLeaderChildOfSsmSessionWorker(t *testing.T) { func TestSingleProcessSessionLeaderChildOfSshd(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -344,7 +357,9 @@ func TestSingleProcessSessionLeaderChildOfSshd(t *testing.T) { func TestSingleProcessSessionLeaderChildOfContainerdShim(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -379,7 +394,9 @@ func TestSingleProcessSessionLeaderChildOfContainerdShim(t *testing.T) { func TestSingleProcessSessionLeaderChildOfRunc(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -415,7 +432,9 @@ func TestSingleProcessSessionLeaderChildOfRunc(t *testing.T) { func TestSingleProcessEmptyProcess(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -448,7 +467,9 @@ func TestSingleProcessEmptyProcess(t *testing.T) { // EntryLeaderEntryMetaType func TestSingleProcessOverwriteOldEntryLeader(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -519,7 +540,9 @@ func TestSingleProcessOverwriteOldEntryLeader(t *testing.T) { func TestInitSshdBashLs(t *testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -602,7 +625,9 @@ func TestInitSshdBashLs(t *testing.T) { func TestInitSshdSshdBashLs(t *testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -694,7 +719,9 @@ func TestInitSshdSshdBashLs(t *testing.T) { func TestInitSshdSshdSshdBashLs(t 
*testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -803,7 +830,9 @@ func TestInitSshdSshdSshdBashLs(t *testing.T) { func TestInitContainerdContainerdShim(t *testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -856,7 +885,9 @@ func TestInitContainerdContainerdShim(t *testing.T) { func TestInitContainerdShimBashContainerdShimIsReparentedToInit(t *testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -925,7 +956,9 @@ func TestInitContainerdShimBashContainerdShimIsReparentedToInit(t *testing.T) { func TestInitContainerdShimPauseContainerdShimIsReparentedToInit(t *testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -997,7 +1030,9 @@ func TestInitContainerdShimPauseContainerdShimIsReparentedToInit(t *testing.T) { func TestInitSshdBashLsAndGrepGrepOnlyHasGroupLeader(t *testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -1084,7 +1119,9 @@ func TestInitSshdBashLsAndGrepGrepOnlyHasGroupLeader(t *testing.T) { func TestInitSshdBashLsAndGrepGrepOnlyHasSessionLeader(t *testing.T) { reader := procfs.NewMockReader() populateProcfsWithInit(reader) - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -1166,7 +1203,9 @@ func TestInitSshdBashLsAndGrepGrepOnlyHasSessionLeader(t *testing.T) { // entry meta type of "unknown" and making it an entry leader. func TestGrepInIsolation(t *testing.T) { reader := procfs.NewMockReader() - db, err := NewDB(reader, *logger) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) db.ScrapeProcfs() @@ -1199,7 +1238,9 @@ func TestGrepInIsolation(t *testing.T) { // Kernel threads should never have an entry meta type or entry leader set. 
func TestKernelThreads(t *testing.T) {
 	reader := procfs.NewMockReader()
-	db, err := NewDB(reader, *logger)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false)
 	require.Nil(t, err)
 
 	kthreaddPID := uint32(2)
@@ -1251,7 +1292,9 @@ func TestKernelThreads(t *testing.T) {
 func TestPIDReuseSameSession(t *testing.T) {
 	reader := procfs.NewMockReader()
 	populateProcfsWithInit(reader)
-	db, err := NewDB(reader, *logger)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false)
 	require.Nil(t, err)
 
 	db.ScrapeProcfs()
@@ -1356,7 +1399,9 @@ func TestPIDReuseNewSession(t *testing.T) {
 	reader := procfs.NewMockReader()
 	populateProcfsWithInit(reader)
-	db, err := NewDB(reader, *logger)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	db, err := NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false)
 	require.Nil(t, err)
 
 	db.ScrapeProcfs()
diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/metrics.go b/x-pack/auditbeat/processors/sessionmd/processdb/metrics.go
new file mode 100644
index 000000000000..665d63fa30f1
--- /dev/null
+++ b/x-pack/auditbeat/processors/sessionmd/processdb/metrics.go
@@ -0,0 +1,56 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:build linux
+
+package processdb
+
+import (
+	"github.com/elastic/elastic-agent-libs/monitoring"
+)
+
+type Stats struct {
+	// number of orphans we have resolved, meaning we got the exit event before the exec event.
+	resolvedOrphanExits *monitoring.Uint
+	// orphan exit events (an exit with no matching exec) that were never matched and later reaped.
+	reapedOrphanExits *monitoring.Uint
+	// current size of the process map
+	currentProcs *monitoring.Uint
+	// current size of the exit map
+	currentExit *monitoring.Uint
+	// number of orphaned (an exec with no matching exit) processes that were removed from the DB by the reaper.
+ reapedOrphanProcesses *monitoring.Uint + // count of times we successfully served a process upstream + servedProcessCount *monitoring.Uint + // count of times we could not find a process for the upstream processor + failedToFindProcessCount *monitoring.Uint + // count of processes removed from the DB with a matching exit + reapedProcesses *monitoring.Uint + // processes where we couldn't find a matching hostfs entry + procfsLookupFail *monitoring.Uint + // number of processes marked as session entry leaders + entryLeaders *monitoring.Uint + // number of session process relationships + entryLeaderRelationships *monitoring.Uint + // number of times we failed to find an entry leader for a process + entryLeaderLookupFail *monitoring.Uint +} + +func NewStats(reg *monitoring.Registry) *Stats { + obj := &Stats{ + resolvedOrphanExits: monitoring.NewUint(reg, "resolved_orphan_exits"), + reapedOrphanExits: monitoring.NewUint(reg, "reaped_orphan_exits"), + currentProcs: monitoring.NewUint(reg, "processes_gauge"), + currentExit: monitoring.NewUint(reg, "exit_events_gauge"), + reapedOrphanProcesses: monitoring.NewUint(reg, "reaped_orphan_processes"), + servedProcessCount: monitoring.NewUint(reg, "served_process_count"), + failedToFindProcessCount: monitoring.NewUint(reg, "failed_process_lookup_count"), + reapedProcesses: monitoring.NewUint(reg, "reaped_processes"), + procfsLookupFail: monitoring.NewUint(reg, "procfs_lookup_fail"), + entryLeaders: monitoring.NewUint(reg, "entry_leaders_gauge"), + entryLeaderRelationships: monitoring.NewUint(reg, "entry_leader_relationships_gauge"), + entryLeaderLookupFail: monitoring.NewUint(reg, "entry_leader_lookup_fail"), + } + return obj +} diff --git a/x-pack/auditbeat/processors/sessionmd/processdb/reaper.go b/x-pack/auditbeat/processors/sessionmd/processdb/reaper.go index 12751bead9c3..ac2d4782ddc6 100644 --- a/x-pack/auditbeat/processors/sessionmd/processdb/reaper.go +++ b/x-pack/auditbeat/processors/sessionmd/processdb/reaper.go @@ -7,48 +7,34 @@ package processdb import ( - "container/heap" "time" ) const ( - reaperInterval = 30 * time.Second // run the reaper process at this interval - removalTimeout = 10 * time.Second // remove processes that have been exited longer than this + removalCandidateTimeout = 10 * time.Second // remove processes that have been exited longer than this + orphanTimeout = 90 * time.Second // remove orphan exit events that have been around longer than this ) -type removalCandidate struct { - pid uint32 - exitTime time.Time - startTime uint64 +// the reaper logic for removing a process. +// split out to a new function to ease testing. +var removalFuncTimeoutWaiting = func(now, exitTime time.Time) bool { + return now.Sub(exitTime) < removalCandidateTimeout } -type rcHeap []removalCandidate - -func (h rcHeap) Len() int { - return len(h) +// the reaper logic for removing an orphaned exit event. +// split out to a new function to ease testing. 
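+// Same shape as removalFuncTimeoutWaiting above, but with the longer orphanTimeout window, so an out-of-order exec event has time to arrive before its orphaned exit is dropped.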
+var orphanFuncTimeoutWaiting = func(now, exitTime time.Time) bool { + return now.Sub(exitTime) < orphanTimeout } -func (h rcHeap) Less(i, j int) bool { - return h[i].exitTime.Sub(h[j].exitTime) < 0 -} - -func (h rcHeap) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} - -func (h *rcHeap) Push(x any) { - v, ok := x.(removalCandidate) - if ok { - *h = append(*h, v) - } -} +type removalCandidate struct { + pid uint32 + exitTime time.Time + startTime uint64 -func (h *rcHeap) Pop() any { - old := *h - n := len(old) - x := old[n-1] - *h = old[0 : n-1] - return x + // only used for orphan exit events + orphanTime time.Time + exitCode int32 } // The reaper will remove exited processes from the DB a short time after they have exited. @@ -59,51 +45,122 @@ func (h *rcHeap) Pop() any { // it cannot have a relation with any other longer-lived processes. If this processor is ported to other OSs, this // assumption will need to be revisited. func (db *DB) startReaper() { - go func(db *DB) { - ticker := time.NewTicker(reaperInterval) - defer ticker.Stop() - - h := &db.removalCandidates - heap.Init(h) - for { - select { - case <-ticker.C: - db.mutex.Lock() - now := time.Now() - for { - if len(db.removalCandidates) == 0 { - break - } - v := heap.Pop(h) - c, ok := v.(removalCandidate) - if !ok { - db.logger.Debugf("unexpected item in removal queue: \"%v\"", v) - continue - } - if now.Sub(c.exitTime) < removalTimeout { - // this candidate hasn't reached its timeout, put it back on the heap - // everything else will have a later exit time, so end this run - heap.Push(h, c) - break - } - p, ok := db.processes[c.pid] - if !ok { - db.logger.Debugf("pid %v was candidate for removal, but was already removed", c.pid) - continue - } - if p.PIDs.StartTimeNS != c.startTime { - // this could happen if the PID has already rolled over and reached this PID again. - db.logger.Debugf("start times of removal candidate %v differs, not removing (PID had been reused?)", c.pid) - continue - } - delete(db.processes, c.pid) - delete(db.entryLeaders, c.pid) - delete(db.entryLeaderRelationships, c.pid) + if db.reaperPeriod > 0 { + go func(db *DB) { + ticker := time.NewTicker(db.reaperPeriod) + defer ticker.Stop() + + for { + select { + case <-db.ctx.Done(): + db.logger.Infof("got context done, closing reaper") + return + case <-ticker.C: + db.reapProcs() + case <-db.stopChan: + return } - db.mutex.Unlock() - case <-db.stopChan: - return } + }(db) + } +} + +// run as a separate function to make testing easier +func (db *DB) reapProcs() { + db.mutex.Lock() + now := time.Now() + db.logger.Debugf("REAPER: processes: %d removal candidates: %d", len(db.processes), len(db.removalMap)) + + for pid, cand := range db.removalMap { + // this candidate hasn't reached its timeout, can't be removed yet + if removalFuncTimeoutWaiting(now, cand.exitTime) { + continue } - }(db) + + p, ok := db.processes[pid] + // this represents an orphaned exit event with no matching exec event. + // in this case, allow a few reaper iterations for the exec to show up, since events can arrive out of order. + // In our current state, we'll have a lot of orphaned exit events, + // as we don't track `fork` events. + if !ok { + if !orphanFuncTimeoutWaiting(now, cand.orphanTime) { + db.stats.reapedOrphanExits.Add(1) + delete(db.removalMap, pid) + } + + continue + } + if p.PIDs.StartTimeNS != cand.startTime { + // this could happen if the PID has already rolled over and reached this PID again.
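+ // The start time acts as a generation check here: a recycled PID gets a fresh start time, so this entry belongs to a new process and must be left alone.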
+ db.logger.Debugf("start times of removal candidate %v differ, not removing (PID had been reused?)", pid) + continue + } + db.stats.reapedProcesses.Add(1) + delete(db.removalMap, pid) + delete(db.processes, pid) + delete(db.entryLeaders, pid) + delete(db.entryLeaderRelationships, pid) + + } + + // We also need to go through and reap suspect processes. + // This processor can't rely on any sort of guarantee that we'll get every event, + // as the audit netlink socket may drop events, and the user may misconfigure + // the auditd rules so we don't catch every event. + // As a result, we may need to drop processes that appear orphaned. + var procsToTest []uint32 + if db.reapProcesses { + for pid, proc := range db.processes { + // if a process can't be found in procfs, that may mean it's already exited, + // so we can "safely" remove it after a certain period. + // however this is still a tad risky, as if the user is running in some kind of + // container environment where they have access to netlink but not to procfs, + // we'll remove live processes. + if proc.procfsLookupFail { + _, matchingExit := db.removalMap[pid] + if now.Sub(proc.insertTime) > db.processReapAfter && !matchingExit { + delete(db.processes, pid) + // more potential for data loss; if we don't reap these, they can leak, but we may break relationships if a later child PID comes along looking for + // an entry leader that matches our orphaned exec event. + delete(db.entryLeaders, pid) + delete(db.entryLeaderRelationships, pid) + db.stats.reapedOrphanProcesses.Add(1) + } + } else { + // be extra cautious when trying to reap processes that we have procfs data for; check that the processes are still running first, + // as this is more likely to lead to data loss if running inside a container. + // In order to check these, we'll need to reach out to /proc, which is more work than I'd rather do while holding a global mutex that's stopping the entire DB. + // So gather a list now, then check them later. + procsToTest = append(procsToTest, pid) + } + } + } + + db.stats.currentExit.Set(uint64(len(db.removalMap))) + db.stats.currentProcs.Set(uint64(len(db.processes))) + db.stats.entryLeaders.Set(uint64(len(db.entryLeaders))) + db.stats.entryLeaderRelationships.Set(uint64(len(db.entryLeaderRelationships))) + + db.mutex.Unlock() + + // check to make sure that the process still exists.
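+ // This runs after db.mutex is released above, since ProcessExists reads /proc and we don't want to block every other user of the DB on filesystem I/O.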
+ if db.reapProcesses && len(procsToTest) > 0 { + var deadProcs []uint32 + for _, proc := range procsToTest { + if !db.procfs.ProcessExists(proc) { + deadProcs = append(deadProcs, proc) + } + } + + // now grab mutex again, mark processes we know are dead + db.mutex.Lock() + for _, deadProc := range deadProcs { + if proc, ok := db.processes[deadProc]; ok { + // set the lookup fail flag, let the rest of the reaper deal with it + proc.procfsLookupFail = true + db.processes[deadProc] = proc + } + } + db.mutex.Unlock() + } } diff --git a/x-pack/auditbeat/processors/sessionmd/procfs/mock.go b/x-pack/auditbeat/processors/sessionmd/procfs/mock.go index 1689873044ec..5c1edcdf2f4a 100644 --- a/x-pack/auditbeat/processors/sessionmd/procfs/mock.go +++ b/x-pack/auditbeat/processors/sessionmd/procfs/mock.go @@ -8,23 +8,30 @@ package procfs import ( "fmt" + "sync" ) type MockReader struct { entries map[uint32]ProcessInfo + mut *sync.Mutex } func NewMockReader() *MockReader { return &MockReader{ entries: make(map[uint32]ProcessInfo), + mut: &sync.Mutex{}, } } func (r *MockReader) AddEntry(pid uint32, entry ProcessInfo) { + r.mut.Lock() + defer r.mut.Unlock() r.entries[pid] = entry } func (r *MockReader) GetProcess(pid uint32) (ProcessInfo, error) { + r.mut.Lock() + defer r.mut.Unlock() entry, ok := r.entries[pid] if !ok { return ProcessInfo{}, fmt.Errorf("not found") @@ -32,7 +39,16 @@ func (r *MockReader) GetProcess(pid uint32) (ProcessInfo, error) { return entry, nil } +func (r *MockReader) ProcessExists(pid uint32) bool { + r.mut.Lock() + defer r.mut.Unlock() + _, ok := r.entries[pid] + return ok +} + func (r *MockReader) GetAllProcesses() ([]ProcessInfo, error) { + r.mut.Lock() + defer r.mut.Unlock() ret := make([]ProcessInfo, 0, len(r.entries)) for _, entry := range r.entries { diff --git a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go index 2d5d26bc3883..9b689c344a3e 100644 --- a/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go +++ b/x-pack/auditbeat/processors/sessionmd/procfs/procfs.go @@ -31,6 +31,7 @@ func MinorTTY(ttyNr uint32) uint32 { type Reader interface { GetProcess(pid uint32) (ProcessInfo, error) GetAllProcesses() ([]ProcessInfo, error) + ProcessExists(pid uint32) bool } type ProcfsReader struct { @@ -186,6 +187,11 @@ func (r ProcfsReader) GetProcess(pid uint32) (ProcessInfo, error) { return r.getProcessInfo(proc) } +func (ProcfsReader) ProcessExists(pid uint32) bool { + _, err := procfs.NewProc(int(pid)) + return err == nil +} + // returns empty slice on error func (r ProcfsReader) GetAllProcesses() ([]ProcessInfo, error) { procs, err := procfs.AllProcs() diff --git a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go index 34c3166f26fa..7baf8b75f25e 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider.go @@ -71,6 +71,7 @@ func (p prvdr) Sync(ev *beat.Event, pid uint32) error { p.logger.Debugw("couldn't get process info from proc for pid", "pid", pid, "error", err) // If process info couldn't be taken from procfs, populate with as much info as // possible from the event + pe.ProcfsLookupFail = true pe.PIDs.Tgid = pid var intr interface{} var i int diff --git a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go 
b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go index 3d4941882f3b..4a90c3651714 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/procfsprovider/procfsprovider_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/beat" @@ -19,13 +20,80 @@ import ( "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/elastic-agent-libs/monitoring" ) var ( - logger = *logp.NewLogger("procfs_test") + logger = logp.NewLogger("procfs_test") timestamp = time.Now() ) +func constructEvt(tgid uint32, syscall string) *beat.Event { + evt := &beat.Event{Fields: mapstr.M{}} + evt.Fields.Put("process.pid", tgid) + evt.Fields.Put(syscallField, syscall) + return evt +} + +func assertRegistryUint(t require.TestingT, reg *monitoring.Registry, key string, expected uint64, message string) { + entry := reg.Get(key) + assert.NotNil(t, entry) + + value, ok := reg.Get(key).(*monitoring.Uint) + assert.True(t, ok) + assert.Equal(t, expected, value.Get(), message) +} + +func loadDB(t *testing.T, count uint32, procHandler procfs.MockReader, prov prvdr) { + for i := uint32(1); i < count; i++ { + evt := constructEvt(i, "execve") + procHandler.AddEntry(i, procfs.ProcessInfo{PIDs: types.PIDInfo{Tgid: i, Ppid: 1234}}) + + err := prov.Sync(evt, i) + require.NoError(t, err) + + // verify that we got the process + found, err := prov.db.GetProcess(i) + require.NoError(t, err) + require.NotNil(t, found) + + // now insert the exit + exitEvt := constructEvt(i, "exit_group") + err = prov.Sync(exitEvt, i) + require.NoError(t, err) + + } +} + +func TestProviderLoadMetrics(t *testing.T) { + testReg := monitoring.NewRegistry() + testProc := procfs.NewMockReader() + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) + defer cancel() + + procDB, err := processdb.NewDB(ctx, testReg, testProc, logp.L(), time.Second*2, true) + require.NoError(t, err) + + testProvider, err := NewProvider(ctx, logp.L(), procDB, testProc, "process.pid") + require.NoError(t, err) + rawPrvdr, _ := testProvider.(prvdr) + + events := 100_000 + loadDB(t, uint32(events), *testProc, rawPrvdr) + + // wait for the maps to empty to the correct amount as the reaper runs + require.EventuallyWithT(t, func(collect *assert.CollectT) { + assertRegistryUint(collect, testReg, "processes_gauge", 0, "processes_gauge") + assertRegistryUint(collect, testReg, "exit_events_gauge", 0, "exit_events_gauge") + }, time.Minute*5, time.Second*10) + + // ensure processes are getting resolved properly + assertRegistryUint(t, testReg, "resolved_orphan_exits", 0, "resolved_orphan_exits") + assertRegistryUint(t, testReg, "reaped_orphan_exits", 0, "reaped_orphan_exits") + assertRegistryUint(t, testReg, "failed_process_lookup_count", 0, "failed_process_lookup_count") + assertRegistryUint(t, testReg, "procfs_lookup_fail", 0, "procfs_lookup_fail") +} + func TestExecveEvent(t *testing.T) { var pid uint32 = 100 event := beat.Event{ @@ -109,8 +177,10 @@ func TestExecveEvent(t *testing.T) { }, } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) + defer cancel() reader := procfs.NewMockReader() - db, err := processdb.NewDB(reader, logger) + db, err := processdb.NewDB(ctx, 
monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) for _, entry := range prereq { reader.AddEntry(entry.PIDs.Tgid, entry) @@ -121,7 +191,7 @@ func TestExecveEvent(t *testing.T) { reader.AddEntry(entry.PIDs.Tgid, entry) } - provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") + provider, err := NewProvider(context.TODO(), logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") err = provider.Sync(&event, expected.PIDs.Tgid) @@ -219,8 +289,10 @@ func TestExecveatEvent(t *testing.T) { }, } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) + defer cancel() reader := procfs.NewMockReader() - db, err := processdb.NewDB(reader, logger) + db, err := processdb.NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) for _, entry := range prereq { reader.AddEntry(entry.PIDs.Tgid, entry) @@ -231,7 +303,7 @@ func TestExecveatEvent(t *testing.T) { reader.AddEntry(entry.PIDs.Tgid, entry) } - provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") + provider, err := NewProvider(context.TODO(), logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") err = provider.Sync(&event, expected.PIDs.Tgid) @@ -306,15 +378,17 @@ func TestSetSidEvent(t *testing.T) { }, } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) + defer cancel() reader := procfs.NewMockReader() - db, err := processdb.NewDB(reader, logger) + db, err := processdb.NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) for _, entry := range prereq { reader.AddEntry(entry.PIDs.Tgid, entry) } db.ScrapeProcfs() - provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") + provider, err := NewProvider(context.TODO(), logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") err = provider.Sync(&event, expected.PIDs.Tgid) @@ -388,15 +462,17 @@ func TestSetSidEventFailed(t *testing.T) { }, } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) + defer cancel() reader := procfs.NewMockReader() - db, err := processdb.NewDB(reader, logger) + db, err := processdb.NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) for _, entry := range prereq { reader.AddEntry(entry.PIDs.Tgid, entry) } db.ScrapeProcfs() - provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") + provider, err := NewProvider(context.TODO(), logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") err = provider.Sync(&event, expected.PIDs.Tgid) @@ -459,15 +535,17 @@ func TestSetSidSessionLeaderNotScraped(t *testing.T) { }, } + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) + defer cancel() reader := procfs.NewMockReader() - db, err := processdb.NewDB(reader, logger) + db, err := processdb.NewDB(ctx, monitoring.NewRegistry(), reader, logger, time.Second*30, false) require.Nil(t, err) for _, entry := range prereq { reader.AddEntry(entry.PIDs.Tgid, entry) } db.ScrapeProcfs() - provider, err := NewProvider(context.TODO(), &logger, db, reader, "process.pid") + provider, err := NewProvider(context.TODO(), logger, db, reader, "process.pid") require.Nil(t, err, "error creating provider") err = provider.Sync(&event, expected.PIDs.Tgid) diff --git a/x-pack/auditbeat/processors/sessionmd/types/events.go 
b/x-pack/auditbeat/processors/sessionmd/types/events.go index 2f5cc3109e9d..f283ddd5e21c 100644 --- a/x-pack/auditbeat/processors/sessionmd/types/events.go +++ b/x-pack/auditbeat/processors/sessionmd/types/events.go @@ -56,9 +56,10 @@ type ProcessForkEvent struct { } type ProcessExecEvent struct { - PIDs PIDInfo - Creds CredInfo - CTTY tty.TTYDev + PIDs PIDInfo + Creds CredInfo + CTTY tty.TTYDev + ProcfsLookupFail bool // varlen fields CWD string From 3ff193186e7619bd427ddebd2091f2f53f9d7039 Mon Sep 17 00:00:00 2001 From: Christiano Haesbaert Date: Wed, 19 Feb 2025 20:17:35 +0100 Subject: [PATCH 08/41] Update testify and fix filebeat tests (#42747) Testify 1.10 fixed NotSame which exposes a bug in our filebeat tests: https://github.com/stretchr/testify/commit/118fb8346630c192421c8914848381af9d4412a7 Remove the structure comparison, as they will never be the same address. Require that the maps they hold have a different backing. --- NOTICE.txt | 4 ++-- filebeat/channel/runner_mock_test.go | 11 +++++++---- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 12 insertions(+), 9 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 61aa6ac7db3c..e92c00ff1216 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -22940,11 +22940,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/stretchr/testify -Version: v1.9.0 +Version: v1.10.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.9.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.10.0/LICENSE: MIT License diff --git a/filebeat/channel/runner_mock_test.go b/filebeat/channel/runner_mock_test.go index e784d833e9e3..55cda0f1d480 100644 --- a/filebeat/channel/runner_mock_test.go +++ b/filebeat/channel/runner_mock_test.go @@ -18,6 +18,7 @@ package channel import ( + "reflect" "testing" "github.com/elastic/beats/v7/libbeat/beat" @@ -69,6 +70,10 @@ func (r runnerFactoryMock) Assert(t *testing.T) { // we need to make sure `Assert` is called after `Create` require.Len(t, r.cfgs, r.clientCount) + sameBacking := func(a, b any) bool { + return reflect.ValueOf(a).UnsafePointer() == reflect.ValueOf(b).UnsafePointer() + } + t.Run("new processing configuration each time", func(t *testing.T) { for i, c1 := range r.cfgs { for j, c2 := range r.cfgs { @@ -76,10 +81,8 @@ func (r runnerFactoryMock) Assert(t *testing.T) { continue } - require.NotSamef(t, c1.Processing, c2.Processing, "processing configuration cannot be re-used") - require.NotSamef(t, c1.Processing.Meta, c2.Processing.Meta, "`Processing.Meta` cannot be re-used") - require.NotSamef(t, c1.Processing.Fields, c2.Processing.Fields, "`Processing.Fields` cannot be re-used") - require.NotSamef(t, c1.Processing.Processor, c2.Processing.Processor, "`Processing.Processor` cannot be re-used") + require.Falsef(t, sameBacking(c1.Processing.Meta, c2.Processing.Meta), "`Processing.Meta` cannot be re-used") + require.Falsef(t, sameBacking(c1.Processing.Fields, c2.Processing.Fields), "`Processing.Fields` cannot be re-used") } } }) diff --git a/go.mod b/go.mod index e474081d2096..df2ccc6cd803 100644 --- a/go.mod +++ b/go.mod @@ -118,7 +118,7 @@ require ( github.com/shopspring/decimal v1.3.1 // indirect github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify 
v1.10.0 github.com/ugorji/go/codec v1.1.8 github.com/vmware/govmomi v0.39.0 go.elastic.co/ecszap v1.0.2 diff --git a/go.sum b/go.sum index dcfcb934e66e..9fe54fd3bf77 100644 --- a/go.sum +++ b/go.sum @@ -901,8 +901,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/teambition/rrule-go v1.8.2 h1:lIjpjvWTj9fFUZCmuoVDrKVOtdiyzbzc93qTmRVe/J8= github.com/teambition/rrule-go v1.8.2/go.mod h1:Ieq5AbrKGciP1V//Wq8ktsTXwSwJHDD5mD/wLBGl3p4= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= From a28ac4ccf2f31638bdc6b72919089803bcb7cc0c Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Thu, 20 Feb 2025 12:25:07 +0100 Subject: [PATCH 09/41] {filebeat/winlogbeat} Extract runner logic for eventlog (#42736) * Extract runner logic for eventlog * do not use unnecessary runner struct * fix lint * Recover file --- filebeat/input/winlog/input.go | 136 ++++++++----------------------- winlogbeat/beater/eventlogger.go | 117 ++++++-------------------- winlogbeat/eventlog/runner.go | 128 +++++++++++++++++++++++++++++ 3 files changed, 184 insertions(+), 197 deletions(-) create mode 100644 winlogbeat/eventlog/runner.go diff --git a/filebeat/input/winlog/input.go b/filebeat/input/winlog/input.go index ab925cbdd3c6..882d93f30a0c 100644 --- a/filebeat/input/winlog/input.go +++ b/filebeat/input/winlog/input.go @@ -18,27 +18,39 @@ package winlog import ( - "errors" "fmt" - "io" - "time" input "github.com/elastic/beats/v7/filebeat/input/v2" cursor "github.com/elastic/beats/v7/filebeat/input/v2/input-cursor" "github.com/elastic/beats/v7/libbeat/feature" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-concert/ctxtool" - "github.com/elastic/go-concert/timed" "github.com/elastic/beats/v7/winlogbeat/checkpoint" "github.com/elastic/beats/v7/winlogbeat/eventlog" conf "github.com/elastic/elastic-agent-libs/config" ) -type eventlogRunner struct{} - const pluginName = "winlog" +type publisher struct { + cursorPub cursor.Publisher +} + +func (pub *publisher) Publish(records []eventlog.Record) error { + for _, record := range records { + event := record.ToEvent() + if err := pub.cursorPub.Publish(event, record.Offset); err != nil { + // Publisher indicates disconnect when returning an error. + // stop trying to publish records and quit + return err + } + } + return nil +} + +type winlogInput struct{} + // Plugin create a stateful input Plugin collecting logs from Windows Event Logs. 
func Plugin(log *logp.Logger, store cursor.StateStore) input.Plugin { return input.Plugin{ @@ -65,13 +77,13 @@ func configure(cfg *conf.C) ([]cursor.Source, cursor.Input, error) { } sources := []cursor.Source{eventLog} - return sources, eventlogRunner{}, nil + return sources, winlogInput{}, nil } -func (eventlogRunner) Name() string { return pluginName } +func (winlogInput) Name() string { return pluginName } -func (eventlogRunner) Test(source cursor.Source, ctx input.TestContext) error { - api := source.(eventlog.EventLog) +func (in winlogInput) Test(source cursor.Source, ctx input.TestContext) error { + api, _ := source.(eventlog.EventLog) err := api.Open(checkpoint.EventLogState{}) if err != nil { return fmt.Errorf("failed to open %q: %w", api.Channel(), err) @@ -79,105 +91,21 @@ func (eventlogRunner) Test(source cursor.Source, ctx input.TestContext) error { return api.Close() } -func (eventlogRunner) Run( +func (in winlogInput) Run( ctx input.Context, source cursor.Source, cursor cursor.Cursor, - publisher cursor.Publisher, + pub cursor.Publisher, ) error { - api := source.(eventlog.EventLog) + api, _ := source.(eventlog.EventLog) log := ctx.Logger.With("eventlog", source.Name(), "channel", api.Channel()) - - // setup closing the API if either the run function is signaled asynchronously - // to shut down or when returning after io.EOF - cancelCtx, cancelFn := ctxtool.WithFunc(ctx.Cancelation, func() { - if err := api.Close(); err != nil { - log.Errorw("Error while closing Windows Event Log access", "error", err) - } - }) - defer cancelFn() - - // Flag used to detect repeat "channel not found" errors, eliminating log spam. - channelNotFoundErrDetected := false - -runLoop: - for { - //nolint:nilerr // only log error if we are not shutting down - if cancelCtx.Err() != nil { - return nil - } - - evtCheckpoint := initCheckpoint(log, cursor) - openErr := api.Open(evtCheckpoint) - - switch { - case eventlog.IsRecoverable(openErr): - log.Errorw("Encountered recoverable error when opening Windows Event Log", "error", openErr) - _ = timed.Wait(cancelCtx, 5*time.Second) - continue - case !api.IsFile() && eventlog.IsChannelNotFound(openErr): - if !channelNotFoundErrDetected { - log.Errorw("Encountered channel not found error when opening Windows Event Log", "error", openErr) - } else { - log.Debugw("Encountered channel not found error when opening Windows Event Log", "error", openErr) - } - channelNotFoundErrDetected = true - _ = timed.Wait(cancelCtx, 5*time.Second) - continue - case openErr != nil: - return fmt.Errorf("failed to open Windows Event Log channel %q: %w", api.Channel(), openErr) - } - channelNotFoundErrDetected = false - - log.Debug("Windows Event Log opened successfully") - - // read loop - for cancelCtx.Err() == nil { - records, err := api.Read() - if eventlog.IsRecoverable(err) { - log.Errorw("Encountered recoverable error when reading from Windows Event Log", "error", err) - if resetErr := api.Reset(); resetErr != nil { - log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) - } - continue runLoop - } - if !api.IsFile() && eventlog.IsChannelNotFound(err) { - log.Errorw("Encountered channel not found error when reading from Windows Event Log", "error", err) - if resetErr := api.Reset(); resetErr != nil { - log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) - } - continue runLoop - } - - if err != nil { - if errors.Is(err, io.EOF) { - log.Debugw("End of Winlog event stream reached", "error", err) - return nil - } - - //nolint:nilerr // 
only log error if we are not shutting down - if cancelCtx.Err() != nil { - return nil - } - - log.Errorw("Error occurred while reading from Windows Event Log", "error", err) - return err - } - if len(records) == 0 { - _ = timed.Wait(cancelCtx, time.Second) - continue - } - - for _, record := range records { - event := record.ToEvent() - if err := publisher.Publish(event, record.Offset); err != nil { - // Publisher indicates disconnect when returning an error. - // stop trying to publish records and quit - return err - } - } - } - } + return eventlog.Run( + ctxtool.FromCanceller(ctx.Cancelation), + api, + initCheckpoint(log, cursor), + &publisher{cursorPub: pub}, + log, + ) } func initCheckpoint(log *logp.Logger, cursor cursor.Cursor) checkpoint.EventLogState { diff --git a/winlogbeat/beater/eventlogger.go b/winlogbeat/beater/eventlogger.go index 2a04f39df72b..b4bcab2b7586 100644 --- a/winlogbeat/beater/eventlogger.go +++ b/winlogbeat/beater/eventlogger.go @@ -18,9 +18,7 @@ package beater import ( - "errors" - "io" - "time" + "context" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/acker" @@ -54,6 +52,19 @@ type eventLoggerConfig struct { KeepNull bool `config:"keep_null"` } +type publisher struct { + client beat.Client + eventACKer *eventACKer +} + +func (p *publisher) Publish(records []eventlog.Record) error { + p.eventACKer.Add(len(records)) + for _, lr := range records { + p.client.Publish(lr.ToEvent()) + } + return nil +} + func newEventLogger( beatInfo beat.Info, source eventlog.EventLog, @@ -122,98 +133,18 @@ func (e *eventLogger) run( client.Close() }() - defer func() { - e.log.Info("Stop processing.") - - if err := api.Close(); err != nil { - e.log.Warnw("Close() error.", "error", err) - return - } + ctx, cancelFn := context.WithCancel(context.Background()) + go func() { + <-done + cancelFn() }() - // Flag used to detect repeat "channel not found" errors, eliminating log spam. - channelNotFoundErrDetected := false - -runLoop: - for stop := false; !stop; { - select { - case <-done: - return - default: - } - - err = api.Open(state) - - switch { - case eventlog.IsRecoverable(err): - e.log.Warnw("Open() encountered recoverable error. Trying again...", "error", err, "channel", api.Channel()) - time.Sleep(time.Second * 5) - continue - case !api.IsFile() && eventlog.IsChannelNotFound(err): - if !channelNotFoundErrDetected { - e.log.Warnw("Open() encountered channel not found error. Trying again...", "error", err, "channel", api.Channel()) - } else { - e.log.Debugw("Open() encountered channel not found error. Trying again...", "error", err, "channel", api.Channel()) - } - channelNotFoundErrDetected = true - time.Sleep(time.Second * 5) - continue - case err != nil: - e.log.Warnw("Open() error. No events will be read from this source.", "error", err, "channel", api.Channel()) - return - } - channelNotFoundErrDetected = false - - e.log.Debug("Opened successfully.") - - for !stop { - select { - case <-done: - return - default: - } - - // Read from the event. - records, err := api.Read() - if eventlog.IsRecoverable(err) { - e.log.Warnw("Read() encountered recoverable error. Reopening handle...", "error", err, "channel", api.Channel()) - if resetErr := api.Reset(); resetErr != nil { - e.log.Warnw("Reset() error.", "error", err) - } - continue runLoop - } - if !api.IsFile() && eventlog.IsChannelNotFound(err) { - e.log.Warnw("Read() encountered channel not found error for channel %q. 
Reopening handle...", "error", err, "channel", api.Channel()) - if resetErr := api.Reset(); resetErr != nil { - e.log.Warnw("Reset() error.", "error", err) - } - continue runLoop - } - - if err != nil { - if errors.Is(err, io.EOF) { - // Graceful stop. - stop = true - } else { - e.log.Warnw("Read() error.", "error", err, "channel", api.Channel()) - return - } - } - - e.log.Debugf("Read() returned %d records.", len(records)) - if len(records) == 0 { - time.Sleep(time.Second) - if stop { - return - } - continue - } - - eventACKer.Add(len(records)) - for _, lr := range records { - client.Publish(lr.ToEvent()) - } - } + publisher := &publisher{ + client: client, + eventACKer: eventACKer, + } + if err := eventlog.Run(ctx, api, state, publisher, e.log); err != nil { + e.log.Error(err) } } diff --git a/winlogbeat/eventlog/runner.go b/winlogbeat/eventlog/runner.go new file mode 100644 index 000000000000..0486d13ce545 --- /dev/null +++ b/winlogbeat/eventlog/runner.go @@ -0,0 +1,128 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package eventlog + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/elastic/beats/v7/winlogbeat/checkpoint" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/go-concert/ctxtool" + "github.com/elastic/go-concert/timed" +) + +type Publisher interface { + Publish(records []Record) error +} + +func Run( + ctx context.Context, + api EventLog, + evtCheckpoint checkpoint.EventLogState, + publisher Publisher, + log *logp.Logger, +) error { + // setup closing the API if either the run function is signaled asynchronously + // to shut down or when returning after io.EOF + cancelCtx, cancelFn := ctxtool.WithFunc(ctx, func() { + if err := api.Close(); err != nil { + log.Errorw("Error while closing Windows Event Log access", "error", err) + } + }) + defer cancelFn() + + // Flag used to detect repeat "channel not found" errors, eliminating log spam. 
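+	// The first occurrence is logged at Error level; repeats drop to Debug until an Open succeeds and the flag is reset.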
+ channelNotFoundErrDetected := false + +runLoop: + for { + //nolint:nilerr // only log error if we are not shutting down + if cancelCtx.Err() != nil { + return nil + } + + openErr := api.Open(evtCheckpoint) + + switch { + case IsRecoverable(openErr): + log.Errorw("Encountered recoverable error when opening Windows Event Log", "error", openErr) + _ = timed.Wait(cancelCtx, 5*time.Second) + continue + case !api.IsFile() && IsChannelNotFound(openErr): + if !channelNotFoundErrDetected { + log.Errorw("Encountered channel not found error when opening Windows Event Log", "error", openErr) + } else { + log.Debugw("Encountered channel not found error when opening Windows Event Log", "error", openErr) + } + channelNotFoundErrDetected = true + _ = timed.Wait(cancelCtx, 5*time.Second) + continue + case openErr != nil: + return fmt.Errorf("failed to open Windows Event Log channel %q: %w", api.Channel(), openErr) + } + channelNotFoundErrDetected = false + + log.Debug("Windows Event Log opened successfully") + + // read loop + for cancelCtx.Err() == nil { + records, err := api.Read() + if IsRecoverable(err) { + log.Errorw("Encountered recoverable error when reading from Windows Event Log", "error", err) + if resetErr := api.Reset(); resetErr != nil { + log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) + } + continue runLoop + } + if !api.IsFile() && IsChannelNotFound(err) { + log.Errorw("Encountered channel not found error when reading from Windows Event Log", "error", err) + if resetErr := api.Reset(); resetErr != nil { + log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) + } + continue runLoop + } + + if err != nil { + if errors.Is(err, io.EOF) { + log.Debugw("End of Winlog event stream reached", "error", err) + return nil + } + + //nolint:nilerr // only log error if we are not shutting down + if cancelCtx.Err() != nil { + return nil + } + + log.Errorw("Error occurred while reading from Windows Event Log", "error", err) + return err + } + if len(records) == 0 { + _ = timed.Wait(cancelCtx, time.Second) + continue + } + + if err := publisher.Publish(records); err != nil { + return err + } + } + } +} From e26baff8300e552174c1027db79a3cc0fc067ea9 Mon Sep 17 00:00:00 2001 From: Anderson Queiroz Date: Thu, 20 Feb 2025 15:20:17 +0100 Subject: [PATCH 10/41] remove filebeat/filebeat_windows_amd64.syso added by mistake (#42799) filebeat/filebeat_windows_amd64.syso is part of the build process on windows and is deleted during this process. It was added by mistake by https://github.com/elastic/beats/pull/37283. 
--- filebeat/filebeat_windows_amd64.syso | Bin 1072 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 filebeat/filebeat_windows_amd64.syso diff --git a/filebeat/filebeat_windows_amd64.syso b/filebeat/filebeat_windows_amd64.syso deleted file mode 100644 index c52af94f8e059275dff851e701e42fafefdf4132..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1072 zcmZvcPiqrV6vfY&fOb*nQd|w_QbN*yQBY7!i-l0dN^vLCB#i?x6Ot*k(1j~MPnT}| z7B2f8`VGY2dDEAF_$Kq-efOPr?!D)|N&jaewmS_v}vGgN)sxb-p=70Pf@Vy80h4-2mv0c}8F;8`(uk;7{gmt73I%a-Ee~9h& zyMFiA*=*`jn8z_p?z^g{h3EExcburyRJq0)!j@~K0nB5as{r~dV47Neq)R-B-huhf zGXP&|2EKRi$(g^_w)WLlrWV!?U|^3TYmq!ORo4LK(Bx;jLh;di~0fGJ>biqB-LT?#j`|Hx0ei~6oN+*`lHFHx{AzxVk%8;6vk~%W>futpr>sxm z&0($bd;zY777dhe8|f+dF1R5~qi`Y<;<>^htR)zq(yA|A)9HDr#EQ9QXO-$LH>1nA zJ&)Dlb|u`mirY8F5uULXtKIz vU72-VI_yjMVytfDT-<8unfw8NX4Q9SMjbx&h0c!a@6oNM<$r-Bc)a@`Kf{eH From ce6156b950d076bc8585c9ed358fd0f8f3e071f1 Mon Sep 17 00:00:00 2001 From: Christiano Haesbaert Date: Thu, 20 Feb 2025 17:19:28 +0100 Subject: [PATCH 11/41] auditbeat: system/process module backed by quark (#42032) This introduces a new provider for the system/process module on Linux. The main motivation is to address some of the limitations of the current implementation. The gosysinfo provider sends state reports by scraping /proc from time to time, so it loses all short-lived processes. Some customers would also like to have full telemetry but can't run auditd for various reasons. As a bonus we get some extra ECS fields that were not available before. MAIN DIFFERENCES: * Publishes every process in the system, regardless of lifespan. * Publishes exec events for an existing process (without a fork). * Aggregates fork+exec+exit within one event. * Adds event.exit_code for processes that exited; can't express exit_time in ECS? * Includes the original process.args; sysinfo reports args that were fetched when it parsed /proc, so a userland process can masquerade itself. For the initial /proc scraping we report the current value, like sysinfo. We can't get the original value since the kernel overwrites it; if you want to have fun: https://github.com/systemd/systemd/blob/main/src/basic/argv-util.c#L165 * Adds process.args_count. * Adds process.interactive and, if true, process.tty.char_device.{major,minor} * Attempts to hash all processes, not just long-lived ones. * Hashing is not rate-limited anymore, but it's cached and refreshed based on metadata. It's an LRU keyed by path and refreshed if the metadata of the file changes, statx(2) if the kernel supports it, stat(2) otherwise. * No more periodic state reports, only the initial batch. * No more saving the timestamp of the last state report on disk. * No more /proc parsing during runtime, only on boot. MISSING: * Unify entity id with sessionview. * Docs. EXTRA CHANGES: * Added statx(2) to seccomp_linux so we can properly use CachedHasher. * Updated quark to 0.3 so we have namespace inode numbers.
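For reference, a minimal configuration sketch for opting into the new provider; the process.backend setting and its two accepted values (kernel_tracing and procfs) come from the config change in this patch, while the surrounding module/datasets layout is assumed to follow the usual auditbeat system module convention:

- module: system
  datasets:
    - process
  process.backend: kernel_tracing  # defaults to procfs; we fall back to procfs if quark can't start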
Co-authored-by: Nicholas Berlin <56366649+nicholasberlin@users.noreply.github.com> Co-authored-by: Andrew Kroh --- NOTICE.txt | 4 +- go.mod | 2 +- go.sum | 4 +- .../auditbeat/module/system/process/config.go | 12 +- .../system/process/gosysinfo_provider.go | 36 +- .../module/system/process/process.go | 51 ++- .../system/process/quark_provider_linux.go | 337 ++++++++++++++ .../process/quark_provider_linux_test.go | 411 ++++++++++++++++++ .../system/process/quark_provider_other.go | 18 + x-pack/auditbeat/seccomp_linux.go | 7 + 10 files changed, 848 insertions(+), 34 deletions(-) create mode 100644 x-pack/auditbeat/module/system/process/quark_provider_linux.go create mode 100644 x-pack/auditbeat/module/system/process/quark_provider_linux_test.go create mode 100644 x-pack/auditbeat/module/system/process/quark_provider_other.go diff --git a/NOTICE.txt b/NOTICE.txt index e92c00ff1216..ecaf09ee5347 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -14216,11 +14216,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-quark -Version: v0.2.0 +Version: v0.3.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-quark@v0.2.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-quark@v0.3.0/LICENSE.txt: Apache License diff --git a/go.mod b/go.mod index df2ccc6cd803..655fed320c06 100644 --- a/go.mod +++ b/go.mod @@ -179,7 +179,7 @@ require ( github.com/elastic/elastic-agent-libs v0.18.1 github.com/elastic/elastic-agent-system-metrics v0.11.7 github.com/elastic/go-elasticsearch/v8 v8.17.0 - github.com/elastic/go-quark v0.2.0 + github.com/elastic/go-quark v0.3.0 github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 github.com/elastic/mito v1.16.0 github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 diff --git a/go.sum b/go.sum index 9fe54fd3bf77..19de7bd540aa 100644 --- a/go.sum +++ b/go.sum @@ -370,8 +370,8 @@ github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f h1:TsPpU5EAwlt github.com/elastic/go-lumber v0.1.2-0.20220819171948-335fde24ea0f/go.mod h1:HHaWnZamYKWsR9/eZNHqRHob8iQDKnchHmmskT/SKko= github.com/elastic/go-perf v0.0.0-20241029065020-30bec95324b8 h1:FD01NjsTes0RxZVQ22ebNYJA4KDdInVnR9cn1hmaMwA= github.com/elastic/go-perf v0.0.0-20241029065020-30bec95324b8/go.mod h1:Nt+pnRYvf0POC+7pXsrv8ubsEOSsaipJP0zlz1Ms1RM= -github.com/elastic/go-quark v0.2.0 h1:r2BL4NzvhESrrL/yA3AcHt8mwF7fvQDssBAUiOL1sdg= -github.com/elastic/go-quark v0.2.0/go.mod h1:/ngqgumD/Z5vnFZ4XPN2kCbxnEfG5/Uc+bRvOBabVVA= +github.com/elastic/go-quark v0.3.0 h1:d4vokx0psEJo+93fnhvWpTJMggPd9rfMJSleoLva4xA= +github.com/elastic/go-quark v0.3.0/go.mod h1:bO/XIGZBUJGxyiJ9FTsSYn9YlfOTRJnmOP+iBE2FyjA= github.com/elastic/go-seccomp-bpf v1.5.0 h1:gJV+U1iP+YC70ySyGUUNk2YLJW5/IkEw4FZBJfW8ZZY= github.com/elastic/go-seccomp-bpf v1.5.0/go.mod h1:umdhQ/3aybliBF2jjiZwS492I/TOKz+ZRvsLT3hVe1o= github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 h1:yuiN60oaQUz2PtNpNhDI2H6zrCdfiiptmNdwV5WUaKA= diff --git a/x-pack/auditbeat/module/system/process/config.go b/x-pack/auditbeat/module/system/process/config.go index 52cb6dd98933..f144c5991398 100644 --- a/x-pack/auditbeat/module/system/process/config.go +++ b/x-pack/auditbeat/module/system/process/config.go @@ -5,6 +5,7 @@ package process import ( + "fmt" "time" 
"github.com/elastic/beats/v7/auditbeat/helper/hasher" @@ -16,11 +17,19 @@ type Config struct { ProcessStatePeriod time.Duration `config:"process.state.period"` HasherConfig hasher.Config `config:"process.hash"` + Backend string `config:"process.backend"` } // Validate validates the config. func (c *Config) Validate() error { - return c.HasherConfig.Validate() + if err := c.HasherConfig.Validate(); err != nil { + return err + } + if c.Backend != "kernel_tracing" && c.Backend != "procfs" { + return fmt.Errorf("invalid process.backend '%s'", c.Backend) + } + + return nil } func (c *Config) effectiveStatePeriod() time.Duration { @@ -40,4 +49,5 @@ var defaultConfig = Config{ ScanRatePerSec: "50 MiB", ScanRateBytesPerSec: 50 * 1024 * 1024, }, + Backend: "procfs", } diff --git a/x-pack/auditbeat/module/system/process/gosysinfo_provider.go b/x-pack/auditbeat/module/system/process/gosysinfo_provider.go index da82a2e18106..b6a0539b8da7 100644 --- a/x-pack/auditbeat/module/system/process/gosysinfo_provider.go +++ b/x-pack/auditbeat/module/system/process/gosysinfo_provider.go @@ -20,7 +20,6 @@ import ( "github.com/elastic/beats/v7/libbeat/common/capabilities" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/x-pack/auditbeat/cache" - "github.com/elastic/beats/v7/x-pack/auditbeat/module/system" "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/go-sysinfo" "github.com/elastic/go-sysinfo/types" @@ -33,7 +32,6 @@ const ( // SysinfoMetricSet collects data about the host. type SysInfoMetricSet struct { - system.SystemMetricSet MetricSet hasher *hasher.FileHasher cache *cache.Cache @@ -81,7 +79,7 @@ func (p Process) toMapStr() mapstr.M { } // NewFromSysInfo constructs a new MetricSet backed by go-sysinfo. -func NewFromSysInfo(base mb.BaseMetricSet, ms MetricSet) (mb.MetricSet, error) { +func NewFromSysInfo(ms MetricSet) (mb.MetricSet, error) { bucket, err := datastore.OpenBucket(bucketName) if err != nil { return nil, fmt.Errorf("failed to open persistent datastore: %w", err) @@ -117,12 +115,11 @@ func NewFromSysInfo(base mb.BaseMetricSet, ms MetricSet) (mb.MetricSet, error) { } sm := &SysInfoMetricSet{ - SystemMetricSet: system.NewSystemMetricSet(base), - MetricSet: ms, - cache: cache.New(), - bucket: bucket, - lastState: lastState, - hasher: hasher, + MetricSet: ms, + cache: cache.New(), + bucket: bucket, + lastState: lastState, + hasher: hasher, } return sm, nil @@ -351,27 +348,12 @@ func putIfNotEmpty(mapstr *mapstr.M, key string, value string) { } func processMessage(process *Process, action eventAction) string { - if process.Error != nil { - return fmt.Sprintf("ERROR for PID %d: %v", process.Info.PID, process.Error) - } - - var actionString string - switch action { - case eventActionProcessStarted: - actionString = "STARTED" - case eventActionProcessStopped: - actionString = "STOPPED" - case eventActionExistingProcess: - actionString = "is RUNNING" - } - - var userString string + var username string if process.User != nil { - userString = fmt.Sprintf(" by user %v", process.User.Username) + username = process.User.Username } - return fmt.Sprintf("Process %v (PID: %d)%v %v", - process.Info.Name, process.Info.PID, userString, actionString) + return makeMessage(process.Info.PID, action, process.Info.Name, username, process.Error) } func convertToCacheable(processes []*Process) []cache.Cacheable { diff --git a/x-pack/auditbeat/module/system/process/process.go b/x-pack/auditbeat/module/system/process/process.go index c79e87ce0fad..a45ef2e80826 100644 --- 
a/x-pack/auditbeat/module/system/process/process.go +++ b/x-pack/auditbeat/module/system/process/process.go @@ -7,6 +7,7 @@ package process import ( "encoding/binary" "fmt" + "runtime" "time" "github.com/elastic/beats/v7/auditbeat/ab" @@ -26,6 +27,7 @@ const ( // MetricSet collects data about the host. type MetricSet struct { + system.SystemMetricSet config Config log *logp.Logger } @@ -36,6 +38,8 @@ const ( eventActionExistingProcess eventAction = iota eventActionProcessStarted eventActionProcessStopped + eventActionProcessRan + eventActionProcessChangedImage eventActionProcessError ) @@ -47,6 +51,10 @@ func (action eventAction) String() string { return "process_started" case eventActionProcessStopped: return "process_stopped" + case eventActionProcessRan: + return "process_ran" + case eventActionProcessChangedImage: + return "process_changed_image" case eventActionProcessError: return "process_error" default: @@ -62,6 +70,8 @@ func (action eventAction) Type() string { return "start" case eventActionProcessStopped: return "end" + case eventActionProcessChangedImage: + return "change" case eventActionProcessError: return "info" default: @@ -84,12 +94,21 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { ms.config = defaultConfig ms.log = logp.NewLogger(metricsetName) + ms.SystemMetricSet = system.NewSystemMetricSet(base) if err := base.Module().UnpackConfig(&ms.config); err != nil { return nil, fmt.Errorf("failed to unpack the %v/%v config: %w", system.ModuleName, metricsetName, err) } - return NewFromSysInfo(base, ms) + if runtime.GOOS == "linux" && ms.config.Backend == "kernel_tracing" { + if qm, err := NewFromQuark(ms); err == nil { + return qm, nil + } else { + ms.log.Errorf("can't use kernel_tracing, falling back to procfs: %v", err) + } + } + + return NewFromSysInfo(ms) } // entityID creates an ID that uniquely identifies this process across machines. @@ -102,3 +121,33 @@ func entityID(hostID string, pid int, startTime time.Time) string { binary.Write(h, binary.LittleEndian, int64(startTime.Nanosecond())) return h.Sum() } + +func makeMessage(pid int, action eventAction, name string, username string, err error) string { + if err != nil { + return fmt.Sprintf("ERROR for PID %d: %v", pid, err) + } + + var actionString string + switch action { + case eventActionProcessStarted: + actionString = "STARTED" + case eventActionProcessStopped: + actionString = "STOPPED" + case eventActionExistingProcess: + actionString = "is RUNNING" + case eventActionProcessRan: + actionString = "RAN" + case eventActionProcessChangedImage: + actionString = "CHANGED IMAGE" + case eventActionProcessError: // NOTREACHABLE as err != nil if action is ProcessError + actionString = "ERROR" + } + + var userString string + if len(username) > 0 { + userString = fmt.Sprintf(" by user %v", username) + } + + return fmt.Sprintf("Process %v (PID: %d)%v %v", + name, pid, userString, actionString) +} diff --git a/x-pack/auditbeat/module/system/process/quark_provider_linux.go b/x-pack/auditbeat/module/system/process/quark_provider_linux.go new file mode 100644 index 000000000000..fbb7e4aba74f --- /dev/null +++ b/x-pack/auditbeat/module/system/process/quark_provider_linux.go @@ -0,0 +1,337 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build linux && (amd64 || arm64) && cgo + +package process + +import ( + "fmt" + "os/user" + "strconv" + "time" + + "github.com/elastic/beats/v7/auditbeat/helper/hasher" + "github.com/elastic/beats/v7/auditbeat/helper/tty" + "github.com/elastic/beats/v7/libbeat/common/capabilities" + "github.com/elastic/beats/v7/metricbeat/mb" + "github.com/elastic/elastic-agent-libs/mapstr" + "github.com/elastic/elastic-agent-libs/monitoring" + + quark "github.com/elastic/go-quark" +) + +var quarkMetrics = struct { + insertions *monitoring.Uint + removals *monitoring.Uint + aggregations *monitoring.Uint + nonAggregations *monitoring.Uint + lost *monitoring.Uint + backend *monitoring.String +}{} + +func init() { + reg := monitoring.Default.NewRegistry("process@quark") + quarkMetrics.insertions = monitoring.NewUint(reg, "insertions") + quarkMetrics.removals = monitoring.NewUint(reg, "removals") + quarkMetrics.aggregations = monitoring.NewUint(reg, "aggregations") + quarkMetrics.nonAggregations = monitoring.NewUint(reg, "non_aggregations") + quarkMetrics.lost = monitoring.NewUint(reg, "lost") + quarkMetrics.backend = monitoring.NewString(reg, "backend", monitoring.Report) +} + +// QuarkMetricSet is a MetricSet with added members used only by the +// quark backend. QuarkMetricSet uses mb.PushReporterV2 instead of +// mb.ReporterV2. More notably, we don't do periodic state reports, and +// we don't need a cache, as quark provides one. +type QuarkMetricSet struct { + MetricSet + queue *quark.Queue // Quark runtime state + selfMntNsIno uint32 // Mnt inode from current process + cachedHasher *hasher.CachedHasher +} + +// Used for testing only and not exposed via config +var quarkForceKprobe bool + +// NewFromQuark instantiates the module with quark's backend. +func NewFromQuark(ms MetricSet) (mb.MetricSet, error) { + var qm QuarkMetricSet + + qm.MetricSet = ms + + ino64, err := selfNsIno("mnt") + if err != nil { + return nil, fmt.Errorf("failed to fetch self mount inode: %w", err) + } + qm.selfMntNsIno = uint32(ino64) + qm.cachedHasher, err = hasher.NewFileHasherWithCache(qm.config.HasherConfig, 4096) + if err != nil { + return nil, fmt.Errorf("can't create hash cache: %w", err) + } + + attr := quark.DefaultQueueAttr() + if quarkForceKprobe { + attr.Flags &= ^quark.QQ_ALL_BACKENDS + attr.Flags |= quark.QQ_KPROBE + } + qm.queue, err = quark.OpenQueue(attr, 1) + if err != nil { + qm.cachedHasher.Close() + return nil, fmt.Errorf("can't open quark queue: %w", err) + } + stats := qm.queue.Stats() + if stats.Backend == quark.QQ_EBPF { + qm.log.Info("quark using EBPF") + } else if stats.Backend == quark.QQ_KPROBE { + qm.log.Info("quark using KPROBES") + } else { + qm.queue.Close() + qm.cachedHasher.Close() + return nil, fmt.Errorf("quark has an invalid backend") + } + + return &qm, nil +} + +// Run reads events from quark's queue and pushes them into output. +// The queue is owned by this goroutine and should not be touched +// from outside as there is no synchronization.
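+// Run blocks until the reporter signals done or the queue returns an unrecoverable error, closing the queue and the hash cache on the way out.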
+func (ms *QuarkMetricSet) Run(r mb.PushReporterV2) { + ms.log.Info("Quark running") + + metricsStamp := time.Now() + +MainLoop: + for { + // Poll for done + select { + case <-r.Done(): + break MainLoop + default: + } + + ms.maybeUpdateMetrics(&metricsStamp) + + x := time.Now() + quarkEvents, err := ms.queue.GetEvents() + if len(quarkEvents) == 1 { + ms.log.Debugf("getevents took %v", time.Since(x)) + } + if err != nil { + ms.log.Error("quark GetEvents, unrecoverable error", err) + break MainLoop + } + if len(quarkEvents) == 0 { + err = ms.queue.Block() + if err != nil { + ms.log.Error("quark Block, unrecoverable error", err) + break MainLoop + } + continue + } + for _, quarkEvent := range quarkEvents { + if !wantedEvent(quarkEvent) { + continue + } + if event, ok := ms.toEvent(quarkEvent); ok { + r.Event(event) + } + } + } + + // Queue is owned by this goroutine, if we ever access it from + // outside, we need to consider synchronization. + ms.cachedHasher.Close() + ms.queue.Close() + ms.queue = nil +} + +// toEvent converts a quark.Event to a mb.Event, returns true if we +// were able to make an event. +func (ms *QuarkMetricSet) toEvent(quarkEvent quark.Event) (mb.Event, bool) { + action, evtype := actionAndTypeOfEvent(quarkEvent) + process := quarkEvent.Process + event := mb.Event{RootFields: mapstr.M{}} + + var username string + var processErr error + defer func() { + // Fill out root message and error.message + event.RootFields.Put("message", + makeMessage(int(process.Pid), action, process.Comm, username, processErr)) + if processErr != nil { + event.RootFields.Put("error.message", processErr.Error()) + } + }() + + // Values that are independent of Proc.Valid + // Fill out event.* + event.RootFields.Put("event.type", evtype) + event.RootFields.Put("event.action", action.String()) + event.RootFields.Put("event.category", []string{"process"}) + event.RootFields.Put("event.kind", "event") + // Fill out process.* + event.RootFields.Put("process.name", process.Comm) + event.RootFields.Put("process.args", process.Cmdline) + event.RootFields.Put("process.args_count", len(process.Cmdline)) + event.RootFields.Put("process.pid", process.Pid) + event.RootFields.Put("process.working_directory", process.Cwd) + event.RootFields.Put("process.executable", process.Filename) + if process.Exit.Valid { + event.RootFields.Put("process.exit_code", process.Exit.ExitCode) + } + if !process.Proc.Valid { + return event, true + } + + // + // Code below can rely on Proc + // + + // Ids + event.RootFields.Put("process.parent.pid", process.Proc.Ppid) + startTime := time.Unix(0, int64(process.Proc.TimeBoot)) + if ms.HostID() != "" { + // TODO unify with sessionview and guarantee loss of precision + event.RootFields.Put("process.entity_id", + entityID(ms.HostID(), int(process.Pid), startTime)) + } + event.RootFields.Put("process.start", startTime) + event.RootFields.Put("user.id", process.Proc.Uid) + event.RootFields.Put("user.group.id", process.Proc.Gid) + event.RootFields.Put("user.effective.id", process.Proc.Euid) + event.RootFields.Put("user.effective.group.id", process.Proc.Egid) + event.RootFields.Put("user.saved.id", process.Proc.Suid) + event.RootFields.Put("user.saved.group.id", process.Proc.Sgid) + if us, err := user.LookupId(strconv.FormatUint(uint64(process.Proc.Uid), 10)); err == nil { + event.RootFields.Put("user.name", us.Username) + username = us.Username + } + if group, err := user.LookupGroupId(strconv.FormatUint(uint64(process.Proc.Gid), 10)); err == nil { + event.RootFields.Put("user.group.name", 
group.Name) + } + // Tty things + event.RootFields.Put("process.interactive", + tty.InteractiveFromTTY(tty.TTYDev{ + Major: process.Proc.TtyMajor, + Minor: process.Proc.TtyMinor, + })) + if process.Proc.TtyMajor != 0 { + event.RootFields.Put("process.tty.char_device.major", process.Proc.TtyMajor) + event.RootFields.Put("process.tty.char_device.minor", process.Proc.TtyMinor) + } + // Capabilities + capEffective, _ := capabilities.FromUint64(process.Proc.CapEffective) + if len(capEffective) > 0 { + event.RootFields.Put("process.thread.capabilities.effective", capEffective) + } + capPermitted, _ := capabilities.FromUint64(process.Proc.CapPermitted) + if len(capPermitted) > 0 { + event.RootFields.Put("process.thread.capabilities.permitted", capPermitted) + } + // If we are in the same mount namespace of the process, hash + // the file. When quark is running on kprobes, there are + // limitations concerning the full path of the filename, in + // those cases, the path won't start with a slash. + if process.Proc.MntInonum == ms.selfMntNsIno && len(process.Filename) > 0 && process.Filename[0] == '/' { + hashes, err := ms.cachedHasher.HashFile(process.Filename) + if err != nil { + processErr = fmt.Errorf("failed to hash executable %v for PID %v: %w", + process.Filename, process.Pid, err) + ms.log.Warn(processErr.Error()) + } else { + for hashType, digest := range hashes { + fieldName := "process.hash." + string(hashType) + event.RootFields.Put(fieldName, digest) + } + } + } else { + ms.log.Debugf("skipping hash %s (inonum %d vs %d)", process.Filename, process.Proc.MntInonum, ms.selfMntNsIno) + } + + return event, true +} + +// wantedEvent filters in only the wanted events from quark. +func wantedEvent(quarkEvent quark.Event) bool { + const wanted uint64 = quark.QUARK_EV_FORK | + quark.QUARK_EV_EXEC | + quark.QUARK_EV_EXIT | + quark.QUARK_EV_SNAPSHOT + if quarkEvent.Events&wanted == 0 || + quarkEvent.Process.Pid == 2 || + quarkEvent.Process.Proc.Ppid == 2 { // skip kthreads + + return false + } + + return true +} + +// actionAndTypeOfEvent computes eventAction and event.type out of a quark.Event. 
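+// Quark aggregates the whole lifecycle of short-lived processes (fork, exec,
+// exit) into a single event within the queue's hold window, so both values
+// are derived from the combined event mask rather than from a single flag.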
+func actionAndTypeOfEvent(quarkEvent quark.Event) (eventAction, []string) { + snap := quarkEvent.Events&quark.QUARK_EV_SNAPSHOT != 0 + fork := quarkEvent.Events&quark.QUARK_EV_FORK != 0 + exec := quarkEvent.Events&quark.QUARK_EV_EXEC != 0 + exit := quarkEvent.Events&quark.QUARK_EV_EXIT != 0 + + // Calculate event.action + // If it's a snap, it's existing + // If it forked + exited and executed or not, we consider ran + // If it execed + exited we consider stopped + // If it execed but didn't fork or exit, we consider changed image + var action eventAction + if snap { + action = eventActionExistingProcess + } else if fork && exit { + action = eventActionProcessRan + } else if fork { + action = eventActionProcessStarted + } else if exit { + action = eventActionProcessStopped + } else if exec { + action = eventActionProcessChangedImage + } else { + action = eventActionProcessError + } + // Calculate event.type + evtype := make([]string, 0, 4) + if snap { + evtype = append(evtype, eventActionExistingProcess.Type()) + } + if fork { + evtype = append(evtype, eventActionProcessStarted.Type()) + } + if exec { + evtype = append(evtype, eventActionProcessChangedImage.Type()) + } + if exit { + evtype = append(evtype, eventActionProcessStopped.Type()) + } + + return action, evtype +} + +func (ms *QuarkMetricSet) maybeUpdateMetrics(stamp *time.Time) { + if time.Since(*stamp) < time.Second*5 { + return + } + + stats := ms.queue.Stats() + quarkMetrics.insertions.Set(stats.Insertions) + quarkMetrics.removals.Set(stats.Removals) + quarkMetrics.aggregations.Set(stats.Aggregations) + quarkMetrics.nonAggregations.Set(stats.NonAggregations) + quarkMetrics.lost.Set(stats.Lost) + if stats.Backend == quark.QQ_EBPF { + quarkMetrics.backend.Set("ebpf") + } else if stats.Backend == quark.QQ_KPROBE { + quarkMetrics.backend.Set("kprobe") + } else { + quarkMetrics.backend.Set("invalid") + } + + *stamp = time.Now() +} diff --git a/x-pack/auditbeat/module/system/process/quark_provider_linux_test.go b/x-pack/auditbeat/module/system/process/quark_provider_linux_test.go new file mode 100644 index 000000000000..92e4707a8fec --- /dev/null +++ b/x-pack/auditbeat/module/system/process/quark_provider_linux_test.go @@ -0,0 +1,411 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
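+
+// The tests below exercise both quark backends (EBPF and kprobes) and must
+// run as root; each test calls skipIfNotRoot first.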
+
+//go:build linux && (amd64 || arm64) && cgo
+
+package process
+
+import (
+	"os"
+	"os/exec"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/beats/v7/auditbeat/ab"
+	"github.com/elastic/beats/v7/auditbeat/helper/tty"
+	"github.com/elastic/beats/v7/libbeat/common/capabilities"
+	"github.com/elastic/beats/v7/metricbeat/mb"
+	mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing"
+	"github.com/elastic/beats/v7/x-pack/auditbeat/module/system"
+	"github.com/elastic/elastic-agent-libs/mapstr"
+	quark "github.com/elastic/go-quark"
+)
+
+type backend int
+
+const (
+	Ebpf backend = iota
+	Kprobe
+)
+
+func TestInitialSnapshotEbpf(t *testing.T) {
+	skipIfNotRoot(t)
+	testInitialSnapshot(t, Ebpf)
+}
+
+func TestInitialSnapshotKprobe(t *testing.T) {
+	skipIfNotRoot(t)
+	testInitialSnapshot(t, Kprobe)
+}
+
+func TestForkExecExitEbpf(t *testing.T) {
+	skipIfNotRoot(t)
+	testForkExecExit(t, Ebpf)
+}
+
+func TestForkExecExitKprobe(t *testing.T) {
+	skipIfNotRoot(t)
+	testForkExecExit(t, Kprobe)
+}
+
+func TestQuarkMetricSetEbpf(t *testing.T) {
+	skipIfNotRoot(t)
+	testQuarkMetricSet(t, Ebpf)
+}
+
+func TestQuarkMetricSetKprobe(t *testing.T) {
+	skipIfNotRoot(t)
+	testQuarkMetricSet(t, Kprobe)
+}
+
+// testInitialSnapshot sees if quark is generating snapshot events
+func testInitialSnapshot(t *testing.T, be backend) {
+	qq := openQueue(t, be)
+	defer qq.Close()
+
+	// There should be events of kind quark.QUARK_EV_SNAPSHOT
+	qevs := drainFor(t, qq, 5*time.Millisecond)
+	var gotsnap bool
+	for _, qev := range qevs {
+		if qev.Events&quark.QUARK_EV_SNAPSHOT != 0 {
+			gotsnap = true
+		}
+	}
+
+	require.True(t, gotsnap)
+}
+
+// testForkExecExit tests if a spawned process shows up in quark
+func testForkExecExit(t *testing.T, be backend) {
+	qq := openQueue(t, be)
+	defer qq.Close()
+
+	// runNop will fork+exec+exit /bin/true
+	cmd := runNop(t)
+	qev := drainFirstOfPid(t, qq, cmd.Process.Pid)
+
+	// We should get at least FORK|EXEC|EXIT in the aggregation
+	require.Equal(t,
+		qev.Events&(quark.QUARK_EV_FORK|quark.QUARK_EV_EXEC|quark.QUARK_EV_EXIT),
+		quark.QUARK_EV_FORK|quark.QUARK_EV_EXEC|quark.QUARK_EV_EXIT)
+
+	// This is virtually impossible to fail, but we're pedantic
+	require.True(t, qev.Process.Proc.Valid)
+
+	// We need these otherwise nothing works
+	require.NotZero(t, qev.Process.Proc.MntInonum)
+	require.NotZero(t, qev.Process.Proc.TimeBoot)
+	require.NotZero(t, qev.Process.Proc.Ppid)
+
+	// Must be /bin/true
+	require.Equal(t, qev.Process.Filename, cmd.Path)
+	require.Equal(t, qev.Process.Filename, cmd.Args[0])
+
+	// Kprobe cwd path depth is limited
+	if be != Kprobe {
+		cwd, err := os.Getwd()
+		require.NoError(t, err)
+
+		require.Equal(t, cwd, qev.Process.Cwd)
+	}
+
+	// Check exit
+	require.True(t, qev.Process.Exit.Valid)
+	require.Zero(t, qev.Process.Exit.ExitCode)
+	// Don't care about ExitTime, it's also not precise
+}
+
+// testQuarkMetricSet will start the module and check that it
+// generates the correct event for os.Getpid() (an existing process),
+// and for a process we spawn ourselves via runNop().
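+// Expected events are built by hand from a parallel quark queue, since fields
+// such as Proc.TimeBoot and the saved IDs cannot be recovered through regular
+// os APIs.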
+func testQuarkMetricSet(t *testing.T, be backend) {
+	config := getConfigForQuark(be)
+
+	// Start the module, it will open its own queue
+	f := mbtest.NewPushMetricSetV2WithRegistry(t, config, ab.Registry)
+	ms, ok := f.(*QuarkMetricSet)
+	require.True(t, ok)
+
+	// Start our own queue in parallel, so we can compare some
+	// members we have no other way of fetching
+	qq := openQueue(t, be)
+	defer qq.Close()
+
+	// The queue is open, so we can spawn something and it should show up
+	cmd := runNop(t)
+
+	// Run the main loop, it should get a snapshot event of ourselves
+	// and a fork+exec+exit of the nop we just ran
+	// XXX sadly we can't control the holdTime of the queue used by beats,
+	// so we need to wait a full second
+	events := mbtest.RunPushMetricSetV2(1100*time.Millisecond, 0, ms)
+	require.NotEmpty(t, events)
+
+	// Lookup self from qq, we need Proc.TimeBoot and Suid/Sgid
+	selfFromQQ, ok := qq.Lookup(os.Getpid())
+	require.True(t, ok)
+	// Make a fake event from self (os.Getpid())
+	selfTarget := makeSelfEvent(t, selfFromQQ, be)
+	// Lookup what we actually got from beats
+	selfActual := firstEventOfPid(t, events, os.Getpid())
+	// Compare
+	checkEvent(t, selfTarget, selfActual)
+
+	// Drain until we find the event generated by runNop(), we
+	// need Proc.TimeBoot and Suid/Sgid
+	spawnedFromQQ := drainFirstOfPid(t, qq, cmd.Process.Pid)
+	// Make an event of the spawned cmd
+	spawnedTarget := makeEventOfCmd(t, cmd, spawnedFromQQ, be)
+	// Lookup what we actually got from beats
+	spawnedActual := firstEventOfPid(t, events, cmd.Process.Pid)
+	// Compare
+	checkEvent(t, spawnedTarget, spawnedActual)
+}
+
+// checkEvent checks the equality of all the fields from target in actual,
+// not the other way around since actual is always larger
+func checkEvent(t *testing.T, target mb.Event, actual mb.Event) {
+	for tk, tv := range target.RootFields {
+		av, err := actual.RootFields.GetValue(tk)
+		require.NoError(t, err)
+		require.Equal(t, tv, av)
+	}
+}
+
+// openQueue opens a quark queue on a specific backend
+func openQueue(t *testing.T, be backend) *quark.Queue {
+	attr := quark.DefaultQueueAttr()
+	attr.HoldTime = 25
+	attr.Flags &= ^quark.QQ_ALL_BACKENDS
+	if be == Ebpf {
+		attr.Flags |= quark.QQ_EBPF
+	} else if be == Kprobe {
+		attr.Flags |= quark.QQ_KPROBE
+	}
+	qq, err := quark.OpenQueue(attr, 1)
+	require.NoError(t, err)
+
+	return qq
+}
+
+// runNop does fork+exec+exit /bin/true
+func runNop(t *testing.T) *exec.Cmd {
+	cmd := exec.Command("/bin/true")
+	require.NotNil(t, cmd)
+	err := cmd.Run()
+	require.NoError(t, err)
+
+	return cmd
+}
+
+// drainFor drains all events for `d`
+func drainFor(t *testing.T, qq *quark.Queue, d time.Duration) []quark.Event {
+	var allQevs []quark.Event
+
+	start := time.Now()
+
+	for {
+		qevs, err := qq.GetEvents()
+		require.NoError(t, err)
+		for _, qev := range qevs {
+			if !wantedEvent(qev) {
+				continue
+			}
+			allQevs = append(allQevs, qev)
+		}
+		if time.Since(start) > d {
+			break
+		}
+		// Intentionally placed at the end so that we always
+		// get one more try after the last block
+		if len(qevs) == 0 {
+			_ = qq.Block()
+		}
+	}
+
+	return allQevs
+}
+
+// drainFirstOfPid returns the first event of `pid`, failing the test
+// if it doesn't show up within a second
+func drainFirstOfPid(t *testing.T, qq *quark.Queue, pid int) quark.Event {
+	start := time.Now()
+
+	for {
+		qevs, err := qq.GetEvents()
+		require.NoError(t, err)
+		for _, qev := range qevs {
+			if !wantedEvent(qev) {
+				continue
+			}
+			if qev.Process.Pid == uint32(pid) {
+				return qev
+			}
+		}
+		if time.Since(start) > time.Second {
+			break
+		}
+		// Intentionally
placed at the end so that we always + // get one more try after the last block + if len(qevs) == 0 { + _ = qq.Block() + } + } + + t.Fatalf("Can't find event of pid %d", pid) + + return quark.Event{} // NOTREACHED +} + +// firstEventOfPid looks up the first event of `pid` in `events` +func firstEventOfPid(t *testing.T, events []mb.Event, pid int) mb.Event { + for _, event := range events { + pid2, err := event.RootFields.GetValue("process.pid") + require.NoError(t, err) + if pid2.(uint32) == uint32(pid) { + return event + } + } + + t.Fatalf("Can't find event of pid %d", pid) + + return mb.Event{} // NOTREACHED +} + +// makeSelfEvent builds what should be the event that quark will +// generate as an initial snapshot of the current process +func makeSelfEvent(t *testing.T, qp quark.Process, be backend) mb.Event { + exe, err := os.Executable() + require.NoError(t, err) + + interactive := tty.InteractiveFromTTY(tty.TTYDev{ + Major: qp.Proc.TtyMajor, + Minor: qp.Proc.TtyMinor, + }) + + capEff, err := capabilities.FromPid(capabilities.Effective, os.Getpid()) + require.NoError(t, err) + capPer, err := capabilities.FromPid(capabilities.Permitted, os.Getpid()) + require.NoError(t, err) + + self := mb.Event{ + RootFields: mapstr.M{ + "event.type": []string{"info"}, + "event.action": "existing_process", + "event.category": []string{"process"}, + "event.kind": "event", + "process.name": qp.Comm, + "process.args": qp.Cmdline, + "process.args_count": len(qp.Cmdline), + "process.pid": uint32(os.Getpid()), + "process.executable": exe, + "process.parent.pid": uint32(os.Getppid()), + "process.start": time.Unix(0, int64(qp.Proc.TimeBoot)), + "user.id": uint32(0), + "user.group.id": uint32(0), + "user.effective.id": uint32(0), + "user.saved.id": qp.Proc.Suid, + "user.saved.group.id": qp.Proc.Sgid, + "user.name": "root", + "user.group.name": "root", + "process.interactive": interactive, + "process.thread.capabilities.effective": capEff, + "process.thread.capabilities.permitted": capPer, + }, + } + + // Kprobe path depth is limited + if be != Kprobe { + cwd, err := os.Getwd() + require.NoError(t, err) + + self.RootFields["process.working_directory"] = cwd + } + + if qp.Proc.TtyMajor != 0 { + self.RootFields["process.tty.char_device.major"] = qp.Proc.TtyMajor + self.RootFields["process.tty.char_device.minor"] = qp.Proc.TtyMinor + } + + return self +} + +// makeEventOfCmd builds an mb.Event out of cmd and qev +func makeEventOfCmd(t *testing.T, cmd *exec.Cmd, qev quark.Event, be backend) mb.Event { + // We should get at least FORK|EXEC|EXIT in the aggregation + require.Equal(t, + qev.Events&(quark.QUARK_EV_FORK|quark.QUARK_EV_EXEC|quark.QUARK_EV_EXIT), + quark.QUARK_EV_FORK|quark.QUARK_EV_EXEC|quark.QUARK_EV_EXIT) + // This is virtually impossible to fail, but we're pedantic + require.True(t, qev.Process.Proc.Valid) + + qp := qev.Process + + interactive := tty.InteractiveFromTTY(tty.TTYDev{ + Major: qp.Proc.TtyMajor, + Minor: qp.Proc.TtyMinor, + }) + + capEff, err := capabilities.FromPid(capabilities.Effective, os.Getpid()) + require.NoError(t, err) + capPer, err := capabilities.FromPid(capabilities.Permitted, os.Getpid()) + require.NoError(t, err) + + cmdEvent := mb.Event{ + RootFields: mapstr.M{ + "event.type": []string{"start", "change", "end"}, + "event.action": "process_ran", + "event.category": []string{"process"}, + "event.kind": "event", + "process.name": "true", + "process.args": []string{"/bin/true"}, + "process.args_count": 1, + "process.pid": uint32(cmd.Process.Pid), + "process.executable": "/bin/true", 
+ "process.parent.pid": uint32(os.Getpid()), + "process.start": time.Unix(0, int64(qp.Proc.TimeBoot)), + "user.id": uint32(0), + "user.group.id": uint32(0), + "user.effective.id": uint32(0), + "user.saved.id": qp.Proc.Suid, + "user.saved.group.id": qp.Proc.Sgid, + "user.name": "root", + "user.group.name": "root", + "process.interactive": interactive, + "process.thread.capabilities.effective": capEff, + "process.thread.capabilities.permitted": capPer, + }, + } + + // Kprobe path depth is limited + if be != Kprobe { + cwd, err := os.Getwd() + require.NoError(t, err) + + cmdEvent.RootFields["process.working_directory"] = cwd + } + + return cmdEvent +} + +// getConfigForQuark enables quark and allows hashing so we can test +// the cached hasher. +func getConfigForQuark(be backend) map[string]interface{} { + config := map[string]interface{}{ + "module": system.ModuleName, + "datasets": []string{"process"}, + + "process.backend": "kernel_tracing", + } + quarkForceKprobe = be == Kprobe + + return config +} + +func skipIfNotRoot(t *testing.T) { + if os.Getuid() != 0 { + t.Skip("must be root") + } +} diff --git a/x-pack/auditbeat/module/system/process/quark_provider_other.go b/x-pack/auditbeat/module/system/process/quark_provider_other.go new file mode 100644 index 000000000000..513d7e9df514 --- /dev/null +++ b/x-pack/auditbeat/module/system/process/quark_provider_other.go @@ -0,0 +1,18 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !linux || !(amd64 || arm64) || !cgo + +package process + +import ( + "errors" + + "github.com/elastic/beats/v7/metricbeat/mb" +) + +// NewFromQuark instantiates the module with quark's backend. +func NewFromQuark(ms MetricSet) (mb.MetricSet, error) { + return nil, errors.New("quark is only available on linux on amd64/arm64 and needs cgo") +} diff --git a/x-pack/auditbeat/seccomp_linux.go b/x-pack/auditbeat/seccomp_linux.go index 5dd05618d31c..dc8735f9b94b 100644 --- a/x-pack/auditbeat/seccomp_linux.go +++ b/x-pack/auditbeat/seccomp_linux.go @@ -43,5 +43,12 @@ func init() { ); err != nil { panic(err) } + + // The system/process dataset uses additional syscalls + if err := seccomp.ModifyDefaultPolicy(seccomp.AddSyscall, + "statx", + ); err != nil { + panic(err) + } } } From cd883f511c3cc5664a15f0fd66fe6a83ae655c27 Mon Sep 17 00:00:00 2001 From: Gabriel Pop <94497545+gpop63@users.noreply.github.com> Date: Thu, 20 Feb 2025 19:05:40 +0200 Subject: [PATCH 12/41] [meraki] fix panic due to uninitialized device wifi structs (#42746) * initialize structs if nil use interface for client to facilitate testing * add tests * add changelog entry * fix pr id * make update * fix linter * revert getDeviceLicenses changes * add more safety checks * address reviews --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 62 ++--- go.mod | 2 +- .../meraki/device_health/device_health.go | 6 +- .../device_health/device_health_test.go | 224 ++++++++++++++++++ .../module/meraki/device_health/devices.go | 43 +++- .../meraki/device_health/switchports.go | 11 + .../module/meraki/device_health/uplinks.go | 36 ++- 8 files changed, 341 insertions(+), 44 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 21a6b6f294ba..04c3a9bd2c42 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -265,6 +265,7 @@ otherwise no tag is added. 
{issue}42208[42208] {pull}42403[42403] - Remove `hostname` field from zookeeper's `mntr` data stream. {pull}41887[41887] - Continue collecting metrics even if the Cisco Meraki `getDeviceLicenses` operation fails. {pull}42397[42397] - Fixed errors in the `elasticsearch.index` metricset when index settings are missing. {issue}42424[42424] {pull}42426[42426] +- Fixed panic caused by uninitialized meraki device wifi0 and wifi1 struct pointers in the device WiFi data fetching. {issue}42745[42745] {pull}42746[42746] *Osquerybeat* diff --git a/NOTICE.txt b/NOTICE.txt index ecaf09ee5347..4ff9df845df0 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -16435,6 +16435,37 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/go-resty/resty/v2 +Version: v2.13.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/go-resty/resty/v2@v2.13.1/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2015-2023 Jeevanandam M., https://myjeeva.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/go-sql-driver/mysql Version: v1.6.0 @@ -45330,37 +45361,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/go-resty/resty/v2 -Version: v2.13.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/go-resty/resty/v2@v2.13.1/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2015-2023 Jeevanandam M., https://myjeeva.com - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/go-sourcemap/sourcemap Version: v2.1.2+incompatible diff --git a/go.mod b/go.mod index 655fed320c06..cb9e95e37532 100644 --- a/go.mod +++ b/go.mod @@ -189,6 +189,7 @@ require ( github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 github.com/go-ldap/ldap/v3 v3.4.6 github.com/go-ole/go-ole v1.2.6 + github.com/go-resty/resty/v2 v2.13.1 github.com/gofrs/uuid/v5 v5.2.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/cel-go v0.19.0 @@ -311,7 +312,6 @@ require ( github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.9 // indirect - github.com/go-resty/resty/v2 v2.13.1 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/godror/knownpb v0.1.0 // indirect diff --git a/x-pack/metricbeat/module/meraki/device_health/device_health.go b/x-pack/metricbeat/module/meraki/device_health/device_health.go index bbe301b3b430..52d67a346e74 100644 --- a/x-pack/metricbeat/module/meraki/device_health/device_health.go +++ b/x-pack/metricbeat/module/meraki/device_health/device_health.go @@ -100,7 +100,11 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { getDevicePerformanceScores(m.logger, m.client, devices) - err = getDeviceChannelUtilization(m.client, devices, collectionPeriod) + networkHealthService := &NetworkHealthServiceWrapper{ + service: m.client.Networks, + } + + err = getDeviceChannelUtilization(networkHealthService, devices, collectionPeriod) if err != nil { return fmt.Errorf("getDeviceChannelUtilization failed; %w", err) } diff --git a/x-pack/metricbeat/module/meraki/device_health/device_health_test.go b/x-pack/metricbeat/module/meraki/device_health/device_health_test.go index 668007690346..5147b49a47d4 100644 --- a/x-pack/metricbeat/module/meraki/device_health/device_health_test.go +++ b/x-pack/metricbeat/module/meraki/device_health/device_health_test.go @@ -5,7 +5,17 @@ package device_health import ( + "bytes" + "fmt" + "io" + "net/http" "testing" + "time" + + "github.com/go-resty/resty/v2" + meraki "github.com/meraki/dashboard-api-go/v3/sdk" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIsEmpty(t *testing.T) { @@ -65,3 +75,217 @@ func TestIsEmpty(t *testing.T) { }) } } + +func TestGetDeviceChannelUtilization(t *testing.T) { + tests := []struct { + name string + client NetworkHealthService + devices map[Serial]*Device + wantErr bool + validate func(t *testing.T, devices map[Serial]*Device) + }{ + { + name: "successful data retrieval", + client: &SuccessfulMockNetworkHealthService{}, + devices: map[Serial]*Device{ + "serial-1": { + details: &meraki.ResponseItemOrganizationsGetOrganizationDevices{ + ProductType: "wireless", + NetworkID: "network-1", + }, + }, + "serial-2": { + details: &meraki.ResponseItemOrganizationsGetOrganizationDevices{ + ProductType: "wireless", + NetworkID: "network-2", + }, + }, + }, + 
validate: func(t *testing.T, devices map[Serial]*Device) { + assert.NotNil(t, devices["serial-1"].wifi0) + assert.Equal(t, 1.0, *devices["serial-1"].wifi0.Utilization80211) + assert.Equal(t, 1.1, *devices["serial-1"].wifi0.UtilizationNon80211) + assert.Equal(t, 1.2, *devices["serial-1"].wifi0.UtilizationTotal) + assert.NotNil(t, devices["serial-2"].wifi1) + assert.Equal(t, 2.0, *devices["serial-2"].wifi1.Utilization80211) + assert.Equal(t, 2.1, *devices["serial-2"].wifi1.UtilizationNon80211) + assert.Equal(t, 2.2, *devices["serial-2"].wifi1.UtilizationTotal) + }, + }, + { + name: "multiple buckets use first entry", + client: &MultipleBucketsMockNetworkHealthService{}, + devices: map[Serial]*Device{ + "serial-3": { + details: &meraki.ResponseItemOrganizationsGetOrganizationDevices{ + ProductType: "wireless", + NetworkID: "network-3", + }, + }, + }, + validate: func(t *testing.T, devices map[Serial]*Device) { + assert.NotNil(t, devices["serial-3"].wifi0) + assert.Equal(t, 3.0, *devices["serial-3"].wifi0.Utilization80211) + assert.Equal(t, 3.1, *devices["serial-3"].wifi0.UtilizationNon80211) + assert.Equal(t, 3.2, *devices["serial-3"].wifi0.UtilizationTotal) + assert.Nil(t, devices["serial-3"].wifi1) + }, + }, + { + name: "MR 27.0 error skips network", + client: &MR27ErrorMockNetworkHealthService{}, + devices: map[Serial]*Device{ + "serial-4": { + details: &meraki.ResponseItemOrganizationsGetOrganizationDevices{ + ProductType: "wireless", + NetworkID: "network-4", + }, + }, + }, + validate: func(t *testing.T, devices map[Serial]*Device) { + assert.Nil(t, devices["serial-4"].wifi0) + assert.Nil(t, devices["serial-4"].wifi1) + }, + }, + { + name: "other errors propagate", + client: &GenericErrorMockNetworkHealthService{}, + devices: map[Serial]*Device{ + "serial-5": { + details: &meraki.ResponseItemOrganizationsGetOrganizationDevices{ + ProductType: "wireless", + NetworkID: "network-5", + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + devicesCopy := make(map[Serial]*Device, len(tt.devices)) + for k, v := range tt.devices { + devicesCopy[k] = &Device{ + details: v.details, + wifi0: v.wifi0, + wifi1: v.wifi1, + } + } + + err := getDeviceChannelUtilization(tt.client, devicesCopy, time.Second) + if tt.wantErr { + assert.Error(t, err) + return + } + require.NoError(t, err) + + if tt.validate != nil { + tt.validate(t, devicesCopy) + } + }) + } +} + +// SuccessfulMockNetworkHealthService returns valid utilization data +type SuccessfulMockNetworkHealthService struct{} + +func (m *SuccessfulMockNetworkHealthService) GetNetworkNetworkHealthChannelUtilization(networkID string, params *meraki.GetNetworkNetworkHealthChannelUtilizationQueryParams) (*meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization, *resty.Response, error) { + wifi0utilization80211 := 1.0 + wifi0utilizationNon80211 := 1.1 + wifi0utilizationTotal := 1.2 + + wifi1utilization80211 := 2.0 + wifi1utilizationNon80211 := 2.1 + wifi1utilizationTotal := 2.2 + + return &meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization{ + meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilization{ + Serial: "serial-1", + Wifi0: &[]meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilizationWifi0{ + { + Utilization80211: &wifi0utilization80211, + UtilizationNon80211: &wifi0utilizationNon80211, + UtilizationTotal: &wifi0utilizationTotal, + }, + }, + Wifi1: &[]meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilizationWifi1{ + { + Utilization80211: 
&wifi1utilization80211, + UtilizationNon80211: &wifi1utilizationNon80211, + UtilizationTotal: &wifi1utilizationTotal, + }, + }, + }, + meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilization{ + Serial: "serial-2", + Wifi0: &[]meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilizationWifi0{ + { + Utilization80211: &wifi0utilization80211, + UtilizationNon80211: &wifi0utilizationNon80211, + UtilizationTotal: &wifi0utilizationTotal, + }, + }, + Wifi1: &[]meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilizationWifi1{ + { + Utilization80211: &wifi1utilization80211, + UtilizationNon80211: &wifi1utilizationNon80211, + UtilizationTotal: &wifi1utilizationTotal, + }, + }, + }, + }, &resty.Response{}, nil +} + +// MultipleBucketsMockNetworkHealthService returns multiple utilization buckets +type MultipleBucketsMockNetworkHealthService struct{} + +func (m *MultipleBucketsMockNetworkHealthService) GetNetworkNetworkHealthChannelUtilization(networkID string, params *meraki.GetNetworkNetworkHealthChannelUtilizationQueryParams) (*meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization, *resty.Response, error) { + wifi0util_80211 := 3.0 + wifi0util_non80211 := 3.1 + wifi0util_total := 3.2 + + return &meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization{ + meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilization{ + Serial: "serial-3", + Wifi0: &[]meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilizationWifi0{ + { // First bucket will be used + Utilization80211: &wifi0util_80211, + UtilizationNon80211: &wifi0util_non80211, + UtilizationTotal: &wifi0util_total, + }, + { // Second bucket will be ignored + Utilization80211: &wifi0util_80211, + UtilizationNon80211: &wifi0util_non80211, + UtilizationTotal: &wifi0util_total, + }, + }, + }, + }, &resty.Response{}, nil +} + +// MR27ErrorMockNetworkHealthService simulates the MR 27.0 version error +type MR27ErrorMockNetworkHealthService struct{} + +func (m *MR27ErrorMockNetworkHealthService) GetNetworkNetworkHealthChannelUtilization(networkID string, params *meraki.GetNetworkNetworkHealthChannelUtilizationQueryParams) (*meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization, *resty.Response, error) { + r := &resty.Response{} + bodyContent := []byte("This endpoint is only available for networks on MR 27.0 or above.") + r.SetBody(bodyContent) + r.RawResponse = &http.Response{ + Body: io.NopCloser(bytes.NewBuffer(bodyContent)), + } + return nil, r, fmt.Errorf("MR 27.0 error") +} + +// GenericErrorMockNetworkHealthService simulates generic errors +type GenericErrorMockNetworkHealthService struct{} + +func (m *GenericErrorMockNetworkHealthService) GetNetworkNetworkHealthChannelUtilization(networkID string, params *meraki.GetNetworkNetworkHealthChannelUtilizationQueryParams) (*meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization, *resty.Response, error) { + r := &resty.Response{} + bodyContent := []byte("Internal Server Error") + r.SetBody(bodyContent) + r.RawResponse = &http.Response{ + Body: io.NopCloser(bytes.NewBuffer(bodyContent)), + } + return nil, r, fmt.Errorf("mock API error") +} diff --git a/x-pack/metricbeat/module/meraki/device_health/devices.go b/x-pack/metricbeat/module/meraki/device_health/devices.go index 894393647a2e..67792fd83205 100644 --- a/x-pack/metricbeat/module/meraki/device_health/devices.go +++ b/x-pack/metricbeat/module/meraki/device_health/devices.go @@ -5,11 +5,14 @@ package device_health import ( + "errors" "fmt" "net/http" "strings" "time" + 
"github.com/go-resty/resty/v2" + "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" @@ -59,6 +62,10 @@ func getDeviceStatuses(client *meraki.Client, organizationID string, devices map return fmt.Errorf("GetOrganizationDevicesStatuses failed; [%d] %s. %w", res.StatusCode(), res.Body(), err) } + if val == nil { + return errors.New("GetOrganizationDevicesStatuses returned nil response") + } + for i := range *val { status := (*val)[i] if device, ok := devices[Serial(status.Serial)]; ok { @@ -71,6 +78,9 @@ func getDeviceStatuses(client *meraki.Client, organizationID string, devices map func getDevicePerformanceScores(logger *logp.Logger, client *meraki.Client, devices map[Serial]*Device) { for _, device := range devices { + if device == nil || device.details == nil { + continue + } // attempting to get a performance score for a non-MX device returns a 400 if strings.Index(device.details.Model, "MX") != 0 { continue @@ -92,7 +102,19 @@ func getDevicePerformanceScores(logger *logp.Logger, client *meraki.Client, devi } } -func getDeviceChannelUtilization(client *meraki.Client, devices map[Serial]*Device, period time.Duration) error { +type NetworkHealthService interface { + GetNetworkNetworkHealthChannelUtilization(networkID string, getNetworkNetworkHealthChannelUtilizationQueryParams *meraki.GetNetworkNetworkHealthChannelUtilizationQueryParams) (*meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization, *resty.Response, error) +} + +type NetworkHealthServiceWrapper struct { + service *meraki.NetworksService +} + +func (w *NetworkHealthServiceWrapper) GetNetworkNetworkHealthChannelUtilization(networkID string, getNetworkNetworkHealthChannelUtilizationQueryParams *meraki.GetNetworkNetworkHealthChannelUtilizationQueryParams) (*meraki.ResponseNetworksGetNetworkNetworkHealthChannelUtilization, *resty.Response, error) { + return w.service.GetNetworkNetworkHealthChannelUtilization(networkID, getNetworkNetworkHealthChannelUtilizationQueryParams) +} + +func getDeviceChannelUtilization(client NetworkHealthService, devices map[Serial]*Device, period time.Duration) error { // There are two ways to get this information from the API. // An alternative to this would be to use `/organizations/{organizationId}/wireless/devices/channelUtilization/byDevice`, // avoids the need to extract the filtered network IDs below. 
@@ -101,6 +123,10 @@ func getDeviceChannelUtilization(client *meraki.Client, devices map[Serial]*Devi networkIDs := make(map[string]bool) for _, device := range devices { + if device == nil || device.details == nil { + continue + } + if device.details.ProductType != "wireless" { continue } @@ -111,7 +137,7 @@ func getDeviceChannelUtilization(client *meraki.Client, devices map[Serial]*Devi } for networkID := range networkIDs { - val, res, err := client.Networks.GetNetworkNetworkHealthChannelUtilization( + val, res, err := client.GetNetworkNetworkHealthChannelUtilization( networkID, &meraki.GetNetworkNetworkHealthChannelUtilizationQueryParams{ Timespan: period.Seconds(), @@ -131,11 +157,17 @@ func getDeviceChannelUtilization(client *meraki.Client, devices map[Serial]*Devi if device, ok := devices[Serial(utilization.Serial)]; ok { if utilization.Wifi0 != nil && len(*utilization.Wifi0) != 0 { // only take the first bucket - collection intervals which result in multiple buckets are not supported + if device.wifi0 == nil { + device.wifi0 = &meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilizationWifi0{} + } device.wifi0.Utilization80211 = (*utilization.Wifi0)[0].Utilization80211 device.wifi0.UtilizationNon80211 = (*utilization.Wifi0)[0].UtilizationNon80211 device.wifi0.UtilizationTotal = (*utilization.Wifi0)[0].UtilizationTotal } if utilization.Wifi1 != nil && len(*utilization.Wifi1) != 0 { + if device.wifi1 == nil { + device.wifi1 = &meraki.ResponseItemNetworksGetNetworkNetworkHealthChannelUtilizationWifi1{} + } device.wifi1.Utilization80211 = (*utilization.Wifi1)[0].Utilization80211 device.wifi1.UtilizationNon80211 = (*utilization.Wifi1)[0].UtilizationNon80211 device.wifi1.UtilizationTotal = (*utilization.Wifi1)[0].UtilizationTotal @@ -157,6 +189,10 @@ func getDeviceLicenses(client *meraki.Client, organizationID string, devices map return fmt.Errorf("GetOrganizationLicenses failed; [%d] %s. %w", res.StatusCode(), res.Body(), err) } + if val == nil { + return errors.New("GetOrganizationLicenses returned nil response") + } + for i := range *val { license := (*val)[i] if device, ok := devices[Serial(license.DeviceSerial)]; ok { @@ -188,6 +224,9 @@ func deviceDetailsToMapstr(details *meraki.ResponseItemOrganizationsGetOrganizat func reportDeviceMetrics(reporter mb.ReporterV2, organizationID string, devices map[Serial]*Device) { metrics := []mapstr.M{} for _, device := range devices { + if device == nil || device.details == nil { + continue + } metric := deviceDetailsToMapstr(device.details) if device.haStatus != nil { diff --git a/x-pack/metricbeat/module/meraki/device_health/switchports.go b/x-pack/metricbeat/module/meraki/device_health/switchports.go index b61e052f708d..bf7b625565a4 100644 --- a/x-pack/metricbeat/module/meraki/device_health/switchports.go +++ b/x-pack/metricbeat/module/meraki/device_health/switchports.go @@ -5,6 +5,7 @@ package device_health import ( + "errors" "fmt" "time" @@ -25,6 +26,10 @@ func getDeviceSwitchports(client *meraki.Client, organizationID string, devices return fmt.Errorf("GetOrganizationSwitchPortsBySwitch failed; [%d] %s. 
%w", res.StatusCode(), res.Body(), err) } + if switches == nil { + return errors.New("GetOrganizationSwitchPortsBySwitch returned nil") + } + for _, device := range *switches { if device.Ports == nil { continue @@ -62,7 +67,13 @@ func getDeviceSwitchports(client *meraki.Client, organizationID string, devices func reportSwitchportMetrics(reporter mb.ReporterV2, organizationID string, devices map[Serial]*Device) { metrics := []mapstr.M{} for _, device := range devices { + if device == nil || device.details == nil { + continue + } for _, switchport := range device.switchports { + if switchport == nil { + continue + } metric := deviceDetailsToMapstr(device.details) if switchport.port != nil { diff --git a/x-pack/metricbeat/module/meraki/device_health/uplinks.go b/x-pack/metricbeat/module/meraki/device_health/uplinks.go index 85a54c267bc6..0797905fdae5 100644 --- a/x-pack/metricbeat/module/meraki/device_health/uplinks.go +++ b/x-pack/metricbeat/module/meraki/device_health/uplinks.go @@ -5,6 +5,7 @@ package device_health import ( + "errors" "fmt" "time" @@ -31,11 +32,6 @@ func getDeviceUplinks(client *meraki.Client, organizationID string, devices map[ return fmt.Errorf("GetOrganizationApplianceUplinkStatuses failed; [%d] %s. %w", res.StatusCode(), res.Body(), err) } - cellularGatewayUplinks, res, err := client.CellularGateway.GetOrganizationCellularGatewayUplinkStatuses(organizationID, &meraki.GetOrganizationCellularGatewayUplinkStatusesQueryParams{}) - if err != nil { - return fmt.Errorf("GetOrganizationCellularGatewayUplinkStatuses failed; [%d] %s. %w", res.StatusCode(), res.Body(), err) - } - lossAndLatency, res, err := client.Organizations.GetOrganizationDevicesUplinksLossAndLatency( organizationID, &meraki.GetOrganizationDevicesUplinksLossAndLatencyQueryParams{ @@ -46,8 +42,13 @@ func getDeviceUplinks(client *meraki.Client, organizationID string, devices map[ return fmt.Errorf("GetOrganizationDevicesUplinksLossAndLatency failed; [%d] %s. %w", res.StatusCode(), res.Body(), err) } + if applicanceUplinks == nil || lossAndLatency == nil { + return errors.New("unexpected response from Meraki API: applicanceUplinks or lossAndLatency is nil") + } + for _, device := range *applicanceUplinks { - if device.HighAvailability != nil { + deviceObj, ok := devices[Serial(device.Serial)] + if device.HighAvailability != nil && ok && deviceObj != nil { devices[Serial(device.Serial)].haStatus = device.HighAvailability } @@ -71,10 +72,21 @@ func getDeviceUplinks(client *meraki.Client, organizationID string, devices map[ uplinks = append(uplinks, uplink) } - devices[Serial(device.Serial)].uplinks = uplinks + if ok && deviceObj != nil { + devices[Serial(device.Serial)].uplinks = uplinks + } } } + cellularGatewayUplinks, res, err := client.CellularGateway.GetOrganizationCellularGatewayUplinkStatuses(organizationID, &meraki.GetOrganizationCellularGatewayUplinkStatusesQueryParams{}) + if err != nil { + return fmt.Errorf("GetOrganizationCellularGatewayUplinkStatuses failed; [%d] %s. 
%w", res.StatusCode(), res.Body(), err) + } + + if cellularGatewayUplinks == nil { + return errors.New("unexpected response from Meraki API: cellularGatewayUplinks is nil") + } + for _, device := range *cellularGatewayUplinks { if device.Uplinks == nil { continue @@ -99,7 +111,10 @@ func getDeviceUplinks(client *meraki.Client, organizationID string, devices map[ uplinks = append(uplinks, uplink) } - devices[Serial(device.Serial)].uplinks = uplinks + deviceObj, ok := devices[Serial(device.Serial)] + if ok && deviceObj != nil { + devices[Serial(device.Serial)].uplinks = uplinks + } } return nil @@ -108,11 +123,14 @@ func getDeviceUplinks(client *meraki.Client, organizationID string, devices map[ func reportUplinkMetrics(reporter mb.ReporterV2, organizationID string, devices map[Serial]*Device) { metrics := []mapstr.M{} for _, device := range devices { - if len(device.uplinks) == 0 { + if device == nil || device.details == nil || len(device.uplinks) == 0 { continue } for _, uplink := range device.uplinks { + if uplink == nil { + continue + } if uplink.lossAndLatency != nil { // each loss and latency metric can have multiple values per collection. // we report each value as it's own (smaller) metric event, containing From d40ca54d9de1f9deeefc7409aff267288bf8f447 Mon Sep 17 00:00:00 2001 From: Khushi Jain Date: Fri, 21 Feb 2025 09:59:37 +0530 Subject: [PATCH 13/41] [filebeat] Compare documents ingested by fb in normal vs otel mode (#42777) * [filebeat] Compare documents ingested by fb in normal vs otel mode * fix ci * fix tests --- .../beatconverter/beatconverter_test.go | 4 + .../outputs/elasticsearch/config_otel.go | 4 + .../outputs/elasticsearch/config_otel_test.go | 6 + .../filebeat/tests/integration/otel_test.go | 124 ++++++++++++++---- 4 files changed, 111 insertions(+), 27 deletions(-) diff --git a/libbeat/otelbeat/beatconverter/beatconverter_test.go b/libbeat/otelbeat/beatconverter/beatconverter_test.go index 668b7a5238fc..2384fc5b5832 100644 --- a/libbeat/otelbeat/beatconverter/beatconverter_test.go +++ b/libbeat/otelbeat/beatconverter/beatconverter_test.go @@ -47,6 +47,8 @@ exporters: batcher: enabled: true max_size_items: 1600 + mapping: + mode: bodymap ` func TestConverter(t *testing.T) { @@ -187,6 +189,8 @@ exporters: batcher: enabled: true max_size_items: 1600 + mapping: + mode: bodymap receivers: filebeatreceiver: filebeat: diff --git a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go index a1a40d9276fc..fea41bc3add4 100644 --- a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go +++ b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go @@ -150,6 +150,10 @@ func ToOTelConfig(output *config.C) (map[string]any, error) { "enabled": true, "max_size_items": escfg.BulkMaxSize, // bulk_max_size }, + + "mapping": map[string]any{ + "mode": "bodymap", + }, } setIfNotNil(otelYAMLCfg, "headers", escfg.Headers) // headers diff --git a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go index b5c673683834..2e91af68dd75 100644 --- a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go +++ b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go @@ -76,6 +76,8 @@ headers: batcher: enabled: true max_size_items: 1600 +mapping: + mode: bodymap ` input := newFromYamlString(t, beatCfg) cfg := config.MustNewConfigFrom(input.ToStringMap()) @@ -111,6 
+113,8 @@ logs_index: some-index password: changeme user: elastic timeout: 1m30s +mapping: + mode: bodymap ` tests := []struct { @@ -157,6 +161,8 @@ num_workers: 1 batcher: enabled: true max_size_items: 1600 +mapping: + mode: bodymap `, }, { diff --git a/x-pack/filebeat/tests/integration/otel_test.go b/x-pack/filebeat/tests/integration/otel_test.go index 785e9d151dbf..7ae0bfff7d52 100644 --- a/x-pack/filebeat/tests/integration/otel_test.go +++ b/x-pack/filebeat/tests/integration/otel_test.go @@ -16,9 +16,12 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/tests/integration" + "github.com/elastic/elastic-agent-libs/mapstr" "github.com/elastic/elastic-agent-libs/testing/estools" "github.com/elastic/go-elasticsearch/v8" ) @@ -29,7 +32,7 @@ filebeat.inputs: id: filestream-input-id enabled: true file_identity.native: ~ - prospector.scanner.fingerprint.enabled: false + prospector.scanner.fingerprint.enabled: false paths: - %s output: @@ -41,11 +44,18 @@ output: password: testing index: %s queue.mem.flush.timeout: 0s +processors: + - add_host_metadata: ~ + - add_cloud_metadata: ~ + - add_docker_metadata: ~ + - add_kubernetes_metadata: ~ ` func TestFilebeatOTelE2E(t *testing.T) { integration.EnsureESIsRunning(t) + numEvents := 1 + // start filebeat in otel mode filebeatOTel := integration.NewBeat( t, "filebeat-otel", @@ -55,29 +65,25 @@ func TestFilebeatOTelE2E(t *testing.T) { logFilePath := filepath.Join(filebeatOTel.TempDir(), "log.log") filebeatOTel.WriteConfigFile(fmt.Sprintf(beatsCfgFile, logFilePath, "logs-integration-default")) + writeEventsToLogFile(t, logFilePath, numEvents) + filebeatOTel.Start() - logFile, err := os.Create(logFilePath) - if err != nil { - t.Fatalf("could not create file '%s': %s", logFilePath, err) - } - - numEvents := 5 - - // write events to log file - for i := 0; i < numEvents; i++ { - msg := fmt.Sprintf("Line %d", i) - _, err = logFile.Write([]byte(msg + "\n")) - require.NoErrorf(t, err, "failed to write line %d to temp file", i) - } - - if err := logFile.Sync(); err != nil { - t.Fatalf("could not sync log file '%s': %s", logFilePath, err) - } - if err := logFile.Close(); err != nil { - t.Fatalf("could not close log file '%s': %s", logFilePath, err) - } + // start filebeat + filebeat := integration.NewBeat( + t, + "filebeat", + "../../filebeat.test", + ) + logFilePath = filepath.Join(filebeat.TempDir(), "log.log") + writeEventsToLogFile(t, logFilePath, numEvents) + s := fmt.Sprintf(beatsCfgFile, logFilePath, "logs-filebeat-default") + s = s + ` +setup.template.name: logs-filebeat-default +setup.template.pattern: logs-filebeat-default +` - filebeatOTel.Start() + filebeat.WriteConfigFile(s) + filebeat.Start() // prepare to query ES esCfg := elasticsearch.Config{ @@ -93,19 +99,83 @@ func TestFilebeatOTelE2E(t *testing.T) { es, err := elasticsearch.NewClient(esCfg) require.NoError(t, err) - actualHits := &struct{ Hits int }{} + var filebeatDocs estools.Documents + var otelDocs estools.Documents // wait for logs to be published require.Eventually(t, func() bool { findCtx, findCancel := context.WithTimeout(context.Background(), 10*time.Second) defer findCancel() - OTelDocs, err := estools.GetAllLogsForIndexWithContext(findCtx, es, ".ds-logs-integration-default*") + otelDocs, err = estools.GetAllLogsForIndexWithContext(findCtx, es, ".ds-logs-integration-default*") require.NoError(t, err) - actualHits.Hits = OTelDocs.Hits.Total.Value - return 
actualHits.Hits == numEvents + filebeatDocs, err = estools.GetAllLogsForIndexWithContext(findCtx, es, ".ds-logs-filebeat-default*") + require.NoError(t, err) + + return otelDocs.Hits.Total.Value >= numEvents && filebeatDocs.Hits.Total.Value >= numEvents }, - 2*time.Minute, 1*time.Second, numEvents, actualHits.Hits) + 2*time.Minute, 1*time.Second, fmt.Sprintf("Number of hits %d not equal to number of events for %d", filebeatDocs.Hits.Total.Value, numEvents)) + + filebeatDoc := filebeatDocs.Hits.Hits[0].Source + otelDoc := otelDocs.Hits.Hits[0].Source + ignoredFields := []string{ + // Expected to change between agentDocs and OtelDocs + "@timestamp", + "agent.ephemeral_id", + "agent.id", + "log.file.inode", + "log.file.path", + } + assertMapsEqual(t, filebeatDoc, otelDoc, ignoredFields, "expected documents to be equal") +} + +func writeEventsToLogFile(t *testing.T, filename string, numEvents int) { + t.Helper() + logFile, err := os.Create(filename) + if err != nil { + t.Fatalf("could not create file '%s': %s", filename, err) + } + // write events to log file + for i := 0; i < numEvents; i++ { + msg := fmt.Sprintf("Line %d", i) + _, err = logFile.Write([]byte(msg + "\n")) + require.NoErrorf(t, err, "failed to write line %d to temp file", i) + } + + if err := logFile.Sync(); err != nil { + t.Fatalf("could not sync log file '%s': %s", filename, err) + } + if err := logFile.Close(); err != nil { + t.Fatalf("could not close log file '%s': %s", filename, err) + } +} + +func assertMapsEqual(t *testing.T, m1, m2 mapstr.M, ignoredFields []string, msg string) { + t.Helper() + + flatM1 := m1.Flatten() + flatM2 := m2.Flatten() + for _, f := range ignoredFields { + hasKeyM1, _ := flatM1.HasKey(f) + hasKeyM2, _ := flatM2.HasKey(f) + + if !hasKeyM1 && !hasKeyM2 { + assert.Failf(t, msg, "ignored field %q does not exist in either map, please remove it from the ignored fields", f) + } + + // If the ignored field exists and is equal in both maps then it shouldn't be ignored + if hasKeyM1 && hasKeyM2 { + valM1, _ := flatM1.GetValue(f) + valM2, _ := flatM2.GetValue(f) + if valM1 == valM2 { + assert.Failf(t, msg, "ignored field %q is equal in both maps, please remove it from the ignored fields", f) + } + } + + flatM1.Delete(f) + flatM2.Delete(f) + } + require.Equal(t, "", cmp.Diff(flatM1, flatM2), "expected maps to be equal") } From 2f3df16948af7e758eddf35cba47cdf0a5edd4d6 Mon Sep 17 00:00:00 2001 From: Vihas Makwana <121151420+VihasMakwana@users.noreply.github.com> Date: Fri, 21 Feb 2025 18:26:28 +0530 Subject: [PATCH 14/41] [fbreceiver] - Fix batcher's configuration (#42797) * fix: batcher min_size_items * update tests * set min_size_items to 0 * fix tests * fix tests * fix test --- libbeat/otelbeat/beatconverter/beatconverter_test.go | 2 ++ .../oteltranslate/outputs/elasticsearch/config_otel.go | 1 + .../oteltranslate/outputs/elasticsearch/config_otel_test.go | 6 ++++++ 3 files changed, 9 insertions(+) diff --git a/libbeat/otelbeat/beatconverter/beatconverter_test.go b/libbeat/otelbeat/beatconverter/beatconverter_test.go index 2384fc5b5832..d2093a7722de 100644 --- a/libbeat/otelbeat/beatconverter/beatconverter_test.go +++ b/libbeat/otelbeat/beatconverter/beatconverter_test.go @@ -47,6 +47,7 @@ exporters: batcher: enabled: true max_size_items: 1600 + min_size_items: 0 mapping: mode: bodymap ` @@ -189,6 +190,7 @@ exporters: batcher: enabled: true max_size_items: 1600 + min_size_items: 0 mapping: mode: bodymap receivers: diff --git a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go 
b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go index fea41bc3add4..38cf2c7e43f5 100644 --- a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go +++ b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel.go @@ -149,6 +149,7 @@ func ToOTelConfig(output *config.C) (map[string]any, error) { "batcher": map[string]any{ "enabled": true, "max_size_items": escfg.BulkMaxSize, // bulk_max_size + "min_size_items": 0, // 0 means immediately trigger a flush }, "mapping": map[string]any{ diff --git a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go index 2e91af68dd75..9da9d93444bf 100644 --- a/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go +++ b/libbeat/otelbeat/oteltranslate/outputs/elasticsearch/config_otel_test.go @@ -76,6 +76,7 @@ headers: batcher: enabled: true max_size_items: 1600 + min_size_items: 0 mapping: mode: bodymap ` @@ -129,6 +130,7 @@ num_workers: 1 batcher: enabled: true max_size_items: 1600 + min_size_items: 0 `, }, { @@ -139,6 +141,7 @@ num_workers: 4 batcher: enabled: true max_size_items: 1600 + min_size_items: 0 `, }, { @@ -161,6 +164,7 @@ num_workers: 1 batcher: enabled: true max_size_items: 1600 + min_size_items: 0 mapping: mode: bodymap `, @@ -173,6 +177,7 @@ num_workers: 1 batcher: enabled: true max_size_items: 50 + min_size_items: 0 `, }, { @@ -183,6 +188,7 @@ num_workers: 0 batcher: enabled: true max_size_items: 1600 + min_size_items: 0 `, }, } From b29f777cefce9e103dce5c1dd3fcf2ad2017f872 Mon Sep 17 00:00:00 2001 From: kruskall <99559985+kruskall@users.noreply.github.com> Date: Fri, 21 Feb 2025 16:27:06 +0100 Subject: [PATCH 15/41] feat: replace x/crypto/ssh with x/term (#42817) * feat: replace x/crypto/ssh with x/term x/crypto/ssh/terminal is deprecated and moved to x/term removing x/crypto usage also makes it easier to review fips compliance * Update password.go * Update password.go * Update password.go * Update password.go * Update password.go --- libbeat/common/cli/password.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/libbeat/common/cli/password.go b/libbeat/common/cli/password.go index c9546a354c35..25c19c33ac4f 100644 --- a/libbeat/common/cli/password.go +++ b/libbeat/common/cli/password.go @@ -18,14 +18,12 @@ package cli import ( + "errors" "fmt" "os" "strings" - "syscall" - - "errors" - "golang.org/x/crypto/ssh/terminal" + "golang.org/x/term" ) type method func(m string) (string, error) @@ -61,11 +59,13 @@ func ReadPassword(def string) (string, error) { } func stdin(p string) (string, error) { + //nolint:forbidigo // ignore fmt.Print("Enter password: ") - bytePassword, err := terminal.ReadPassword(int(syscall.Stdin)) + bytePassword, err := term.ReadPassword(int(os.Stdin.Fd())) if err != nil { return "", fmt.Errorf("reading password input: %w", err) } + //nolint:forbidigo // ignore fmt.Println() return string(bytePassword), nil } From 99c6ab4fd45ed09d12581478b62b607962d07330 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Mon, 24 Feb 2025 10:58:52 +0100 Subject: [PATCH 16/41] Add fallback to attach if session exists (#42847) --- CHANGELOG.next.asciidoc | 1 + x-pack/filebeat/input/etw/input.go | 27 +++++++++++++++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 04c3a9bd2c42..9d9c9e36692a 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -421,6 +421,7 @@ 
otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] - Journald `include_matches.match` now accepts `+` to represent a logical disjunction (OR) {issue}40185[40185] {pull}42517[42517] - The journald input is now generally available. {pull}42107[42107] - Add metrics for number of events and pages published by HTTPJSON input. {issue}42340[42340] {pull}42442[42442] +- Add `etw` input fallback to attach an already existing session. {pull}42847[42847] *Auditbeat* diff --git a/x-pack/filebeat/input/etw/input.go b/x-pack/filebeat/input/etw/input.go index b41e7347a3eb..fd2272b3d4ab 100644 --- a/x-pack/filebeat/input/etw/input.go +++ b/x-pack/filebeat/input/etw/input.go @@ -7,6 +7,7 @@ package etw import ( + "errors" "fmt" "math" "strconv" @@ -124,20 +125,26 @@ func (e *etwInput) Run(ctx input.Context, publisher stateless.Publisher) error { // Handle realtime session creation or attachment if e.etwSession.Realtime { - if !e.etwSession.NewSession { + switch e.etwSession.NewSession { + case true: + // Create a new realtime session + // If it fails with ERROR_ALREADY_EXISTS we try to attach to it + createErr := e.operator.createRealtimeSession(e.etwSession) + if createErr == nil { + e.log.Debug("created new session") + break + } + if !errors.Is(createErr, etw.ERROR_ALREADY_EXISTS) { + return fmt.Errorf("realtime session could not be created: %w", createErr) + } + e.log.Debug("session already exists, trying to attach to it") + fallthrough + case false: // Attach to an existing session - err = e.operator.attachToExistingSession(e.etwSession) - if err != nil { + if err := e.operator.attachToExistingSession(e.etwSession); err != nil { return fmt.Errorf("unable to retrieve handler: %w", err) } e.log.Debug("attached to existing session") - } else { - // Create a new realtime session - err = e.operator.createRealtimeSession(e.etwSession) - if err != nil { - return fmt.Errorf("realtime session could not be created: %w", err) - } - e.log.Debug("created new session") } } From 5409677da6682c0a4aae8b675658f3a893099a96 Mon Sep 17 00:00:00 2001 From: Olga Naydyonock Date: Mon, 24 Feb 2025 11:59:55 +0200 Subject: [PATCH 17/41] Added binfmt to beats packaging step (#42825) Added binfmt docker to run before packaging for multiarch builds --------- Co-authored-by: Dimitrios Liappis Co-authored-by: Victor Martinez --- .buildkite/auditbeat/auditbeat-pipeline.yml | 8 ++------ .buildkite/filebeat/filebeat-pipeline.yml | 6 ++---- .buildkite/heartbeat/heartbeat-pipeline.yml | 6 ++---- .buildkite/metricbeat/pipeline.yml | 6 ++---- .buildkite/packetbeat/pipeline.packetbeat.yml | 6 ++---- .buildkite/scripts/packaging/packaging.sh | 19 +++++++++++++++++++ .buildkite/winlogbeat/pipeline.winlogbeat.yml | 3 +-- .../x-pack/pipeline.xpack.agentbeat.yml | 4 +--- .../x-pack/pipeline.xpack.auditbeat.yml | 6 ++---- .../x-pack/pipeline.xpack.dockerlogbeat.yml | 6 ++---- .buildkite/x-pack/pipeline.xpack.filebeat.yml | 6 ++---- .../x-pack/pipeline.xpack.heartbeat.yml | 6 ++---- .../x-pack/pipeline.xpack.metricbeat.yml | 6 ++---- .../x-pack/pipeline.xpack.osquerybeat.yml | 3 +-- .../x-pack/pipeline.xpack.packetbeat.yml | 6 ++---- .../x-pack/pipeline.xpack.winlogbeat.yml | 3 +-- 16 files changed, 45 insertions(+), 55 deletions(-) create mode 100755 .buildkite/scripts/packaging/packaging.sh diff --git a/.buildkite/auditbeat/auditbeat-pipeline.yml b/.buildkite/auditbeat/auditbeat-pipeline.yml index 47a00b2c96d4..370ca6096bba 100644 --- a/.buildkite/auditbeat/auditbeat-pipeline.yml +++ b/.buildkite/auditbeat/auditbeat-pipeline.yml @@ 
-454,9 +454,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - set -euo pipefail - cd auditbeat - mage package + .buildkite/scripts/packaging/packaging.sh auditbeat retry: automatic: - limit: 1 @@ -475,9 +473,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - set -euo pipefail - cd auditbeat - mage package + .buildkite/scripts/packaging/packaging.sh auditbeat retry: automatic: - limit: 1 diff --git a/.buildkite/filebeat/filebeat-pipeline.yml b/.buildkite/filebeat/filebeat-pipeline.yml index 92b007cfdf38..7361ad275f47 100644 --- a/.buildkite/filebeat/filebeat-pipeline.yml +++ b/.buildkite/filebeat/filebeat-pipeline.yml @@ -399,8 +399,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd filebeat - mage package + .buildkite/scripts/packaging/packaging.sh filebeat retry: automatic: - limit: 1 @@ -421,8 +420,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd filebeat - mage package + .buildkite/scripts/packaging/packaging.sh filebeat retry: automatic: - limit: 1 diff --git a/.buildkite/heartbeat/heartbeat-pipeline.yml b/.buildkite/heartbeat/heartbeat-pipeline.yml index 13b0c8035b4f..7d07f804f2e1 100644 --- a/.buildkite/heartbeat/heartbeat-pipeline.yml +++ b/.buildkite/heartbeat/heartbeat-pipeline.yml @@ -343,8 +343,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd heartbeat - mage package + .buildkite/scripts/packaging/packaging.sh heartbeat retry: automatic: - limit: 1 @@ -363,8 +362,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd heartbeat - mage package + .buildkite/scripts/packaging/packaging.sh heartbeat retry: automatic: - limit: 1 diff --git a/.buildkite/metricbeat/pipeline.yml b/.buildkite/metricbeat/pipeline.yml index 674e55c5207f..7082869ba665 100644 --- a/.buildkite/metricbeat/pipeline.yml +++ b/.buildkite/metricbeat/pipeline.yml @@ -399,8 +399,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd metricbeat - mage package + .buildkite/scripts/packaging/packaging.sh metricbeat retry: automatic: - limit: 1 @@ -421,8 +420,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd metricbeat - mage package + .buildkite/scripts/packaging/packaging.sh metricbeat retry: automatic: - limit: 1 diff --git a/.buildkite/packetbeat/pipeline.packetbeat.yml b/.buildkite/packetbeat/pipeline.packetbeat.yml index 31cdecd4ae01..d0b03bd9388e 100644 --- a/.buildkite/packetbeat/pipeline.packetbeat.yml +++ b/.buildkite/packetbeat/pipeline.packetbeat.yml @@ -366,8 +366,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd packetbeat - mage package + .buildkite/scripts/packaging/packaging.sh packetbeat retry: automatic: - limit: 1 @@ -388,8 +387,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd packetbeat - mage package + .buildkite/scripts/packaging/packaging.sh packetbeat retry: automatic: - limit: 1 diff --git a/.buildkite/scripts/packaging/packaging.sh b/.buildkite/scripts/packaging/packaging.sh new file mode 100755 index 000000000000..43d3ae19bfa5 --- /dev/null +++ b/.buildkite/scripts/packaging/packaging.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +# +# Centralise the mage package for a given beat in Buildkite. 
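+# Invoked from the per-beat Buildkite pipelines with the beat directory
+# as the only argument, for example:
+#
+#   .buildkite/scripts/packaging/packaging.sh x-pack/filebeat
+#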
+# It enables multi-arch builds to avoid the exec format errors when
+# attempting to build arm64 packages on amd64 workers.
+# For further details, see https://github.com/elastic/elastic-agent/pull/6948
+# and https://github.com/elastic/golang-crossbuild/pull/507
+#
+
+set -ueo pipefail
+
+
+BEAT_DIR=${1:?"Error: Beat directory must be specified."}
+
+# Use newer multiarch support for packaging
+docker run --privileged --rm tonistiigi/binfmt:master --install all
+
+cd "$BEAT_DIR"
+mage package
diff --git a/.buildkite/winlogbeat/pipeline.winlogbeat.yml b/.buildkite/winlogbeat/pipeline.winlogbeat.yml
index d8986a72a54e..e205fb9e8354 100644
--- a/.buildkite/winlogbeat/pipeline.winlogbeat.yml
+++ b/.buildkite/winlogbeat/pipeline.winlogbeat.yml
@@ -209,8 +209,7 @@ steps:
     env:
       PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
     command: |
-      cd winlogbeat
-      mage package
+      .buildkite/scripts/packaging/packaging.sh winlogbeat
     retry:
       automatic:
         - limit: 1
diff --git a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml
index 708073449ffc..d59c6025a276 100644
--- a/.buildkite/x-pack/pipeline.xpack.agentbeat.yml
+++ b/.buildkite/x-pack/pipeline.xpack.agentbeat.yml
@@ -70,9 +70,7 @@ steps:
       PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
       SNAPSHOT: true
     command: |
-      set -euo pipefail
-      cd x-pack/agentbeat
-      mage package
+      .buildkite/scripts/packaging/packaging.sh x-pack/agentbeat
     artifact_paths:
       - x-pack/agentbeat/build/distributions/**/*
       - "x-pack/agentbeat/build/*.xml"
diff --git a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml
index cca156252436..55b78659bd93 100644
--- a/.buildkite/x-pack/pipeline.xpack.auditbeat.yml
+++ b/.buildkite/x-pack/pipeline.xpack.auditbeat.yml
@@ -374,8 +374,7 @@ steps:
     env:
       PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
     command: |
-      cd x-pack/auditbeat
-      mage package
+      .buildkite/scripts/packaging/packaging.sh x-pack/auditbeat
     retry:
       automatic:
         - limit: 1
@@ -396,8 +395,7 @@ steps:
       PLATFORMS: "linux/arm64"
       PACKAGES: "docker"
     command: |
-      cd x-pack/auditbeat
-      mage package
+      .buildkite/scripts/packaging/packaging.sh x-pack/auditbeat
     retry:
       automatic:
         - limit: 1
diff --git a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml
index 54b3451b23e0..85b8634b90f0 100644
--- a/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml
+++ b/.buildkite/x-pack/pipeline.xpack.dockerlogbeat.yml
@@ -117,8 +117,7 @@ steps:
     env:
       PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
     command: |
-      cd x-pack/dockerlogbeat
-      mage package
+      .buildkite/scripts/packaging/packaging.sh x-pack/dockerlogbeat
     retry:
       automatic:
         - limit: 1
@@ -137,8 +136,7 @@ steps:
       PLATFORMS: "linux/arm64"
       PACKAGES: "docker"
     command: |
-      cd x-pack/dockerlogbeat
-      mage package
+      .buildkite/scripts/packaging/packaging.sh x-pack/dockerlogbeat
     retry:
       automatic:
         - limit: 1
diff --git a/.buildkite/x-pack/pipeline.xpack.filebeat.yml b/.buildkite/x-pack/pipeline.xpack.filebeat.yml
index 2dc0c8186543..2ba553199c71 100644
--- a/.buildkite/x-pack/pipeline.xpack.filebeat.yml
+++ b/.buildkite/x-pack/pipeline.xpack.filebeat.yml
@@ -430,8 +430,7 @@ steps:
     env:
       PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64"
     command: |
-      cd x-pack/filebeat
-      mage package
+      .buildkite/scripts/packaging/packaging.sh x-pack/filebeat
     retry:
       automatic:
         - limit: 1
@@
-452,8 +451,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd x-pack/filebeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/filebeat retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml index 839bfc8a35a4..31b224a0f5ae 100644 --- a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml @@ -361,8 +361,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd x-pack/heartbeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/heartbeat retry: automatic: - limit: 1 @@ -383,8 +382,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd x-pack/heartbeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/heartbeat retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml index a074c681a968..156a3b5a5462 100644 --- a/.buildkite/x-pack/pipeline.xpack.metricbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.metricbeat.yml @@ -408,8 +408,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd x-pack/metricbeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/metricbeat retry: automatic: - limit: 1 @@ -430,8 +429,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd x-pack/metricbeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/metricbeat retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml index c06f473c000c..89021df0e166 100644 --- a/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.osquerybeat.yml @@ -283,8 +283,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd x-pack/osquerybeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/osquerybeat retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml index 1620bc2320ae..b17749dc4cd6 100644 --- a/.buildkite/x-pack/pipeline.xpack.packetbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.packetbeat.yml @@ -451,8 +451,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd x-pack/packetbeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/packetbeat retry: automatic: - limit: 1 @@ -473,8 +472,7 @@ steps: PLATFORMS: "linux/arm64" PACKAGES: "docker" command: | - cd x-pack/packetbeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/packetbeat retry: automatic: - limit: 1 diff --git a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml index d0ffd9fa89c9..407b5e297120 100644 --- a/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.winlogbeat.yml @@ -252,8 +252,7 @@ steps: env: PLATFORMS: "+all linux/amd64 linux/arm64 windows/amd64 darwin/amd64 darwin/arm64" command: | - cd x-pack/winlogbeat - mage package + .buildkite/scripts/packaging/packaging.sh x-pack/winlogbeat retry: automatic: - limit: 1 From 0807e5b97e7707ff18ab0cc259e9e37a3b707fc0 Mon Sep 17 00:00:00 2001 From: Marc Guasch Date: Mon, 24 Feb 2025 14:22:06 +0100 Subject: [PATCH 18/41] 
Improve error handling (#42826)

---
 CHANGELOG.next.asciidoc               |   2 +
 winlogbeat/eventlog/errors_unix.go    |   7 +-
 winlogbeat/eventlog/errors_windows.go |  19 +++--
 winlogbeat/eventlog/runner.go         | 118 ++++++++++++++++----------
 4 files changed, 87 insertions(+), 59 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 9d9c9e36692a..62b8ef12b66c 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -279,6 +279,8 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403]
 - Fix message handling in the experimental api. {issue}19338[19338] {pull}41730[41730]
 - Sync missing changes in modules pipelines. {pull}42619[42619]
+- Reset EventLog if an EOF error is encountered. {pull}42826[42826]
+- Implement backoff on error retries. {pull}42826[42826]

 *Elastic Logging Plugin*

diff --git a/winlogbeat/eventlog/errors_unix.go b/winlogbeat/eventlog/errors_unix.go
index 2821a18c99f5..c52a95f52709 100644
--- a/winlogbeat/eventlog/errors_unix.go
+++ b/winlogbeat/eventlog/errors_unix.go
@@ -22,11 +22,6 @@ package eventlog
 // IsRecoverable returns a boolean indicating whether the error represents
 // a condition where the Windows Event Log session can be recovered through a
 // reopening of the handle (Close, Open).
-func IsRecoverable(err error) bool {
-	return false
-}
-
-// IsChannelNotFound returns true if the error indicates the channel was not found.
-func IsChannelNotFound(err error) bool {
+func IsRecoverable(error, bool) bool {
 	return false
 }
diff --git a/winlogbeat/eventlog/errors_windows.go b/winlogbeat/eventlog/errors_windows.go
index cec248bdde0d..d7734ab8f3a6 100644
--- a/winlogbeat/eventlog/errors_windows.go
+++ b/winlogbeat/eventlog/errors_windows.go
@@ -19,6 +19,7 @@ package eventlog

 import (
 	"errors"
+	"io"

 	win "github.com/elastic/beats/v7/winlogbeat/sys/wineventlog"
 )
@@ -28,13 +29,13 @@ import (
 // reopening of the handle (Close, Open).
 //
 //nolint:errorlint // These are never wrapped.
-func IsRecoverable(err error) bool {
-	return err == win.ERROR_INVALID_HANDLE || err == win.RPC_S_SERVER_UNAVAILABLE ||
-		err == win.RPC_S_CALL_CANCELLED || err == win.ERROR_EVT_QUERY_RESULT_STALE ||
-		err == win.ERROR_INVALID_PARAMETER || err == win.ERROR_EVT_PUBLISHER_DISABLED
-}
-
-// IsChannelNotFound returns true if the error indicates the channel was not found.
-func IsChannelNotFound(err error) bool { - return errors.Is(err, win.ERROR_EVT_CHANNEL_NOT_FOUND) +func IsRecoverable(err error, isFile bool) bool { + return err == win.ERROR_INVALID_HANDLE || + err == win.RPC_S_SERVER_UNAVAILABLE || + err == win.RPC_S_CALL_CANCELLED || + err == win.ERROR_EVT_QUERY_RESULT_STALE || + err == win.ERROR_INVALID_PARAMETER || + err == win.ERROR_EVT_PUBLISHER_DISABLED || + (!isFile && errors.Is(err, io.EOF)) || + (!isFile && errors.Is(err, win.ERROR_EVT_CHANNEL_NOT_FOUND)) } diff --git a/winlogbeat/eventlog/runner.go b/winlogbeat/eventlog/runner.go index 0486d13ce545..7703e374563b 100644 --- a/winlogbeat/eventlog/runner.go +++ b/winlogbeat/eventlog/runner.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "math" "time" "github.com/elastic/beats/v7/winlogbeat/checkpoint" @@ -45,65 +46,52 @@ func Run( // to shut down or when returning after io.EOF cancelCtx, cancelFn := ctxtool.WithFunc(ctx, func() { if err := api.Close(); err != nil { - log.Errorw("Error while closing Windows Event Log access", "error", err) + log.Errorw("error while closing Windows Event Log access", "error", err) } }) defer cancelFn() - // Flag used to detect repeat "channel not found" errors, eliminating log spam. - channelNotFoundErrDetected := false + openErrHandler := newExponentialLimitedBackoff(log, 5*time.Second, time.Minute, func(err error) bool { + if IsRecoverable(err, api.IsFile()) { + log.Errorw("encountered recoverable error when opening Windows Event Log", "error", err) + return true + } + return false + }) -runLoop: - for { - //nolint:nilerr // only log error if we are not shutting down - if cancelCtx.Err() != nil { - return nil + readErrHandler := newExponentialLimitedBackoff(log, 5*time.Second, time.Minute, func(err error) bool { + if IsRecoverable(err, api.IsFile()) { + log.Errorw("encountered recoverable error when reading from Windows Event Log", "error", err) + if resetErr := api.Reset(); resetErr != nil { + log.Errorw("error resetting Windows Event Log handle", "error", resetErr) + } + return true } + return false + }) +runLoop: + for cancelCtx.Err() == nil { openErr := api.Open(evtCheckpoint) - - switch { - case IsRecoverable(openErr): - log.Errorw("Encountered recoverable error when opening Windows Event Log", "error", openErr) - _ = timed.Wait(cancelCtx, 5*time.Second) - continue - case !api.IsFile() && IsChannelNotFound(openErr): - if !channelNotFoundErrDetected { - log.Errorw("Encountered channel not found error when opening Windows Event Log", "error", openErr) - } else { - log.Debugw("Encountered channel not found error when opening Windows Event Log", "error", openErr) + if openErr != nil { + if openErrHandler.backoff(cancelCtx, openErr) { + continue } - channelNotFoundErrDetected = true - _ = timed.Wait(cancelCtx, 5*time.Second) - continue - case openErr != nil: return fmt.Errorf("failed to open Windows Event Log channel %q: %w", api.Channel(), openErr) } - channelNotFoundErrDetected = false - log.Debug("Windows Event Log opened successfully") + log.Debug("windows event log opened successfully") // read loop for cancelCtx.Err() == nil { - records, err := api.Read() - if IsRecoverable(err) { - log.Errorw("Encountered recoverable error when reading from Windows Event Log", "error", err) - if resetErr := api.Reset(); resetErr != nil { - log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) - } - continue runLoop - } - if !api.IsFile() && IsChannelNotFound(err) { - log.Errorw("Encountered channel not found error when reading from Windows Event 
Log", "error", err) - if resetErr := api.Reset(); resetErr != nil { - log.Errorw("Error resetting Windows Event Log handle", "error", resetErr) + records, readErr := api.Read() + if readErr != nil { + if readErrHandler.backoff(cancelCtx, readErr) { + continue runLoop } - continue runLoop - } - if err != nil { - if errors.Is(err, io.EOF) { - log.Debugw("End of Winlog event stream reached", "error", err) + if errors.Is(readErr, io.EOF) { + log.Debugw("end of Winlog event stream reached", "error", readErr) return nil } @@ -112,9 +100,11 @@ runLoop: return nil } - log.Errorw("Error occurred while reading from Windows Event Log", "error", err) - return err + log.Errorw("error occurred while reading from Windows Event Log", "error", readErr) + + return readErr } + if len(records) == 0 { _ = timed.Wait(cancelCtx, time.Second) continue @@ -125,4 +115,44 @@ runLoop: } } } + return nil +} + +type exponentialLimitedBackoff struct { + log *logp.Logger + initialDelay time.Duration + maxDelay time.Duration + currentDelay time.Duration + backoffCondition func(error) bool +} + +func newExponentialLimitedBackoff(log *logp.Logger, initialDelay, maxDelay time.Duration, errCondition func(error) bool) *exponentialLimitedBackoff { + b := &exponentialLimitedBackoff{ + log: log, + initialDelay: initialDelay, + maxDelay: maxDelay, + backoffCondition: errCondition, + } + b.reset() + return b +} + +func (b *exponentialLimitedBackoff) backoff(ctx context.Context, err error) bool { + if !b.backoffCondition(err) { + b.reset() + return false + } + b.log.Debugf("backing off, waiting for %v", b.currentDelay) + select { + case <-ctx.Done(): + return false + case <-time.After(b.currentDelay): + // Calculate the next delay, doubling it but not exceeding maxDelay + b.currentDelay = time.Duration(math.Min(float64(b.maxDelay), float64(b.currentDelay*2))) + return true + } +} + +func (b *exponentialLimitedBackoff) reset() { + b.currentDelay = b.initialDelay } From 0baabb300245d2757f1711b4d13d4c9cec21126e Mon Sep 17 00:00:00 2001 From: kruskall <99559985+kruskall@users.noreply.github.com> Date: Mon, 24 Feb 2025 23:28:29 +0100 Subject: [PATCH 19/41] feat(fips): disallow non-compliant crypto in fingerprint processor (#42598) * feat(fips): disallow non-compliant crypto in fingerprint processor do not allow md5 and sha1 config values in fingerprint processor * refactor: avoid duplicate maps and add tests * lint: fix linter issues --- .../fingerprint/fingerprint_test.go | 26 +++++++------- libbeat/processors/fingerprint/hash.go | 9 ++--- .../processors/fingerprint/hash_fips_test.go | 34 ++++++++++++++++++ libbeat/processors/fingerprint/hash_nofips.go | 35 ++++++++++++++++++ .../fingerprint/hash_nofips_test.go | 36 +++++++++++++++++++ 5 files changed, 121 insertions(+), 19 deletions(-) create mode 100644 libbeat/processors/fingerprint/hash_fips_test.go create mode 100644 libbeat/processors/fingerprint/hash_nofips.go create mode 100644 libbeat/processors/fingerprint/hash_nofips_test.go diff --git a/libbeat/processors/fingerprint/fingerprint_test.go b/libbeat/processors/fingerprint/fingerprint_test.go index 8d66762e479c..6e36ebfc41c5 100644 --- a/libbeat/processors/fingerprint/fingerprint_test.go +++ b/libbeat/processors/fingerprint/fingerprint_test.go @@ -19,7 +19,7 @@ package fingerprint import ( "fmt" - "math/rand" + "math/rand/v2" "strconv" "testing" "time" @@ -129,11 +129,11 @@ func TestHashMethods(t *testing.T) { "xxhash": {"37bc50682fba6686"}, } - for method, test := range tests { - t.Run(method, func(t *testing.T) { + for _, 
method := range hashes { + t.Run(method.Name, func(t *testing.T) { testConfig, err := config.NewConfigFrom(mapstr.M{ "fields": []string{"field1", "field2"}, - "method": method, + "method": method.Name, }) assert.NoError(t, err) @@ -150,7 +150,7 @@ func TestHashMethods(t *testing.T) { v, err := newEvent.GetValue("fingerprint") assert.NoError(t, err) - assert.Equal(t, test.expected, v) + assert.Equal(t, tests[method.Name].expected, v) }) } } @@ -212,16 +212,16 @@ func TestEncoding(t *testing.T) { tests := map[string]struct { expectedFingerprint string }{ - "hex": {"8934ca639027aab1ee9f3944d4d6bd1e"}, - "base32": {"RE2MUY4QE6VLD3U7HFCNJVV5DY======"}, - "base64": {"iTTKY5AnqrHunzlE1Na9Hg=="}, + "hex": {"49f15f7c03c606b4bdf43f60481842954ff7b45a020a22a1d0911d76f170c798"}, + "base32": {"JHYV67ADYYDLJPPUH5QEQGCCSVH7PNC2AIFCFIOQSEOXN4LQY6MA===="}, + "base64": {"SfFffAPGBrS99D9gSBhClU/3tFoCCiKh0JEddvFwx5g="}, } for encoding, test := range tests { t.Run(encoding, func(t *testing.T) { testConfig, err := config.NewConfigFrom(mapstr.M{ "fields": []string{"field2", "nested.field"}, - "method": "md5", + "method": "sha256", "encoding": encoding, }) assert.NoError(t, err) @@ -465,12 +465,12 @@ func TestProcessorStringer(t *testing.T) { testConfig, err := config.NewConfigFrom(mapstr.M{ "fields": []string{"field1"}, "encoding": "hex", - "method": "md5", + "method": "sha256", }) require.NoError(t, err) p, err := New(testConfig) require.NoError(t, err) - require.Equal(t, `fingerprint={"Method":"md5","Encoding":"hex","Fields":["field1"],"TargetField":"fingerprint","IgnoreMissing":false}`, fmt.Sprint(p)) + require.Equal(t, `fingerprint={"Method":"sha256","Encoding":"hex","Fields":["field1"],"TargetField":"fingerprint","IgnoreMissing":false}`, fmt.Sprint(p)) } func BenchmarkHashMethods(b *testing.B) { @@ -497,7 +497,7 @@ func BenchmarkHashMethods(b *testing.B) { } func nRandomEvents(num int) []beat.Event { - prng := rand.New(rand.NewSource(12345)) + prng := rand.New(rand.NewPCG(0, 12345)) const charset = "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + @@ -508,7 +508,7 @@ func nRandomEvents(num int) []beat.Event { events := make([]beat.Event, 0, num) for i := 0; i < num; i++ { for j := range b { - b[j] = charset[prng.Intn(charsetLen)] + b[j] = charset[prng.IntN(charsetLen)] } events = append(events, beat.Event{ Fields: mapstr.M{ diff --git a/libbeat/processors/fingerprint/hash.go b/libbeat/processors/fingerprint/hash.go index 1c8cf146a147..61f151486a3e 100644 --- a/libbeat/processors/fingerprint/hash.go +++ b/libbeat/processors/fingerprint/hash.go @@ -18,8 +18,6 @@ package fingerprint import ( - "crypto/md5" - "crypto/sha1" "crypto/sha256" "crypto/sha512" "hash" @@ -37,14 +35,13 @@ type hashMethod func() hash.Hash var hashes = map[string]namedHashMethod{} func init() { - for _, h := range []namedHashMethod{ - {Name: "md5", Hash: md5.New}, - {Name: "sha1", Hash: sha1.New}, + fipsApprovedHashes := []namedHashMethod{ {Name: "sha256", Hash: sha256.New}, {Name: "sha384", Hash: sha512.New384}, {Name: "sha512", Hash: sha512.New}, {Name: "xxhash", Hash: newXxHash}, - } { + } + for _, h := range fipsApprovedHashes { hashes[h.Name] = h } } diff --git a/libbeat/processors/fingerprint/hash_fips_test.go b/libbeat/processors/fingerprint/hash_fips_test.go new file mode 100644 index 000000000000..8beeb4b3e5f5 --- /dev/null +++ b/libbeat/processors/fingerprint/hash_fips_test.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build requirefips + +package fingerprint + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHashMethod(t *testing.T) { + require.Len(t, hashes, 4) + require.Contains(t, hashes, "sha256") + require.Contains(t, hashes, "sha384") + require.Contains(t, hashes, "sha512") + require.Contains(t, hashes, "xxhash") +} diff --git a/libbeat/processors/fingerprint/hash_nofips.go b/libbeat/processors/fingerprint/hash_nofips.go new file mode 100644 index 000000000000..16a002b84d14 --- /dev/null +++ b/libbeat/processors/fingerprint/hash_nofips.go @@ -0,0 +1,35 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !requirefips + +package fingerprint + +import ( + "crypto/md5" + "crypto/sha1" +) + +func init() { + nonFipsApprovedHashes := []namedHashMethod{ + {Name: "md5", Hash: md5.New}, + {Name: "sha1", Hash: sha1.New}, + } + for _, h := range nonFipsApprovedHashes { + hashes[h.Name] = h + } +} diff --git a/libbeat/processors/fingerprint/hash_nofips_test.go b/libbeat/processors/fingerprint/hash_nofips_test.go new file mode 100644 index 000000000000..865e6249e640 --- /dev/null +++ b/libbeat/processors/fingerprint/hash_nofips_test.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build !requirefips + +package fingerprint + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHashMethod(t *testing.T) { + require.Len(t, hashes, 6) + require.Contains(t, hashes, "md5") + require.Contains(t, hashes, "sha1") + require.Contains(t, hashes, "sha256") + require.Contains(t, hashes, "sha384") + require.Contains(t, hashes, "sha512") + require.Contains(t, hashes, "xxhash") +} From da802a88cbf970027b80599a86cb9af2e964adc4 Mon Sep 17 00:00:00 2001 From: Dan Kortschak Date: Tue, 25 Feb 2025 19:20:01 +1030 Subject: [PATCH 20/41] mod: update elastic/mito to version v1.17.0 (#42851) This adds: - two parameter tail function and its conjugate (elastic/mito#81) - array sum function (elastic/mito#82) --- CHANGELOG.next.asciidoc | 1 + NOTICE.txt | 4 +-- go.mod | 2 +- go.sum | 4 +-- .../filebeat/docs/inputs/input-cel.asciidoc | 4 ++- x-pack/filebeat/input/cel/input_test.go | 29 +++++++++++++++++++ 6 files changed, 38 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 62b8ef12b66c..0b58e035277b 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -424,6 +424,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] - The journald input is now generally available. {pull}42107[42107] - Add metrics for number of events and pages published by HTTPJSON input. {issue}42340[42340] {pull}42442[42442] - Add `etw` input fallback to attach an already existing session. {pull}42847[42847] +- Update CEL mito extensions to v1.17.0. {pull}42851[42851] *Auditbeat* diff --git a/NOTICE.txt b/NOTICE.txt index 4ff9df845df0..afc3813e3c76 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -15516,11 +15516,11 @@ limitations under the License. -------------------------------------------------------------------------------- Dependency : github.com/elastic/mito -Version: v1.16.0 +Version: v1.17.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.16.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/mito@v1.17.0/LICENSE: Apache License diff --git a/go.mod b/go.mod index cb9e95e37532..30f5522581b2 100644 --- a/go.mod +++ b/go.mod @@ -181,7 +181,7 @@ require ( github.com/elastic/go-elasticsearch/v8 v8.17.0 github.com/elastic/go-quark v0.3.0 github.com/elastic/go-sfdc v0.0.0-20241010131323-8e176480d727 - github.com/elastic/mito v1.16.0 + github.com/elastic/mito v1.17.0 github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 github.com/elastic/sarama v1.19.1-0.20241120141909-c7eabfcee7e5 github.com/elastic/tk-btf v0.1.0 diff --git a/go.sum b/go.sum index 19de7bd540aa..5d1702535981 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/elastic/gopacket v1.1.20-0.20241002174017-e8c5fda595e6 h1:VgOx6omXIMK github.com/elastic/gopacket v1.1.20-0.20241002174017-e8c5fda595e6/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/mito v1.16.0 h1:7UYy1OpJ8rlr4nzy/HDYQHuHjUIDMCofk5ICalYC2LA= -github.com/elastic/mito v1.16.0/go.mod h1:J+wCf4HccW2YoSFmZMGu+d06gN+WmnIlj5ehBqine74= +github.com/elastic/mito v1.17.0 h1:UEEFfQy5WhS6vVvMPMwHvdn5rH24eBJMb2ZOlGBkI5s= +github.com/elastic/mito v1.17.0/go.mod h1:nG5MoOsgJwVlglhlANiBFmHWqoIjrpbR5vy612wE8yE= 
github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015 h1:z8cC8GASpPo8yKlbnXI36HQ/BM9wYjhBPNbDjAWm0VU= github.com/elastic/mock-es v0.0.0-20240712014503-e5b47ece0015/go.mod h1:qH9DX/Dmflz6EAtaks/+2SsdQzecVAKE174Zl66hk7E= github.com/elastic/pkcs8 v1.0.0 h1:HhitlUKxhN288kcNcYkjW6/ouvuwJWd9ioxpjnD9jVA= diff --git a/x-pack/filebeat/docs/inputs/input-cel.asciidoc b/x-pack/filebeat/docs/inputs/input-cel.asciidoc index 42ca34dd57bb..b238545350c0 100644 --- a/x-pack/filebeat/docs/inputs/input-cel.asciidoc +++ b/x-pack/filebeat/docs/inputs/input-cel.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] :type: cel -:mito_version: v1.16.0 +:mito_version: v1.17.0 :mito_docs: https://pkg.go.dev/github.com/elastic/mito@{mito_version} [id="{beatname_lc}-input-{type}"] @@ -160,9 +160,11 @@ As noted above the `cel` input provides functions, macros, and global variables ** {mito_docs}/lib#hdr-Drop[Drop] ** {mito_docs}/lib#hdr-Drop_Empty[Drop Empty] ** {mito_docs}/lib#hdr-Flatten[Flatten] +** {mito_docs}/lib#hdr-Front[Front] ** {mito_docs}/lib#hdr-Keys[Keys] ** {mito_docs}/lib#hdr-Max[Max] ** {mito_docs}/lib#hdr-Min[Min] +** {mito_docs}/lib#hdr-Sum[Sum] ** {mito_docs}/lib#hdr-Tail[Tail] ** {mito_docs}/lib#hdr-Values[Values] ** {mito_docs}/lib#hdr-With[With] diff --git a/x-pack/filebeat/input/cel/input_test.go b/x-pack/filebeat/input/cel/input_test.go index 7ccfb9ccbee2..6c1f90a60dda 100644 --- a/x-pack/filebeat/input/cel/input_test.go +++ b/x-pack/filebeat/input/cel/input_test.go @@ -95,6 +95,35 @@ var inputTests = []struct { }, }}, }, + { + name: "hello_world_sum", + config: map[string]interface{}{ + "interval": 1, + "program": `{"events":[{"message":string(sum([1,2,3,4]))}]}`, + "state": nil, + "resource": map[string]interface{}{ + "url": "", + }, + }, + want: []map[string]interface{}{{ + "message": "10", + }}, + }, + { + name: "hello_world_front_and_tail_2", + config: map[string]interface{}{ + "interval": 1, + "program": `{"events":[{"message":front([1,2,3,4,5],2)}, {"message":tail([1,2,3,4,5],2)}]}`, + "state": nil, + "resource": map[string]interface{}{ + "url": "", + }, + }, + want: []map[string]interface{}{ + {"message": []any{1.0, 2.0}}, + {"message": []any{3.0, 4.0, 5.0}}, + }, + }, { name: "bad_events_type", config: map[string]interface{}{ From bbfa29021664ae4d5f4a67465593ec441dad6c13 Mon Sep 17 00:00:00 2001 From: kruskall <99559985+kruskall@users.noreply.github.com> Date: Tue, 25 Feb 2025 12:51:10 +0100 Subject: [PATCH 21/41] feat(fips): return an error when creating a kerberos client (#42597) * feat(fips): return an error when creating a kerberos client kerberos lib is implementing a lot of crypto :( * test: add kerberos client tests --------- Co-authored-by: Pierre HILBERT --- libbeat/common/transport/kerberos/client.go | 43 ++------------- .../common/transport/kerberos/client_fips.go | 29 ++++++++++ .../transport/kerberos/client_fips_test.go | 39 ++++++++++++++ .../transport/kerberos/client_nofips.go | 53 +++++++++++++++++++ .../transport/kerberos/client_nofips_test.go | 39 ++++++++++++++ 5 files changed, 163 insertions(+), 40 deletions(-) create mode 100644 libbeat/common/transport/kerberos/client_fips.go create mode 100644 libbeat/common/transport/kerberos/client_fips_test.go create mode 100644 libbeat/common/transport/kerberos/client_nofips.go create mode 100644 libbeat/common/transport/kerberos/client_nofips_test.go diff --git a/libbeat/common/transport/kerberos/client.go b/libbeat/common/transport/kerberos/client.go index 3561f235bd06..3c05d9c69930 100644 --- 
a/libbeat/common/transport/kerberos/client.go +++ b/libbeat/common/transport/kerberos/client.go @@ -18,48 +18,11 @@ package kerberos import ( - "fmt" "net/http" - - krbclient "github.com/jcmturner/gokrb5/v8/client" - krbconfig "github.com/jcmturner/gokrb5/v8/config" - "github.com/jcmturner/gokrb5/v8/keytab" - "github.com/jcmturner/gokrb5/v8/spnego" ) -type Client struct { - spClient *spnego.Client -} - -func NewClient(config *Config, httpClient *http.Client, esurl string) (*Client, error) { - var krbClient *krbclient.Client - krbConf, err := krbconfig.Load(config.ConfigPath) - if err != nil { - return nil, fmt.Errorf("error creating Kerberos client: %w", err) - } - - switch config.AuthType { - case authKeytab: - kTab, err := keytab.Load(config.KeyTabPath) - if err != nil { - return nil, fmt.Errorf("cannot load keytab file %s: %w", config.KeyTabPath, err) - } - krbClient = krbclient.NewWithKeytab(config.Username, config.Realm, kTab, krbConf) - case authPassword: - krbClient = krbclient.NewWithPassword(config.Username, config.Realm, config.Password, krbConf) - default: - return nil, InvalidAuthType - } - - return &Client{ - spClient: spnego.NewClient(krbClient, httpClient, ""), - }, nil -} - -func (c *Client) Do(req *http.Request) (*http.Response, error) { - return c.spClient.Do(req) -} +type Client interface { + Do(req *http.Request) (*http.Response, error) -func (c *Client) CloseIdleConnections() { - c.spClient.CloseIdleConnections() + CloseIdleConnections() } diff --git a/libbeat/common/transport/kerberos/client_fips.go b/libbeat/common/transport/kerberos/client_fips.go new file mode 100644 index 000000000000..13d0b8f74d1a --- /dev/null +++ b/libbeat/common/transport/kerberos/client_fips.go @@ -0,0 +1,29 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build requirefips + +package kerberos + +import ( + "errors" + "net/http" +) + +func NewClient(config *Config, httpClient *http.Client, esurl string) (Client, error) { + return nil, errors.New("kerberos is not supported in fips mode") +} diff --git a/libbeat/common/transport/kerberos/client_fips_test.go b/libbeat/common/transport/kerberos/client_fips_test.go new file mode 100644 index 000000000000..dc45a590786e --- /dev/null +++ b/libbeat/common/transport/kerberos/client_fips_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build requirefips + +package kerberos + +import ( + "net/http" + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewClient(t *testing.T) { + cfg, err := os.CreateTemp(t.TempDir(), "config") + require.NoError(t, err) + c, err := NewClient(&Config{ + AuthType: authPassword, + ConfigPath: cfg.Name(), + }, http.DefaultClient, "") + require.Nil(t, c) + require.EqualError(t, err, "kerberos is not supported in fips mode") +} diff --git a/libbeat/common/transport/kerberos/client_nofips.go b/libbeat/common/transport/kerberos/client_nofips.go new file mode 100644 index 000000000000..f734cb750164 --- /dev/null +++ b/libbeat/common/transport/kerberos/client_nofips.go @@ -0,0 +1,53 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !requirefips + +package kerberos + +import ( + "fmt" + "net/http" + + krbclient "github.com/jcmturner/gokrb5/v8/client" + krbconfig "github.com/jcmturner/gokrb5/v8/config" + "github.com/jcmturner/gokrb5/v8/keytab" + "github.com/jcmturner/gokrb5/v8/spnego" +) + +func NewClient(config *Config, httpClient *http.Client, esurl string) (Client, error) { + var krbClient *krbclient.Client + krbConf, err := krbconfig.Load(config.ConfigPath) + if err != nil { + return nil, fmt.Errorf("error creating Kerberos client: %w", err) + } + + switch config.AuthType { + case authKeytab: + kTab, err := keytab.Load(config.KeyTabPath) + if err != nil { + return nil, fmt.Errorf("cannot load keytab file %s: %w", config.KeyTabPath, err) + } + krbClient = krbclient.NewWithKeytab(config.Username, config.Realm, kTab, krbConf) + case authPassword: + krbClient = krbclient.NewWithPassword(config.Username, config.Realm, config.Password, krbConf) + default: + return nil, InvalidAuthType + } + + return spnego.NewClient(krbClient, httpClient, ""), nil +} diff --git a/libbeat/common/transport/kerberos/client_nofips_test.go b/libbeat/common/transport/kerberos/client_nofips_test.go new file mode 100644 index 000000000000..d64f9cc87f04 --- /dev/null +++ b/libbeat/common/transport/kerberos/client_nofips_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//go:build !requirefips
+
+package kerberos
+
+import (
+	"net/http"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestNewClient(t *testing.T) {
+	cfg, err := os.CreateTemp(t.TempDir(), "config")
+	require.NoError(t, err)
+	c, err := NewClient(&Config{
+		AuthType:   authPassword,
+		ConfigPath: cfg.Name(),
+	}, http.DefaultClient, "")
+	require.Nil(t, err)
+	require.NotNil(t, c)
+}

From 9dad72a7aa1f3f2393adbe5fb89544165b8c4b7a Mon Sep 17 00:00:00 2001
From: Victor Martinez
Date: Tue, 25 Feb 2025 16:28:12 +0100
Subject: [PATCH 22/41] github-actions: dependabot configuration (#42437)

---
 .github/dependabot.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 2b2bc132b1c3..6427c5ff284a 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -66,7 +66,8 @@ updates:

   # GitHub actions
   - package-ecosystem: "github-actions"
-    directory: "/"
+    directories:
+      - "/"
     schedule:
       interval: "weekly"
       day: "sunday"

From 42c4aa0af9f1d6f28e98a675dadc824fe2c60c11 Mon Sep 17 00:00:00 2001
From: Kaan Yalti
Date: Tue, 25 Feb 2025 12:05:36 -0500
Subject: [PATCH 23/41] enhancement(4534): removed encryption and
 encryption-related logic from segments (#42848)

* enhancement(4534): removed encryption and encryption-related logic from segments

* enhancement(4534): removed encryption related tests and config

* enhancement(4534): updated documentation

* enhancement(4534): ran make update

* enhancement(4534): updated changelog

* enhancement(4534): replaces rand with rand/v2

* enhancement(4534): remove commented config var

---
 CHANGELOG.next.asciidoc                       |   1 +
 .../queue/diskqueue/benchmark_test.go         |  88 ++++------
 .../queue/diskqueue/compression_test.go       |  21 ++-
 libbeat/publisher/queue/diskqueue/config.go   |   3 -
 .../diskqueue/docs/on-disk-structures.md      |  12 +-
 .../queue/diskqueue/docs/schemaV2.pic         |   2 -
 .../queue/diskqueue/docs/schemaV2.svg         |  18 +-
 .../queue/diskqueue/enc_compress_test.go      |  76 --------
 .../publisher/queue/diskqueue/encryption.go   | 166 ------------------
 .../queue/diskqueue/encryption_test.go        |  75 --------
 libbeat/publisher/queue/diskqueue/segments.go |  79 +--------
 .../queue/diskqueue/segments_test.go          |  72 +------
 12 files changed, 70 insertions(+), 543 deletions(-)
 delete mode 100644 libbeat/publisher/queue/diskqueue/enc_compress_test.go
 delete mode 100644 libbeat/publisher/queue/diskqueue/encryption.go
 delete mode 100644 libbeat/publisher/queue/diskqueue/encryption_test.go

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index 0b58e035277b..6da9b2653ee7 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -20,6 +20,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
 - Replace default Ubuntu-based images with UBI-minimal-based ones {pull}42150[42150]
 - Fix templates and docs to use correct `--` version of command line arguments.
{issue}42038[42038] {pull}42060[42060]
 - removed support for a single `-` to precede multi-letter command line arguments. Use `--` instead. {issue}42117[42117] {pull}42209[42209]
+- Removed encryption from diskqueue V2 for FIPS compliance. {issue}4534[4534] {pull}42848[42848]

 *Auditbeat*

diff --git a/libbeat/publisher/queue/diskqueue/benchmark_test.go b/libbeat/publisher/queue/diskqueue/benchmark_test.go
index 7665c4fd780b..c84868732a8f 100644
--- a/libbeat/publisher/queue/diskqueue/benchmark_test.go
+++ b/libbeat/publisher/queue/diskqueue/benchmark_test.go
@@ -30,7 +30,7 @@ package diskqueue

 import (
-	"math/rand"
+	"math/rand/v2"
 	"testing"
 	"time"

@@ -45,7 +45,7 @@ var (
 	// constant event time
 	eventTime = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)

-	//sample event messages, so size of every frame isn't identical
+	// sample event messages, so size of every frame isn't identical
 	msgs = []string{
 		"192.168.33.1 - - [26/Dec/2016:16:22:00 +0000] \"GET / HTTP/1.1\" 200 484 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36\"",
 		"{\"eventVersion\":\"1.05\",\"userIdentity\":{\"type\":\"IAMUser\",\"principalId\":\"EXAMPLE_ID\",\"arn\":\"arn:aws:iam::0123456789012:user/Alice\",\"accountId\":\"0123456789012\",\"accessKeyId\":\"EXAMPLE_KEY\",\"userName\":\"Alice\",\"sessionContext\":{\"sessionIssuer\":{},\"webIdFederationData\":{},\"attributes\":{\"mfaAuthenticated\":\"true\",\"creationDate\":\"2020-01-08T15:12:16Z\"}},\"invokedBy\":\"signin.amazonaws.com\"},\"eventTime\":\"2020-01-08T20:58:45Z\",\"eventSource\":\"cloudtrail.amazonaws.com\",\"eventName\":\"UpdateTrail\",\"awsRegion\":\"us-west-2\",\"sourceIPAddress\":\"127.0.0.1\",\"userAgent\":\"signin.amazonaws.com\",\"requestParameters\":{\"name\":\"arn:aws:cloudtrail:us-west-2:0123456789012:trail/TEST-trail\",\"s3BucketName\":\"test-cloudtrail-bucket\",\"snsTopicName\":\"\",\"isMultiRegionTrail\":true,\"enableLogFileValidation\":false,\"kmsKeyId\":\"\"},\"responseElements\":{\"name\":\"TEST-trail\",\"s3BucketName\":\"test-cloudtrail-bucket\",\"snsTopicName\":\"\",\"snsTopicARN\":\"\",\"includeGlobalServiceEvents\":true,\"isMultiRegionTrail\":true,\"trailARN\":\"arn:aws:cloudtrail:us-west-2:0123456789012:trail/TEST-trail\",\"logFileValidationEnabled\":false,\"isOrganizationTrail\":false},\"requestID\":\"EXAMPLE-f3da-42d1-84f5-EXAMPLE\",\"eventID\":\"EXAMPLE-b5e9-4846-8407-EXAMPLE\",\"readOnly\":false,\"eventType\":\"AwsApiCall\",\"recipientAccountId\":\"0123456789012\"}",
@@ -63,7 +63,7 @@ func makePublisherEvent(r *rand.Rand) publisher.Event {
 		Content: beat.Event{
 			Timestamp: eventTime,
 			Fields: mapstr.M{
-				"message": msgs[r.Intn(len(msgs))],
+				"message": msgs[r.IntN(len(msgs))],
 			},
 		},
 	}
@@ -73,12 +73,10 @@ func makePublisherEvent(r *rand.Rand) publisher.Event {
 // hold the queue. Location of the temporary directory is stored in
 // the queue settings. Call `cleanup` when done with the queue to
 // close the queue and remove the temp dir.
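+// With encryption removed, setup takes only the compression and
+// serialization flags. For illustration, a compressed, CBOR-serialized
+// queue and producer are obtained with:
+//
+//	q, p := setup(b, true, false)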
-func setup(b *testing.B, encrypt bool, compress bool, protobuf bool) (*diskQueue, queue.Producer) { +func setup(b *testing.B, compress bool, protobuf bool) (*diskQueue, queue.Producer) { s := DefaultSettings() s.Path = b.TempDir() - if encrypt { - s.EncryptionKey = []byte("testtesttesttest") - } + s.UseCompression = compress q, err := NewQueue(logp.L(), nil, s, nil) if err != nil { @@ -138,14 +136,14 @@ func produceThenConsume(r *rand.Rand, p queue.Producer, q *diskQueue, num_events // benchmarkQueue is a wrapper for produceAndConsume, it tries to limit // timers to just produceAndConsume -func benchmarkQueue(num_events int, batch_size int, encrypt bool, compress bool, async bool, protobuf bool, b *testing.B) { +func benchmarkQueue(num_events int, batch_size int, compress bool, async bool, protobuf bool, b *testing.B) { b.ResetTimer() var err error for n := 0; n < b.N; n++ { b.StopTimer() - r := rand.New(rand.NewSource(1)) - q, p := setup(b, encrypt, compress, protobuf) + r := rand.New(rand.NewPCG(1, 2)) + q, p := setup(b, compress, protobuf) b.StartTimer() if async { if err = produceAndConsume(r, p, q, num_events, batch_size); err != nil { @@ -164,76 +162,50 @@ func benchmarkQueue(num_events int, batch_size int, encrypt bool, compress bool, // Async benchmarks func BenchmarkAsync1k(b *testing.B) { - benchmarkQueue(1000, 10, false, false, true, false, b) + benchmarkQueue(1000, 10, false, true, false, b) } + func BenchmarkAsync100k(b *testing.B) { - benchmarkQueue(100000, 1000, false, false, true, false, b) -} -func BenchmarkEncryptAsync1k(b *testing.B) { - benchmarkQueue(1000, 10, true, false, true, false, b) -} -func BenchmarkEncryptAsync100k(b *testing.B) { - benchmarkQueue(100000, 1000, true, false, true, false, b) + benchmarkQueue(100000, 1000, false, true, false, b) } + func BenchmarkCompressAsync1k(b *testing.B) { - benchmarkQueue(1000, 10, false, true, true, false, b) + benchmarkQueue(1000, 10, true, true, false, b) } + func BenchmarkCompressAsync100k(b *testing.B) { - benchmarkQueue(100000, 1000, false, true, true, false, b) -} -func BenchmarkEncryptCompressAsync1k(b *testing.B) { - benchmarkQueue(1000, 10, true, true, true, false, b) -} -func BenchmarkEncryptCompressAsync100k(b *testing.B) { - benchmarkQueue(100000, 1000, true, true, true, false, b) + benchmarkQueue(100000, 1000, true, true, false, b) } + func BenchmarkProtoAsync1k(b *testing.B) { - benchmarkQueue(1000, 10, false, false, true, true, b) + benchmarkQueue(1000, 10, false, true, true, b) } + func BenchmarkProtoAsync100k(b *testing.B) { - benchmarkQueue(100000, 1000, false, false, true, true, b) -} -func BenchmarkEncCompProtoAsync1k(b *testing.B) { - benchmarkQueue(1000, 10, true, true, true, true, b) -} -func BenchmarkEncCompProtoAsync100k(b *testing.B) { - benchmarkQueue(100000, 1000, true, true, true, true, b) + benchmarkQueue(100000, 1000, false, true, true, b) } // Sync Benchmarks func BenchmarkSync1k(b *testing.B) { - benchmarkQueue(1000, 10, false, false, false, false, b) + benchmarkQueue(1000, 10, false, false, false, b) } + func BenchmarkSync100k(b *testing.B) { - benchmarkQueue(100000, 1000, false, false, false, false, b) -} -func BenchmarkEncryptSync1k(b *testing.B) { - benchmarkQueue(1000, 10, true, false, false, false, b) -} -func BenchmarkEncryptSync100k(b *testing.B) { - benchmarkQueue(100000, 1000, true, false, false, false, b) + benchmarkQueue(100000, 1000, false, false, false, b) } + func BenchmarkCompressSync1k(b *testing.B) { - benchmarkQueue(1000, 10, false, true, false, false, b) + 
benchmarkQueue(1000, 10, true, false, false, b) } + func BenchmarkCompressSync100k(b *testing.B) { - benchmarkQueue(100000, 1000, false, true, false, false, b) -} -func BenchmarkEncryptCompressSync1k(b *testing.B) { - benchmarkQueue(1000, 10, true, true, false, false, b) -} -func BenchmarkEncryptCompressSync100k(b *testing.B) { - benchmarkQueue(100000, 1000, true, true, false, false, b) + benchmarkQueue(100000, 1000, true, false, false, b) } + func BenchmarkProtoSync1k(b *testing.B) { - benchmarkQueue(1000, 10, false, false, false, true, b) + benchmarkQueue(1000, 10, false, false, true, b) } + func BenchmarkProtoSync100k(b *testing.B) { - benchmarkQueue(100000, 1000, false, false, false, true, b) -} -func BenchmarkEncCompProtoSync1k(b *testing.B) { - benchmarkQueue(1000, 10, true, true, false, true, b) -} -func BenchmarkEncCompProtoSync100k(b *testing.B) { - benchmarkQueue(100000, 1000, true, true, false, true, b) + benchmarkQueue(100000, 1000, false, false, true, b) } diff --git a/libbeat/publisher/queue/diskqueue/compression_test.go b/libbeat/publisher/queue/diskqueue/compression_test.go index 2b684e0f28dd..e4e89cf7f3d6 100644 --- a/libbeat/publisher/queue/diskqueue/compression_test.go +++ b/libbeat/publisher/queue/diskqueue/compression_test.go @@ -47,7 +47,8 @@ func TestCompressionReader(t *testing.T) { 0x00, 0x00, 0x80, 0x61, 0x62, 0x63, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x6c, - 0x3e, 0x7b, 0x08, 0x00}, + 0x3e, 0x7b, 0x08, 0x00, + }, }, "abc compressed with pierrec lz4": { plaintext: []byte("abc"), @@ -57,7 +58,8 @@ func TestCompressionReader(t *testing.T) { 0x00, 0x00, 0x80, 0x61, 0x62, 0x63, 0x00, 0x00, 0x00, 0x00, 0xff, 0x53, - 0xd1, 0x32}, + 0xd1, 0x32, + }, }, } @@ -85,7 +87,8 @@ func TestCompressionWriter(t *testing.T) { 0x00, 0x00, 0x80, 0x61, 0x62, 0x63, 0x00, 0x00, 0x00, 0x00, 0xff, 0x53, - 0xd1, 0x32}, + 0xd1, 0x32, + }, }, } @@ -100,6 +103,16 @@ func TestCompressionWriter(t *testing.T) { } } +func NopWriteCloseSyncer(w io.WriteCloser) WriteCloseSyncer { + return nopWriteCloseSyncer{w} +} + +type nopWriteCloseSyncer struct { + io.WriteCloser +} + +func (nopWriteCloseSyncer) Sync() error { return nil } + func TestCompressionRoundTrip(t *testing.T) { tests := map[string]struct { plaintext []byte @@ -141,7 +154,7 @@ func TestCompressionSync(t *testing.T) { src1 := bytes.NewReader(tc.plaintext) _, err := io.Copy(cw, src1) assert.Nil(t, err, name) - //prior to v4.1.15 of pierrec/lz4 there was a + // prior to v4.1.15 of pierrec/lz4 there was a // bug that prevented writing after a Flush. // The call to Sync here exercises Flush. err = cw.Sync() diff --git a/libbeat/publisher/queue/diskqueue/config.go b/libbeat/publisher/queue/diskqueue/config.go index 56e9d35e08d2..309292e025c4 100644 --- a/libbeat/publisher/queue/diskqueue/config.go +++ b/libbeat/publisher/queue/diskqueue/config.go @@ -64,9 +64,6 @@ type Settings struct { RetryInterval time.Duration MaxRetryInterval time.Duration - // EncryptionKey is used to encrypt data if SchemaVersion 2 is used. - EncryptionKey []byte - // UseCompression enables or disables LZ4 compression UseCompression bool } diff --git a/libbeat/publisher/queue/diskqueue/docs/on-disk-structures.md b/libbeat/publisher/queue/diskqueue/docs/on-disk-structures.md index 81d83e922b1a..e51dd1b89fb6 100644 --- a/libbeat/publisher/queue/diskqueue/docs/on-disk-structures.md +++ b/libbeat/publisher/queue/diskqueue/docs/on-disk-structures.md @@ -58,21 +58,11 @@ a count of the number of frames in the segment, which is an unsigned flags, which signify options. 
The size of options is 32-bits in little-endian format. -If no fields are set in the options field, then un-encrypted frames -follow the header. - -If the options field has the first bit set, then encryption is -enabled. In which case, the next 128-bits are the initialization -vector and the rest of the file is encrypted frames. +If no fields are set in the options field, then uncompressed frames follow the header. If the options field has the second bit set, then compression is enabled. In which case, LZ4 compressed frames follow the header. -If both the first and second bit of the options field are set, then -both compression and encryption are enabled. The next 128-bits are -the initialization vector and the rest of the file is LZ4 compressed -frames. - If the options field has the third bit set, then Google Protobuf is used to serialize the data in the frame instead of CBOR. diff --git a/libbeat/publisher/queue/diskqueue/docs/schemaV2.pic b/libbeat/publisher/queue/diskqueue/docs/schemaV2.pic index 7cf0a4425c4c..44bbfd6ab504 100644 --- a/libbeat/publisher/queue/diskqueue/docs/schemaV2.pic +++ b/libbeat/publisher/queue/diskqueue/docs/schemaV2.pic @@ -2,5 +2,3 @@ boxht = 0.25 VERSION: box "version (uint32)" wid 4; COUNT: box "count (uint32)" wid 4 with .nw at VERSION.sw; OPTIONS: box "options (uint32)" wid 4 with .nw at COUNT.sw; -IV: box "initialization vector (128 bits)" wid 4 ht 1 with .nw at OPTIONS.sw -FRAME: box "Encrypted Frames" dashed wid 4 ht 2 with .nw at IV.sw; \ No newline at end of file diff --git a/libbeat/publisher/queue/diskqueue/docs/schemaV2.svg b/libbeat/publisher/queue/diskqueue/docs/schemaV2.svg index 76f5a51feccf..85928aba47e6 100644 --- a/libbeat/publisher/queue/diskqueue/docs/schemaV2.svg +++ b/libbeat/publisher/queue/diskqueue/docs/schemaV2.svg @@ -1,13 +1,9 @@ - - -version (uint32) - -count (uint32) - -options (uint32) - -initialization vector (128 bits) - -Encrypted Frames + + +version (uint32) + +count (uint32) + +options (uint32) diff --git a/libbeat/publisher/queue/diskqueue/enc_compress_test.go b/libbeat/publisher/queue/diskqueue/enc_compress_test.go deleted file mode 100644 index 637765c24cb5..000000000000 --- a/libbeat/publisher/queue/diskqueue/enc_compress_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package diskqueue - -import ( - "bytes" - "io" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEncryptionCompressionRoundTrip(t *testing.T) { - tests := map[string]struct { - plaintext []byte - }{ - "1 rune": {plaintext: []byte("a")}, - "16 runes": {plaintext: []byte("bbbbbbbbbbbbbbbb")}, - "17 runes": {plaintext: []byte("ccccccccccccccccc")}, - "small json": {plaintext: []byte("{\"message\":\"2 123456789010 eni-1235b8ca123456789 - - - - - - - 1431280876 1431280934 - NODATA\"}")}, - "large json": {plaintext: []byte("{\"message\":\"{\\\"CacheCacheStatus\\\":\\\"hit\\\",\\\"CacheResponseBytes\\\":26888,\\\"CacheResponseStatus\\\":200,\\\"CacheTieredFill\\\":true,\\\"ClientASN\\\":1136,\\\"ClientCountry\\\":\\\"nl\\\",\\\"ClientDeviceType\\\":\\\"desktop\\\",\\\"ClientIP\\\":\\\"89.160.20.156\\\",\\\"ClientIPClass\\\":\\\"noRecord\\\",\\\"ClientRequestBytes\\\":5324,\\\"ClientRequestHost\\\":\\\"eqlplayground.io\\\",\\\"ClientRequestMethod\\\":\\\"GET\\\",\\\"ClientRequestPath\\\":\\\"/40865/bundles/plugin/securitySolution/8.0.0/securitySolution.chunk.9.js\\\",\\\"ClientRequestProtocol\\\":\\\"HTTP/1.1\\\",\\\"ClientRequestReferer\\\":\\\"https://eqlplayground.io/s/eqldemo/app/security/timelines/default?sourcerer=(default:!(.siem-signals-eqldemo))&timerange=(global:(linkTo:!(),timerange:(from:%272021-03-03T19:55:15.519Z%27,fromStr:now-24h,kind:relative,to:%272021-03-04T19:55:15.519Z%27,toStr:now)),timeline:(linkTo:!(),timerange:(from:%272020-03-04T19:55:28.684Z%27,fromStr:now-1y,kind:relative,to:%272021-03-04T19:55:28.692Z%27,toStr:now)))&timeline=(activeTab:eql,graphEventId:%27%27,id:%2769f93840-7d23-11eb-866c-79a0609409ba%27,isOpen:!t)\\\",\\\"ClientRequestURI\\\":\\\"/40865/bundles/plugin/securitySolution/8.0.0/securitySolution.chunk.9.js\\\",\\\"ClientRequestUserAgent\\\":\\\"Mozilla/5.0(WindowsNT10.0;Win64;x64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/91.0.4472.124Safari/537.36\\\",\\\"ClientSSLCipher\\\":\\\"NONE\\\",\\\"ClientSSLProtocol\\\":\\\"none\\\",\\\"ClientSrcPort\\\":0,\\\"ClientXRequestedWith\\\":\\\"\\\",\\\"EdgeColoCode\\\":\\\"33.147.138.217\\\",\\\"EdgeColoID\\\":20,\\\"EdgeEndTimestamp\\\":1625752958875000000,\\\"EdgePathingOp\\\":\\\"wl\\\",\\\"EdgePathingSrc\\\":\\\"macro\\\",\\\"EdgePathingStatus\\\":\\\"nr\\\",\\\"EdgeRateLimitAction\\\":\\\"\\\",\\\"EdgeRateLimitID\\\":0,\\\"EdgeRequestHost\\\":\\\"eqlplayground.io\\\",\\\"EdgeResponseBytes\\\":24743,\\\"EdgeResponseCompressionRatio\\\":0,\\\"EdgeResponseContentType\\\":\\\"application/javascript\\\",\\\"EdgeResponseStatus\\\":200,\\\"EdgeServerIP\\\":\\\"89.160.20.156\\\",\\\"EdgeStartTimestamp\\\":1625752958812000000,\\\"FirewallMatchesActions\\\":[],\\\"FirewallMatchesRuleIDs\\\":[],\\\"FirewallMatchesSources\\\":[],\\\"OriginIP\\\":\\\"\\\",\\\"OriginResponseBytes\\\":0,\\\"OriginResponseHTTPExpires\\\":\\\"\\\",\\\"OriginResponseHTTPLastModified\\\":\\\"\\\",\\\"OriginResponseStatus\\\":0,\\\"OriginResponseTime\\\":0,\\\"OriginSSLProtocol\\\":\\\"unknown\\\",\\\"ParentRayID\\\":\\\"66b9d9f88b5b4c4f\\\",\\\"RayID\\\":\\\"66b9d9f890ae4c4f\\\",\\\"SecurityLevel\\\":\\\"off\\\",\\\"WAFAction\\\":\\\"unknown\\\",\\\"WAFFlags\\\":\\\"0\\\",\\\"WAFMatchedVar\\\":\\\"\\\",\\\"WAFProfile\\\":\\\"unknown\\\",\\\"WAFRuleID\\\":\\\"\\\",\\\"WAFRuleMessage\\\":\\\"\\\",\\\"WorkerCPUTime\\\":0,\\\"WorkerStatus\\\":\\\"unknown\\\",\\\"WorkerSubrequest\\\":true,\\\"WorkerSubrequestCount\\\":0,\\\"ZoneID\\\":393347122}\"}")}, - } - - for name, tc := range tests { - pr, pw := io.Pipe() - key := 
[]byte("keykeykeykeykeyk") - src := bytes.NewReader(tc.plaintext) - var dst bytes.Buffer - var tEncBuf bytes.Buffer - var tCompBuf bytes.Buffer - - go func() { - ew, err := NewEncryptionWriter(NopWriteCloseSyncer(pw), key) - assert.Nil(t, err, name) - cw := NewCompressionWriter(ew) - _, err = io.Copy(cw, src) - assert.Nil(t, err, name) - err = cw.Close() - assert.Nil(t, err, name) - }() - - ter := io.TeeReader(pr, &tEncBuf) - er, err := NewEncryptionReader(io.NopCloser(ter), key) - assert.Nil(t, err, name) - - tcr := io.TeeReader(er, &tCompBuf) - - cr := NewCompressionReader(io.NopCloser(tcr)) - - _, err = io.Copy(&dst, cr) - assert.Nil(t, err, name) - // Check round trip worked - assert.Equal(t, tc.plaintext, dst.Bytes(), name) - // Check that cipher text and plaintext don't match - assert.NotEqual(t, tc.plaintext, tEncBuf.Bytes(), name) - // Check that compressed text and plaintext don't match - assert.NotEqual(t, tc.plaintext, tCompBuf.Bytes(), name) - // Check that compressed text and ciphertext don't match - assert.NotEqual(t, tEncBuf.Bytes(), tCompBuf.Bytes(), name) - } -} diff --git a/libbeat/publisher/queue/diskqueue/encryption.go b/libbeat/publisher/queue/diskqueue/encryption.go deleted file mode 100644 index 497442017eb4..000000000000 --- a/libbeat/publisher/queue/diskqueue/encryption.go +++ /dev/null @@ -1,166 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package diskqueue - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/rand" - "fmt" - "io" -) - -const ( - // KeySize is 128-bit - KeySize = 16 -) - -// EncryptionReader allows reading from a AES-128-CTR stream -type EncryptionReader struct { - src io.ReadCloser - stream cipher.Stream - block cipher.Block - iv []byte - ciphertext []byte -} - -// NewEncryptionReader returns a new AES-128-CTR decrypter -func NewEncryptionReader(r io.ReadCloser, key []byte) (*EncryptionReader, error) { - if len(key) != KeySize { - return nil, fmt.Errorf("key must be %d bytes long", KeySize) - } - - er := &EncryptionReader{} - er.src = r - - // turn key into block & save - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - er.block = block - - // read IV from the io.ReadCloser - iv := make([]byte, aes.BlockSize) - if _, err := io.ReadFull(er.src, iv); err != nil { - return nil, err - } - er.iv = iv - - // create Stream - er.stream = cipher.NewCTR(block, iv) - - return er, nil -} - -func (er *EncryptionReader) Read(buf []byte) (int, error) { - if cap(er.ciphertext) >= len(buf) { - er.ciphertext = er.ciphertext[:len(buf)] - } else { - er.ciphertext = make([]byte, len(buf)) - } - n, err := er.src.Read(er.ciphertext) - if err != nil { - return n, err - } - er.stream.XORKeyStream(buf, er.ciphertext) - return n, nil -} - -func (er *EncryptionReader) Close() error { - return er.src.Close() -} - -// Reset Sets up stream again, assumes that caller has already set the -// src to the iv -func (er *EncryptionReader) Reset() error { - iv := make([]byte, aes.BlockSize) - if _, err := io.ReadFull(er.src, iv); err != nil { - return err - } - if !bytes.Equal(iv, er.iv) { - return fmt.Errorf("different iv, something is wrong") - } - - // create Stream - er.stream = cipher.NewCTR(er.block, iv) - return nil -} - -// EncryptionWriter allows writing to a AES-128-CTR stream -type EncryptionWriter struct { - dst WriteCloseSyncer - stream cipher.Stream - ciphertext []byte -} - -// NewEncryptionWriter returns a new AES-128-CTR stream encryptor -func NewEncryptionWriter(w WriteCloseSyncer, key []byte) (*EncryptionWriter, error) { - if len(key) != KeySize { - return nil, fmt.Errorf("key must be %d bytes long", KeySize) - } - - ew := &EncryptionWriter{} - - // turn key into block - block, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - // create random IV - iv := make([]byte, aes.BlockSize) - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, err - } - - // create stream - stream := cipher.NewCTR(block, iv) - - //write IV - n, err := w.Write(iv) - if err != nil { - return nil, err - } - if n != len(iv) { - return nil, io.ErrShortWrite - } - - ew.dst = w - ew.stream = stream - return ew, nil -} - -func (ew *EncryptionWriter) Write(buf []byte) (int, error) { - if cap(ew.ciphertext) >= len(buf) { - ew.ciphertext = ew.ciphertext[:len(buf)] - } else { - ew.ciphertext = make([]byte, len(buf)) - } - ew.stream.XORKeyStream(ew.ciphertext, buf) - return ew.dst.Write(ew.ciphertext) -} - -func (ew *EncryptionWriter) Close() error { - return ew.dst.Close() -} - -func (ew *EncryptionWriter) Sync() error { - return ew.dst.Sync() -} diff --git a/libbeat/publisher/queue/diskqueue/encryption_test.go b/libbeat/publisher/queue/diskqueue/encryption_test.go deleted file mode 100644 index fb956d699b16..000000000000 --- a/libbeat/publisher/queue/diskqueue/encryption_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Licensed to Elasticsearch B.V. 
under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package diskqueue - -import ( - "bytes" - "crypto/aes" - "io" - "testing" - - "github.com/stretchr/testify/assert" -) - -func NopWriteCloseSyncer(w io.WriteCloser) WriteCloseSyncer { - return nopWriteCloseSyncer{w} -} - -type nopWriteCloseSyncer struct { - io.WriteCloser -} - -func (nopWriteCloseSyncer) Sync() error { return nil } - -func TestEncryptionRoundTrip(t *testing.T) { - tests := map[string]struct { - plaintext []byte - }{ - "8 bits": {plaintext: []byte("a")}, - "128 bits": {plaintext: []byte("bbbbbbbbbbbbbbbb")}, - "136 bits": {plaintext: []byte("ccccccccccccccccc")}, - } - for name, tc := range tests { - pr, pw := io.Pipe() - src := bytes.NewReader(tc.plaintext) - var dst bytes.Buffer - var teeBuf bytes.Buffer - key := []byte("kkkkkkkkkkkkkkkk") - - go func() { - //NewEncryptionWriter writes iv, so needs to be in go routine - ew, err := NewEncryptionWriter(NopWriteCloseSyncer(pw), key) - assert.Nil(t, err, name) - _, err = io.Copy(ew, src) - assert.Nil(t, err, name) - ew.Close() - }() - - tr := io.TeeReader(pr, &teeBuf) - er, err := NewEncryptionReader(io.NopCloser(tr), key) - assert.Nil(t, err, name) - _, err = io.Copy(&dst, er) - assert.Nil(t, err, name) - // Check round trip worked - assert.Equal(t, tc.plaintext, dst.Bytes(), name) - // Check that iv & cipher text were written - assert.Equal(t, len(tc.plaintext)+aes.BlockSize, teeBuf.Len(), name) - // Check that cipher text and plaintext don't match - assert.NotEqual(t, tc.plaintext, teeBuf.Bytes()[aes.BlockSize:], name) - } -} diff --git a/libbeat/publisher/queue/diskqueue/segments.go b/libbeat/publisher/queue/diskqueue/segments.go index 7e3661f6e5b4..11eeb9991c6a 100644 --- a/libbeat/publisher/queue/diskqueue/segments.go +++ b/libbeat/publisher/queue/diskqueue/segments.go @@ -33,7 +33,6 @@ import ( // diskQueueSegments encapsulates segment-related queue metadata. type diskQueueSegments struct { - // A list of the segments that have not yet been completely written, sorted // by increasing segment ID. When the first entry has been completely // written, it is removed from this list and appended to reading. @@ -132,7 +131,7 @@ type segmentHeader struct { // Only present in schema version >= 1. frameCount uint32 - // options holds flags to enable features, for example encryption. + // options holds flags to enable features, for example compression. 
options uint32 } @@ -151,7 +150,7 @@ const currentSegmentVersion = 2 const segmentHeaderSize = 12 const ( - ENABLE_ENCRYPTION uint32 = 1 << iota // 0x1 + _ uint32 = 1 << iota // 0x1 ENABLE_COMPRESSION // 0x2 ENABLE_PROTOBUF // 0x4 ) @@ -256,28 +255,14 @@ func (segment *queueSegment) getReader(queueSettings Settings) (*segmentReader, sr.serializationFormat = SerializationCBOR } - if (header.options & ENABLE_ENCRYPTION) == ENABLE_ENCRYPTION { - sr.er, err = NewEncryptionReader(sr.src, queueSettings.EncryptionKey) - if err != nil { - sr.src.Close() - return nil, fmt.Errorf("couldn't create encryption reader: %w", err) - } - } if (header.options & ENABLE_COMPRESSION) == ENABLE_COMPRESSION { - if sr.er != nil { - sr.cr = NewCompressionReader(sr.er) - } else { - sr.cr = NewCompressionReader(sr.src) - } + sr.cr = NewCompressionReader(sr.src) } return sr, nil } -// getWriter sets up the segmentWriter. The order of encryption and -// compression is important. If both options are enabled we want -// encrypted compressed data not compressed encrypted data. This is -// because encryption will mask the repetions in the data making -// compression much less effective. getWriter should only be called +// getWriter sets up the segmentWriter. +// getWriter should only be called // from the writer loop. func (segment *queueSegment) getWriter(queueSettings Settings) (*segmentWriter, error) { var options uint32 @@ -287,10 +272,6 @@ func (segment *queueSegment) getWriter(queueSettings Settings) (*segmentWriter, return nil, err } - if len(queueSettings.EncryptionKey) > 0 { - options = options | ENABLE_ENCRYPTION - } - if queueSettings.UseCompression { options = options | ENABLE_COMPRESSION } @@ -302,20 +283,8 @@ func (segment *queueSegment) getWriter(queueSettings Settings) (*segmentWriter, return nil, err } - if (options & ENABLE_ENCRYPTION) == ENABLE_ENCRYPTION { - sw.ew, err = NewEncryptionWriter(sw.dst, queueSettings.EncryptionKey) - if err != nil { - sw.dst.Close() - return nil, fmt.Errorf("couldn't create encryption writer: %w", err) - } - } - if (options & ENABLE_COMPRESSION) == ENABLE_COMPRESSION { - if sw.ew != nil { - sw.cw = NewCompressionWriter(sw.ew) - } else { - sw.cw = NewCompressionWriter(sw.dst) - } + sw.cw = NewCompressionWriter(sw.dst) } return sw, nil @@ -474,7 +443,6 @@ func (segments *diskQueueSegments) sizeOnDisk() uint64 { // less compressable.
type segmentReader struct { src io.ReadSeekCloser - er *EncryptionReader cr *CompressionReader serializationFormat SerializationFormat } @@ -483,9 +451,6 @@ func (r *segmentReader) Read(p []byte) (int, error) { if r.cr != nil { return r.cr.Read(p) } - if r.er != nil { - return r.er.Read(p) - } return r.src.Read(p) } @@ -493,9 +458,6 @@ func (r *segmentReader) Close() error { if r.cr != nil { return r.cr.Close() } - if r.er != nil { - return r.er.Close() - } return r.src.Close() } @@ -508,31 +470,12 @@ func (r *segmentReader) Seek(offset int64, whence int) (int64, error) { if _, err := r.src.Seek(segmentHeaderSize, io.SeekStart); err != nil { return 0, fmt.Errorf("could not seek past segment header: %w", err) } - if r.er != nil { - if err := r.er.Reset(); err != nil { - return 0, fmt.Errorf("could not reset encryption: %w", err) - } - } if err := r.cr.Reset(); err != nil { return 0, fmt.Errorf("could not reset compression: %w", err) } written, err := io.CopyN(io.Discard, r.cr, (offset+int64(whence))-segmentHeaderSize) return written + segmentHeaderSize, err } - if r.er != nil { - //can't seek before segment header - if (offset + int64(whence)) < segmentHeaderSize { - return 0, fmt.Errorf("illegal seek offset %d, whence %d", offset, whence) - } - if _, err := r.src.Seek(segmentHeaderSize, io.SeekStart); err != nil { - return 0, fmt.Errorf("could not seek past segment header: %w", err) - } - if err := r.er.Reset(); err != nil { - return 0, fmt.Errorf("could not reset encryption: %w", err) - } - written, err := io.CopyN(io.Discard, r.er, (offset+int64(whence))-segmentHeaderSize) - return written + segmentHeaderSize, err - } return r.src.Seek(offset, whence) } @@ -545,7 +488,6 @@ func (r *segmentReader) Seek(offset int64, whence int) (int64, error) { // data less compressable. 
type segmentWriter struct { dst *os.File - ew *EncryptionWriter cw *CompressionWriter } @@ -553,9 +495,6 @@ func (w *segmentWriter) Write(p []byte) (int, error) { if w.cw != nil { return w.cw.Write(p) } - if w.ew != nil { - return w.ew.Write(p) - } return w.dst.Write(p) } @@ -563,9 +502,6 @@ func (w *segmentWriter) Close() error { if w.cw != nil { return w.cw.Close() } - if w.ew != nil { - return w.ew.Close() - } return w.dst.Close() } @@ -573,9 +509,6 @@ func (w *segmentWriter) Sync() error { if w.cw != nil { return w.cw.Sync() } - if w.ew != nil { - return w.ew.Sync() - } return w.dst.Sync() } diff --git a/libbeat/publisher/queue/diskqueue/segments_test.go b/libbeat/publisher/queue/diskqueue/segments_test.go index b80bba22e895..44721debeefd 100644 --- a/libbeat/publisher/queue/diskqueue/segments_test.go +++ b/libbeat/publisher/queue/diskqueue/segments_test.go @@ -27,43 +27,25 @@ import ( func TestSegmentsRoundTrip(t *testing.T) { tests := map[string]struct { id segmentID - encrypt bool compress bool plaintext []byte }{ - "No Encryption or Compression": { + "No Compression": { id: 0, - encrypt: false, compress: false, plaintext: []byte("no encryption or compression"), }, - "Encryption Only": { - id: 1, - encrypt: true, - compress: false, - plaintext: []byte("encryption only"), - }, - "Compression Only": { + "With Compression": { id: 2, - encrypt: false, compress: true, plaintext: []byte("compression only"), }, - "Encryption and Compression": { - id: 3, - encrypt: true, - compress: true, - plaintext: []byte("encryption and compression"), - }, } dir := t.TempDir() for name, tc := range tests { dst := make([]byte, len(tc.plaintext)) settings := DefaultSettings() settings.Path = dir - if tc.encrypt { - settings.EncryptionKey = []byte("keykeykeykeykeyk") - } settings.UseCompression = tc.compress qs := &queueSegment{ id: tc.id, @@ -86,7 +68,7 @@ func TestSegmentsRoundTrip(t *testing.T) { assert.Equal(t, len(dst), n, name) - //make sure we read back what we wrote + // make sure we read back what we wrote assert.Equal(t, tc.plaintext, dst, name) _, err = sr.Read(dst) @@ -101,31 +83,16 @@ func TestSegmentsRoundTrip(t *testing.T) { func TestSegmentReaderSeek(t *testing.T) { tests := map[string]struct { id segmentID - encrypt bool compress bool plaintexts [][]byte }{ - "No Encryption or compression": { + "No Compression": { id: 0, - encrypt: false, - compress: false, - plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, - }, - "Encryption Only": { - id: 1, - encrypt: true, compress: false, plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, }, - "Compression Only": { + "With Compression": { id: 2, - encrypt: false, - compress: true, - plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, - }, - "Encryption and Compression": { - id: 3, - encrypt: true, compress: true, plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, }, @@ -134,9 +101,6 @@ func TestSegmentReaderSeek(t *testing.T) { for name, tc := range tests { settings := DefaultSettings() settings.Path = dir - if tc.encrypt { - settings.EncryptionKey = []byte("keykeykeykeykeyk") - } settings.UseCompression = tc.compress qs := &queueSegment{ @@ -154,7 +118,7 @@ func TestSegmentReaderSeek(t *testing.T) { sw.Close() sr, err := qs.getReader(settings) assert.Nil(t, err, name) - //seek to second data piece + // seek to second data piece n, err := sr.Seek(segmentHeaderSize+int64(len(tc.plaintexts[0])), io.SeekStart) assert.Nil(t, err, name) assert.Equal(t, segmentHeaderSize+int64(len(tc.plaintexts[0])), n, name) @@ -171,35 +135,18 @@ func 
TestSegmentReaderSeek(t *testing.T) { func TestSegmentReaderSeekLocations(t *testing.T) { tests := map[string]struct { id segmentID - encrypt bool compress bool plaintexts [][]byte location int64 }{ - "No Encryption or Compression": { + "No Compression": { id: 0, - encrypt: false, compress: false, plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, location: -1, }, - "Encryption": { - id: 1, - encrypt: true, - compress: false, - plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, - location: 2, - }, "Compression": { id: 1, - encrypt: false, - compress: true, - plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, - location: 2, - }, - "Encryption and Compression": { - id: 1, - encrypt: true, compress: true, plaintexts: [][]byte{[]byte("abc"), []byte("defg")}, location: 2, @@ -209,9 +156,6 @@ func TestSegmentReaderSeekLocations(t *testing.T) { for name, tc := range tests { settings := DefaultSettings() settings.Path = dir - if tc.encrypt { - settings.EncryptionKey = []byte("keykeykeykeykeyk") - } settings.UseCompression = tc.compress qs := &queueSegment{ id: tc.id, @@ -226,7 +170,7 @@ func TestSegmentReaderSeekLocations(t *testing.T) { sw.Close() sr, err := qs.getReader(settings) assert.Nil(t, err, name) - //seek to location + // seek to location _, err = sr.Seek(tc.location, io.SeekStart) assert.NotNil(t, err, name) } From 5197c4320a262ac850fa8bdb9718c2ba5e54027d Mon Sep 17 00:00:00 2001 From: "Alex K." <8418476+fearful-symmetry@users.noreply.github.com> Date: Tue, 25 Feb 2025 10:48:56 -0800 Subject: [PATCH 24/41] Add metrics for kernel_tracing provider, fix mutex issue (#42795) * add metrics for kernel_tracing provider, fix mutex issue * fix metrics setup, add tests * still tinkering with monitoring * add changelog --- CHANGELOG.next.asciidoc | 1 + .../sessionmd/add_session_metadata.go | 46 ++++++++++++++----- .../sessionmd/add_session_metadata_test.go | 22 +++++++++ .../kerneltracingprovider_linux.go | 32 +++++++++---- .../provider/kerneltracingprovider/metrics.go | 31 +++++++++++++ 5 files changed, 112 insertions(+), 20 deletions(-) create mode 100644 x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/metrics.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 6da9b2653ee7..12976ffcd40c 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -149,6 +149,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] - hasher: Geneneral improvements and fixes. {pull}41863[41863] - hasher: Add a cached hasher for upcoming backend. {pull}41952[41952] - Split common tty definitions. {pull}42004[42004] +- Fix potential data loss in add_session_metadata. {pull}42795[42795] *Filebeat* diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go index ed6701e18064..6a5d115a5463 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata.go @@ -32,6 +32,9 @@ const ( logName = "processor." + processorName procfsType = "procfs" kernelTracingType = "kernel_tracing" + + regNameProcessDB = "processor.add_session_metadata.processdb" + regNameKernelTracing = "processor.add_session_metadata.kernel_tracing" ) // InitializeModule initializes this module. 
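For context on the genRegistry helper introduced in the next hunk: the monitoring registry in elastic-agent-libs panics when NewRegistry is called twice with the same name, which is why the helper probes with GetRegistry and falls back to a numbered suffix instead of registering blindly. A minimal sketch of the pattern it implements — the registry name here is illustrative, and the panic-on-duplicate behavior is taken from the patch's own comments:

    package main

    import "github.com/elastic/elastic-agent-libs/monitoring"

    func main() {
        reg := monitoring.NewRegistry()
        reg.NewRegistry("processor.example") // first registration is fine
        // Registering the same name again would panic, so probe first and
        // fall back to a numbered key, which is what genRegistry automates.
        name := "processor.example"
        if reg.GetRegistry(name) != nil {
            name = name + ".1"
        }
        reg.NewRegistry(name)
    }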
@@ -53,6 +56,31 @@ type addSessionMetadata struct { providerType string } +func genRegistry(reg *monitoring.Registry, base string) *monitoring.Registry { + // if more than one instance of the DB is running, start to increment the metrics keys. + // This is kind of an edge case, but best to handle it so monitoring does not explode. + // This seems like awkward code, but NewRegistry() loves to panic, so we need to be careful. + id := 0 + if reg.GetRegistry(base) != nil { + current := int(instanceID.Load()) + // because we call genRegistry() multiple times, make sure the registry doesn't exist before we iterate the counter + if current > 0 && reg.GetRegistry(fmt.Sprintf("%s.%d", base, current)) == nil { + id = current + } else { + id = int(instanceID.Add(1)) + } + + } + + regName := base + if id > 0 { + regName = fmt.Sprintf("%s.%d", base, id) + } + + metricsReg := reg.NewRegistry(regName) + return metricsReg +} + func New(cfg *cfg.C) (beat.Processor, error) { c := defaultConfig() if err := cfg.Unpack(&c); err != nil { @@ -60,18 +88,10 @@ func New(cfg *cfg.C) (beat.Processor, error) { } logger := logp.NewLogger(logName) - - id := int(instanceID.Add(1)) - regName := "processor.add_session_metadata.processdb" - // if more than one instance of the DB is running, start to increment the metrics keys. - if id > 1 { - regName = fmt.Sprintf("%s.%d", regName, id) - } - metricsReg := monitoring.Default.NewRegistry(regName) - + procDBReg := genRegistry(monitoring.Default, regNameProcessDB) ctx, cancel := context.WithCancel(context.Background()) reader := procfs.NewProcfsReader(*logger) - db, err := processdb.NewDB(ctx, metricsReg, reader, logger, c.DBReaperPeriod, c.ReapProcesses) + db, err := processdb.NewDB(ctx, procDBReg, reader, logger, c.DBReaperPeriod, c.ReapProcesses) if err != nil { cancel() return nil, fmt.Errorf("failed to create DB: %w", err) @@ -82,7 +102,8 @@ func New(cfg *cfg.C) (beat.Processor, error) { switch c.Backend { case "auto": - p, err = kerneltracingprovider.NewProvider(ctx, logger) + procDBReg := genRegistry(monitoring.Default, regNameKernelTracing) + p, err = kerneltracingprovider.NewProvider(ctx, logger, procDBReg) if err != nil { // Most likely cause of error is not supporting ebpf or kprobes on system, try procfs backfilledPIDs := db.ScrapeProcfs() @@ -108,7 +129,8 @@ func New(cfg *cfg.C) (beat.Processor, error) { } pType = procfsType case "kernel_tracing": - p, err = kerneltracingprovider.NewProvider(ctx, logger) + procDBReg := genRegistry(monitoring.Default, regNameKernelTracing) + p, err = kerneltracingprovider.NewProvider(ctx, logger, procDBReg) if err != nil { cancel() return nil, fmt.Errorf("failed to create kernel_tracing provider: %w", err) diff --git a/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go b/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go index 422af4c935c2..d0fa6fad0665 100644 --- a/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go +++ b/x-pack/auditbeat/processors/sessionmd/add_session_metadata_test.go @@ -8,6 +8,7 @@ package sessionmd import ( "context" + "fmt" "testing" "time" @@ -339,6 +340,27 @@ var ( logger = logp.NewLogger("add_session_metadata_test") ) +func TestMetricsSetup(t *testing.T) { + // init a metrics registry multiple times with the same name, ensure we don't panic, and the names are correct + reg := monitoring.NewRegistry() + firstName := "test.metrics" + secondName := "other.stuff" + genRegistry(reg, firstName) + require.NotNil(t, reg.Get(firstName)) + + genRegistry(reg, firstName) 
+ require.NotNil(t, reg.Get(fmt.Sprintf("%s.1", firstName))) + + genRegistry(reg, secondName) + require.NotNil(t, reg.Get(secondName)) + require.Nil(t, reg.Get(fmt.Sprintf("%s.1", secondName))) + + genRegistry(reg, secondName) + require.NotNil(t, reg.Get(secondName)) + require.NotNil(t, reg.Get(fmt.Sprintf("%s.1", secondName))) + require.Nil(t, reg.Get(fmt.Sprintf("%s.2", secondName))) +} + func TestEnrich(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15) defer cancel() diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go index e57c5d693557..c94e6bb1dec9 100644 --- a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/kerneltracingprovider_linux.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/provider" "github.com/elastic/beats/v7/x-pack/auditbeat/processors/sessionmd/types" "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/monitoring" ) type prvdr struct { @@ -82,7 +83,7 @@ func readPIDNsInode() (uint64, error) { } // NewProvider returns a new instance of kerneltracingprovider -func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, error) { +func NewProvider(ctx context.Context, logger *logp.Logger, reg *monitoring.Registry) (provider.Provider, error) { attr := quark.DefaultQueueAttr() attr.Flags = quark.QQ_ALL_BACKENDS | quark.QQ_ENTRY_LEADER | quark.QQ_NO_SNAPSHOT qq, err := quark.OpenQueue(attr, 64) @@ -90,6 +91,8 @@ func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, e return nil, fmt.Errorf("open queue: %w", err) } + procMetrics := NewStats(reg) + p := &prvdr{ ctx: ctx, logger: logger, @@ -102,7 +105,10 @@ func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, e backoffSkipped: 0, } - go func(ctx context.Context, qq *quark.Queue, logger *logp.Logger, p *prvdr) { + go func(ctx context.Context, qq *quark.Queue, logger *logp.Logger, p *prvdr, stats *Stats) { + + lastUpdate := time.Now() + defer qq.Close() for ctx.Err() == nil { p.qqMtx.Lock() @@ -112,6 +118,19 @@ func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, e logger.Errorw("get events from quark, no more process enrichment from this processor will be done", "error", err) break } + if time.Since(lastUpdate) > time.Second*5 { + p.qqMtx.Lock() + metrics := qq.Stats() + p.qqMtx.Unlock() + + stats.Aggregations.Set(metrics.Aggregations) + stats.Insertions.Set(metrics.Insertions) + stats.Lost.Set(metrics.Lost) + stats.NonAggregations.Set(metrics.NonAggregations) + stats.Removals.Set(metrics.Removals) + lastUpdate = time.Now() + } + if len(events) == 0 { err = qq.Block() if err != nil { @@ -120,7 +139,7 @@ func NewProvider(ctx context.Context, logger *logp.Logger) (provider.Provider, e } } } - }(ctx, qq, logger, p) + }(ctx, qq, logger, p, procMetrics) bootID, err = readBootID() if err != nil { @@ -150,11 +169,8 @@ const ( // does not exceed a reasonable threshold that would delay all other events processed by auditbeat. When in the backoff state, enrichment // will proceed without waiting for the process data to exist in the cache, likely resulting in missing enrichment data. 
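The Sync diff that follows replaces direct p.qq.Lookup calls, which previously required holding qqMtx for the whole function, with p.lookupLocked, whose definition is not shown in this patch. Presumably it narrows the critical section to a single lookup, along these lines (a sketch under that assumption, in the same package; the quark.Process return type is a guess based on how the result is used):

    // lookupLocked takes the queue mutex only for the duration of one
    // lookup, so Sync no longer holds it while it waits and backs off.
    func (p *prvdr) lookupLocked(pid uint32) (quark.Process, bool) {
        p.qqMtx.Lock()
        defer p.qqMtx.Unlock()
        return p.qq.Lookup(int(pid))
    }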
func (p *prvdr) Sync(_ *beat.Event, pid uint32) error { - p.qqMtx.Lock() - defer p.qqMtx.Unlock() - // If pid is already in qq, return immediately - if _, found := p.qq.Lookup(int(pid)); found { + if _, found := p.lookupLocked(pid); found { return nil } @@ -169,7 +185,7 @@ func (p *prvdr) Sync(_ *beat.Event, pid uint32) error { nextWait := 5 * time.Millisecond for { waited := time.Since(start) - if _, found := p.qq.Lookup(int(pid)); found { + if _, found := p.lookupLocked(pid); found { p.logger.Debugw("got process that was missing ", "waited", waited) p.combinedWait = p.combinedWait + waited return nil diff --git a/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/metrics.go b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/metrics.go new file mode 100644 index 000000000000..3f7378306c26 --- /dev/null +++ b/x-pack/auditbeat/processors/sessionmd/provider/kerneltracingprovider/metrics.go @@ -0,0 +1,31 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build linux && (amd64 || arm64) && cgo + +package kerneltracingprovider + +import ( + "github.com/elastic/elastic-agent-libs/monitoring" +) + +// / Stats tracks the quark internal stats, which are integrated into the beats monitoring runtime +type Stats struct { + Insertions *monitoring.Uint + Removals *monitoring.Uint + Aggregations *monitoring.Uint + NonAggregations *monitoring.Uint + Lost *monitoring.Uint +} + +// / NewStats creates a new stats object +func NewStats(reg *monitoring.Registry) *Stats { + return &Stats{ + Insertions: monitoring.NewUint(reg, "insertions"), + Removals: monitoring.NewUint(reg, "removals"), + Aggregations: monitoring.NewUint(reg, "aggregations"), + NonAggregations: monitoring.NewUint(reg, "nonaggregations"), + Lost: monitoring.NewUint(reg, "lost"), + } +} From b14b1e026d94ab6b5bdc2dedc256d09fa53b5f0b Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Wed, 26 Feb 2025 06:54:11 -0300 Subject: [PATCH 25/41] fix(tests): ensure TestFilebeatOTelE2E does not fail if timestamps are equal (#42895) --- x-pack/filebeat/tests/integration/otel_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/x-pack/filebeat/tests/integration/otel_test.go b/x-pack/filebeat/tests/integration/otel_test.go index 7ae0bfff7d52..7a147add0dbe 100644 --- a/x-pack/filebeat/tests/integration/otel_test.go +++ b/x-pack/filebeat/tests/integration/otel_test.go @@ -165,15 +165,6 @@ func assertMapsEqual(t *testing.T, m1, m2 mapstr.M, ignoredFields []string, msg assert.Failf(t, msg, "ignored field %q does not exist in either map, please remove it from the ignored fields", f) } - // If the ignored field exists and is equal in both maps then it shouldn't be ignored - if hasKeyM1 && hasKeyM2 { - valM1, _ := flatM1.GetValue(f) - valM2, _ := flatM2.GetValue(f) - if valM1 == valM2 { - assert.Failf(t, msg, "ignored field %q is equal in both maps, please remove it from the ignored fields", f) - } - } - flatM1.Delete(f) flatM2.Delete(f) } From a4bc9f09ada841247be459710bdc814f9c69fc6d Mon Sep 17 00:00:00 2001 From: Mauri de Souza Meneguzzo Date: Wed, 26 Feb 2025 08:01:52 -0300 Subject: [PATCH 26/41] fix(tests): relax TestGroup_Go timeouts (#42862) --- .../filestream/internal/task/group_test.go | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git 
a/filebeat/input/filestream/internal/task/group_test.go b/filebeat/input/filestream/internal/task/group_test.go index 6ba0ac2cf1db..159bcfb5b13e 100644 --- a/filebeat/input/filestream/internal/task/group_test.go +++ b/filebeat/input/filestream/internal/task/group_test.go @@ -21,7 +21,7 @@ import ( "context" "errors" "fmt" - "math/rand" + "math/rand/v2" "strings" "sync" "sync/atomic" @@ -95,7 +95,7 @@ func TestGroup_Go(t *testing.T) { assert.Eventually(t, func() bool { return want == runningCount.Load() }, - time.Second, 100*time.Millisecond) + 1*time.Second, 10*time.Millisecond) }) t.Run("workloads wait for available worker", func(t *testing.T) { @@ -158,7 +158,7 @@ func TestGroup_Go(t *testing.T) { // Wait to ensure f1 and f2 are running, thus there is no workers free. assert.Eventually(t, func() bool { return int64(2) == runningCount.Load() }, - 100*time.Millisecond, time.Millisecond) + 1*time.Second, 10*time.Millisecond) err = g.Go(f3) require.NoError(t, err) @@ -170,7 +170,7 @@ func TestGroup_Go(t *testing.T) { func() bool { return f3Started.Load() }, - 100*time.Millisecond, time.Millisecond) + 1*time.Second, 10*time.Millisecond) // If f3 started, f2 must have finished assert.True(t, f2Finished.Load()) @@ -186,8 +186,8 @@ func TestGroup_Go(t *testing.T) { assert.Eventually(t, func() bool { return doneCount.Load() == 3 }, - 50*time.Millisecond, - time.Millisecond, + 1*time.Second, + 10*time.Millisecond, "not all goroutines finished") }) @@ -202,14 +202,13 @@ func TestGroup_Go(t *testing.T) { t.Run("without limit, all goroutines run", func(t *testing.T) { // 100 <= limit <= 10000 - limit := rand.Int63n(10000-100) + 100 + limit := rand.IntN(10000-100) + 100 t.Logf("running %d goroutines", limit) g := NewGroup(uint64(limit), time.Second, noopLogger{}, "") done := make(chan struct{}) var runningCounter atomic.Int64 - var i int64 - for i = 0; i < limit; i++ { + for i := 0; i < limit; i++ { err := g.Go(func(context.Context) error { runningCounter.Add(1) defer runningCounter.Add(-1) @@ -221,9 +220,9 @@ func TestGroup_Go(t *testing.T) { } assert.Eventually(t, - func() bool { return limit == runningCounter.Load() }, - 100*time.Millisecond, - time.Millisecond) + func() bool { return int64(limit) == runningCounter.Load() }, + 1*time.Second, + 10*time.Millisecond) close(done) err := g.Stop() @@ -253,7 +252,7 @@ func TestGroup_Go(t *testing.T) { assert.Eventually(t, func() bool { return count.Load() == want && logger.String() != "" - }, 100*time.Millisecond, time.Millisecond) + }, 1*time.Second, 10*time.Millisecond) err = g.Stop() require.NoError(t, err) @@ -286,7 +285,7 @@ func TestGroup_Go(t *testing.T) { assert.Eventually(t, func() bool { return count.Load() == want && logger.String() != "" - }, 100*time.Millisecond, time.Millisecond, "not all workloads finished") + }, 1*time.Second, 10*time.Millisecond, "not all workloads finished") assert.Contains(t, logger.String(), wantErr.Error()) From bd538ffb75907bca79066ec66a760dd8f638e3b2 Mon Sep 17 00:00:00 2001 From: Valentin Crettaz Date: Wed, 26 Feb 2025 13:33:13 +0100 Subject: [PATCH 27/41] Update Stack Monitoring data stream to 9 (#42823) * Update Stack Monitoring data stream to 9 * Fix linter issues * Append instead of set by index * Use goimports * Remove empty line Co-authored-by: Anderson Queiroz * Remove empty line Co-authored-by: Anderson Queiroz * Fix logged message Co-authored-by: Anderson Queiroz --------- Co-authored-by: Chris Earle Co-authored-by: Anderson Queiroz --- metricbeat/helper/elastic/elastic.go | 24 ++++++++++++++++------- 
metricbeat/helper/elastic/elastic_test.go | 10 +++++----- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/metricbeat/helper/elastic/elastic.go b/metricbeat/helper/elastic/elastic.go index 25fa5835434f..a952497d9f83 100644 --- a/metricbeat/helper/elastic/elastic.go +++ b/metricbeat/helper/elastic/elastic.go @@ -18,8 +18,11 @@ package elastic import ( + "errors" "fmt" - "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/mapstr" @@ -84,7 +87,7 @@ func (p Product) String() string { // MakeXPackMonitoringIndexName method returns the name of the monitoring index for // a given product { elasticsearch, kibana, logstash, beats } func MakeXPackMonitoringIndexName(product Product) string { - const version = "8" + const version = "9" return fmt.Sprintf(".monitoring-%v-%v-mb", product.xPackMonitoringIndexString(), version) } @@ -100,7 +103,7 @@ func ReportErrorForMissingField(field string, product Product, r mb.ReporterV2) // MakeErrorForMissingField returns an error message for the given field being missing in an API // response received from a given product func MakeErrorForMissingField(field string, product Product) error { - return fmt.Errorf("Could not find field '%v' in %v API response", field, strings.Title(product.String())) + return fmt.Errorf("could not find field '%v' in %v API response", field, cases.Title(language.English).String(product.String())) } // IsFeatureAvailable returns whether a feature is available in the current product version @@ -120,7 +123,7 @@ func ReportAndLogError(err error, r mb.ReporterV2, l *logp.Logger) { // for it's date fields: https://github.com/elastic/elasticsearch/pull/36691 func FixTimestampField(m mapstr.M, field string) error { v, err := m.GetValue(field) - if err == mapstr.ErrKeyNotFound { + if errors.Is(err, mapstr.ErrKeyNotFound) { return nil } if err != nil { @@ -161,10 +164,17 @@ func NewModule(base *mb.BaseModule, xpackEnabledMetricsets []string, optionalXpa metricsets := xpackEnabledMetricsets if err == nil && cfgdMetricsets != nil { // Type cast the metricsets to a slice of strings - cfgdMetricsetsSlice := cfgdMetricsets.([]interface{}) - cfgdMetricsetsStrings := make([]string, len(cfgdMetricsetsSlice)) + cfgdMetricsetsSlice, ok := cfgdMetricsets.([]interface{}) + if !ok { + return nil, fmt.Errorf("configured metricsets are not a slice for module %s: %v", moduleName, cfgdMetricsets) + } + + cfgdMetricsetsStrings := make([]string, 0, len(cfgdMetricsetsSlice)) for i := range cfgdMetricsetsSlice { - cfgdMetricsetsStrings[i] = cfgdMetricsetsSlice[i].(string) + asString, ok := cfgdMetricsetsSlice[i].(string) + if ok { + cfgdMetricsetsStrings = append(cfgdMetricsetsStrings, asString) + } } // Add any optional metricsets which are not already configured diff --git a/metricbeat/helper/elastic/elastic_test.go b/metricbeat/helper/elastic/elastic_test.go index be5529928029..e6939cd42583 100644 --- a/metricbeat/helper/elastic/elastic_test.go +++ b/metricbeat/helper/elastic/elastic_test.go @@ -38,22 +38,22 @@ func TestMakeXPackMonitoringIndexName(t *testing.T) { { "Elasticsearch monitoring index", Elasticsearch, - ".monitoring-es-8-mb", + ".monitoring-es-9-mb", }, { "Kibana monitoring index", Kibana, - ".monitoring-kibana-8-mb", + ".monitoring-kibana-9-mb", }, { "Logstash monitoring index", Logstash, - ".monitoring-logstash-8-mb", + ".monitoring-logstash-9-mb", }, { "Beats monitoring index", Beats, - ".monitoring-beats-8-mb", +
".monitoring-beats-9-mb", }, } @@ -86,7 +86,7 @@ func TestReportErrorForMissingField(t *testing.T) { r := MockReporterV2{} err := ReportErrorForMissingField(field, Elasticsearch, r) - expectedError := fmt.Errorf("Could not find field '%v' in Elasticsearch API response", field) + expectedError := fmt.Errorf("could not find field '%v' in Elasticsearch API response", field) assert.Equal(t, expectedError, err) assert.Equal(t, expectedError, currentErr) } From 947233a892ce62f135a03d78c0f79983bb93bcaf Mon Sep 17 00:00:00 2001 From: Valentin Crettaz Date: Wed, 26 Feb 2025 17:33:23 +0100 Subject: [PATCH 28/41] =?UTF-8?q?[Stack=20Monitoring]=C2=A0Only=20fetch=20?= =?UTF-8?q?cluster-level=20index=20stats=20summary=20(#42901)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Only fetch cluster-level index stats summary * Only fetch cluster-level index stats summary (fix tests) --- CHANGELOG.next.asciidoc | 1 + metricbeat/module/elasticsearch/_meta/README.md | 6 +++--- .../module/elasticsearch/index_summary/index_summary.go | 4 +++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 12976ffcd40c..e8235a24a645 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -268,6 +268,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] - Continue collecting metrics even if the Cisco Meraki `getDeviceLicenses` operation fails. {pull}42397[42397] - Fixed errors in the `elasticsearch.index` metricset when index settings are missing. {issue}42424[42424] {pull}42426[42426] - Fixed panic caused by uninitialized meraki device wifi0 and wifi1 struct pointers in the device WiFi data fetching. {issue}42745[42745] {pull}42746[42746] +- Only fetch cluster-level index stats summary {issue}36019[36019] {pull}42901[42901] *Osquerybeat* diff --git a/metricbeat/module/elasticsearch/_meta/README.md b/metricbeat/module/elasticsearch/_meta/README.md index 4e0381e7e9ee..f759caa41d58 100644 --- a/metricbeat/module/elasticsearch/_meta/README.md +++ b/metricbeat/module/elasticsearch/_meta/README.md @@ -33,7 +33,7 @@ Metricbeat will call the following Elasticsearch API endpoints corresponding to - [mb exported fields](https://www.elastic.co/guide/en/beats/metricbeat/current/exported-fields-elasticsearch.html#_index_recovery) ### index_summary -- `/_stats` +- `/_stats?level=cluster` - [api reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) - [mb exported fields](https://www.elastic.co/guide/en/beats/metricbeat/current/exported-fields-elasticsearch.html#_index_summary) @@ -48,7 +48,7 @@ Metricbeat will call the following Elasticsearch API endpoints corresponding to - [mb exported fields](https://www.elastic.co/guide/en/beats/metricbeat/current/exported-fields-elasticsearch.html#_node_2) ### node_stats -- `/_nodes/{_local|_all}/stats` +- `/_nodes/{_local|_all}/stats/jvm,indices,fs,os,process,transport,thread_pool,indexing_pressure,ingest/bulk,docs,get,merge,translog,fielddata,indexing,query_cache,request_cache,search,shard_stats,store,segments,refresh,flush` - `_local` | `_all` from [`scope`](https://www.elastic.co/guide/en/elasticsearch/reference/current/configuring-metricbeat.html#CO490-2) setting - [api reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-info.html) - [mb exported fields](https://www.elastic.co/guide/en/beats/metricbeat/current/exported-fields-elasticsearch.html#_node_stats) @@ -59,4 +59,4 @@ 
Metricbeat will call the following Elasticsearch API endpoints corresponding to ### shard - `/_cluster/state/version,nodes,master_node,routing_table` - [api reference](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-state.html) -- [mb exported fields](https://www.elastic.co/guide/en/beats/metricbeat/current/exported-fields-elasticsearch.html#_shard) \ No newline at end of file +- [mb exported fields](https://www.elastic.co/guide/en/beats/metricbeat/current/exported-fields-elasticsearch.html#_shard) diff --git a/metricbeat/module/elasticsearch/index_summary/index_summary.go b/metricbeat/module/elasticsearch/index_summary/index_summary.go index d3a91b442479..7a2c8bbcd7e9 100644 --- a/metricbeat/module/elasticsearch/index_summary/index_summary.go +++ b/metricbeat/module/elasticsearch/index_summary/index_summary.go @@ -40,7 +40,8 @@ func init() { const ( statsPath = "/_stats" - allowClosedIndices = "forbid_closed_indices=false" + onlyClusterLevel = "level=cluster" + allowClosedIndices = "&forbid_closed_indices=false" ) var ( @@ -109,6 +110,7 @@ func getServicePath(esVersion version.V) (string, error) { return "", err } + u.RawQuery += onlyClusterLevel if !esVersion.LessThan(elasticsearch.BulkStatsAvailableVersion) { u.RawQuery += allowClosedIndices } From 14db218f6bb18f7d1d616adfede14aa664b4f5fe Mon Sep 17 00:00:00 2001 From: Colleen McGinnis Date: Wed, 26 Feb 2025 11:10:51 -0600 Subject: [PATCH 29/41] add the new ci checks (#42896) Co-authored-by: David Kilfoyle <41695641+kilfoyle@users.noreply.github.com> --- .github/workflows/docs-build.yml | 19 +++++++++++++++++++ .github/workflows/docs-cleanup.yml | 14 ++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 .github/workflows/docs-build.yml create mode 100644 .github/workflows/docs-cleanup.yml diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml new file mode 100644 index 000000000000..bb466166d0e8 --- /dev/null +++ b/.github/workflows/docs-build.yml @@ -0,0 +1,19 @@ +name: docs-build + +on: + push: + branches: + - main + pull_request_target: ~ + merge_group: ~ + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-build.yml@main + with: + path-pattern: docs/** + permissions: + deployments: write + id-token: write + contents: read + pull-requests: read diff --git a/.github/workflows/docs-cleanup.yml b/.github/workflows/docs-cleanup.yml new file mode 100644 index 000000000000..f83e017b5f7c --- /dev/null +++ b/.github/workflows/docs-cleanup.yml @@ -0,0 +1,14 @@ +name: docs-cleanup + +on: + pull_request_target: + types: + - closed + +jobs: + docs-preview: + uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main + permissions: + contents: none + id-token: write + deployments: write From c4054adec82dc0a4626fc50e03066bb87d4cd35e Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Wed, 26 Feb 2025 13:18:49 -0800 Subject: [PATCH 30/41] Disable keystore for beats in FIPS mode (#42846) Disable keystore functionality when in FIPS mode. 
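The mechanism behind the keystore_fips.go/keystore_nofips.go pairs in the diff below is Go build constraints: each pair declares the same symbol, one file guarded by requirefips and the other by !requirefips, so exactly one implementation is compiled into any given binary and callers need no runtime checks. A minimal sketch of the pattern — the file, package, and function names here are illustrative, not from the patch:

    // file: feature_fips.go, compiled only with `go build -tags requirefips`
    //go:build requirefips

    package feature

    // Enabled reports whether the feature is available; FIPS builds get
    // this stub, switching the feature off entirely.
    func Enabled() bool { return false }

    // file: feature_nofips.go, compiled in all other builds
    //go:build !requirefips

    package feature

    func Enabled() bool { return true }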
--- libbeat/cmd/instance/beat.go | 21 ++++------ libbeat/cmd/instance/keystore_fips.go | 30 ++++++++++++++ libbeat/cmd/instance/keystore_fips_test.go | 36 +++++++++++++++++ libbeat/cmd/instance/keystore_nofips.go | 36 +++++++++++++++++ libbeat/cmd/keystore.go | 20 ---------- libbeat/cmd/keystore_fips.go | 31 +++++++++++++++ libbeat/cmd/keystore_nofips.go | 46 ++++++++++++++++++++++ libbeat/cmd/root.go | 4 +- 8 files changed, 190 insertions(+), 34 deletions(-) create mode 100644 libbeat/cmd/instance/keystore_fips.go create mode 100644 libbeat/cmd/instance/keystore_fips_test.go create mode 100644 libbeat/cmd/instance/keystore_nofips.go create mode 100644 libbeat/cmd/keystore_fips.go create mode 100644 libbeat/cmd/keystore_nofips.go diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index e16cab2f3311..99bf7cbd2c99 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -307,9 +307,10 @@ func NewBeatReceiver(settings Settings, receiverConfig map[string]interface{}, u if settings.DisableConfigResolver { config.OverwriteConfigOpts(obfuscateConfigOpts()) - } else { + } else if store != nil { // TODO: Allow the options to be more flexible for dynamic changes - config.OverwriteConfigOpts(configOpts(store)) + // note that if the store is nil it should be excluded as an option + config.OverwriteConfigOpts(configOptsWithKeystore(store)) } b.Beat.Info.Monitoring.Namespace = monitoring.GetNamespace(b.Info.Beat + "-" + b.Info.ID.String()) @@ -1004,9 +1005,10 @@ func (b *Beat) configure(settings Settings) error { if settings.DisableConfigResolver { config.OverwriteConfigOpts(obfuscateConfigOpts()) - } else { + } else if store != nil { // TODO: Allow the options to be more flexible for dynamic changes - config.OverwriteConfigOpts(configOpts(store)) + // note that if the store is nil it should be excluded as an option + config.OverwriteConfigOpts(configOptsWithKeystore(store)) } instrumentation, err := instrumentation.New(cfg, b.Info.Beat, b.Info.Version) @@ -1667,9 +1669,9 @@ func (b *Beat) logSystemInfo(log *logp.Logger) { } } -// configOpts returns ucfg config options with a resolver linked to the current keystore. +// configOptsWithKeystore returns ucfg config options with a resolver linked to the current keystore. // Refactor to allow insert into the config option array without having to redefine everything -func configOpts(store keystore.Keystore) []ucfg.Option { +func configOptsWithKeystore(store keystore.Keystore) []ucfg.Option { return []ucfg.Option{ ucfg.PathSep("."), ucfg.Resolve(keystore.ResolverWrap(store)), @@ -1687,13 +1689,6 @@ func obfuscateConfigOpts() []ucfg.Option { } } -// LoadKeystore returns the appropriate keystore based on the configuration. -func LoadKeystore(cfg *config.C, name string) (keystore.Keystore, error) { - keystoreCfg, _ := cfg.Child("keystore", -1) - defaultPathConfig := paths.Resolve(paths.Data, fmt.Sprintf("%s.keystore", name)) - return keystore.Factory(keystoreCfg, defaultPathConfig, common.IsStrictPerms()) -} - func InitKibanaConfig(beatConfig beatConfig) *config.C { var esConfig *config.C if isElasticsearchOutput(beatConfig.Output.Name()) { diff --git a/libbeat/cmd/instance/keystore_fips.go b/libbeat/cmd/instance/keystore_fips.go new file mode 100644 index 000000000000..3f5fe0fde633 --- /dev/null +++ b/libbeat/cmd/instance/keystore_fips.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build requirefips + +package instance + +import ( + "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/keystore" +) + +// LoadKeystore returns nil in FIPS mode +func LoadKeystore(cfg *config.C, name string) (keystore.Keystore, error) { + return nil, nil +} diff --git a/libbeat/cmd/instance/keystore_fips_test.go b/libbeat/cmd/instance/keystore_fips_test.go new file mode 100644 index 000000000000..5231714783d8 --- /dev/null +++ b/libbeat/cmd/instance/keystore_fips_test.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build requirefips + +package instance + +import ( + "testing" + + "github.com/elastic/elastic-agent-libs/config" +) + +func TestLoadKeystore(t *testing.T) { + ks, err := LoadKeystore(config.NewConfig(), "test") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if ks != nil { + t.Error("keystore is not nil.") + } +} diff --git a/libbeat/cmd/instance/keystore_nofips.go b/libbeat/cmd/instance/keystore_nofips.go new file mode 100644 index 000000000000..44b7e6813c35 --- /dev/null +++ b/libbeat/cmd/instance/keystore_nofips.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build !requirefips + +package instance + +import ( + "fmt" + + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-libs/keystore" + "github.com/elastic/elastic-agent-libs/paths" +) + +// LoadKeystore returns the appropriate keystore based on the configuration. +func LoadKeystore(cfg *config.C, name string) (keystore.Keystore, error) { + keystoreCfg, _ := cfg.Child("keystore", -1) + defaultPathConfig := paths.Resolve(paths.Data, fmt.Sprintf("%s.keystore", name)) + return keystore.Factory(keystoreCfg, defaultPathConfig, common.IsStrictPerms()) +} diff --git a/libbeat/cmd/keystore.go b/libbeat/cmd/keystore.go index da0f5371693c..f6ef52b80897 100644 --- a/libbeat/cmd/keystore.go +++ b/libbeat/cmd/keystore.go @@ -44,26 +44,6 @@ func getKeystore(settings instance.Settings) (keystore.Keystore, error) { return b.Keystore(), nil } -// genKeystoreCmd initialize the Keystore command to manage the Keystore -// with the following subcommands: -// - create -// - add -// - remove -// - list -func genKeystoreCmd(settings instance.Settings) *cobra.Command { - keystoreCmd := cobra.Command{ - Use: "keystore", - Short: "Manage secrets keystore", - } - - keystoreCmd.AddCommand(genCreateKeystoreCmd(settings)) - keystoreCmd.AddCommand(genAddKeystoreCmd(settings)) - keystoreCmd.AddCommand(genRemoveKeystoreCmd(settings)) - keystoreCmd.AddCommand(genListKeystoreCmd(settings)) - - return &keystoreCmd -} - func genCreateKeystoreCmd(settings instance.Settings) *cobra.Command { var flagForce bool command := &cobra.Command{ diff --git a/libbeat/cmd/keystore_fips.go b/libbeat/cmd/keystore_fips.go new file mode 100644 index 000000000000..03c9df2211d6 --- /dev/null +++ b/libbeat/cmd/keystore_fips.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build requirefips + +package cmd + +import ( + "github.com/spf13/cobra" + + "github.com/elastic/beats/v7/libbeat/cmd/instance" +) + +// genKeystoreCmd returns nil in fips mode as the keystore is disabled. +func genKeystoreCmd(_ instance.Settings) *cobra.Command { + return nil +} diff --git a/libbeat/cmd/keystore_nofips.go b/libbeat/cmd/keystore_nofips.go new file mode 100644 index 000000000000..0aa2fed6074a --- /dev/null +++ b/libbeat/cmd/keystore_nofips.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !requirefips + +package cmd + +import ( + "github.com/spf13/cobra" + + "github.com/elastic/beats/v7/libbeat/cmd/instance" +) + +// genKeystoreCmd initialize the Keystore command to manage the Keystore +// with the following subcommands: +// - create +// - add +// - remove +// - list +func genKeystoreCmd(settings instance.Settings) *cobra.Command { + keystoreCmd := cobra.Command{ + Use: "keystore", + Short: "Manage secrets keystore", + } + + keystoreCmd.AddCommand(genCreateKeystoreCmd(settings)) + keystoreCmd.AddCommand(genAddKeystoreCmd(settings)) + keystoreCmd.AddCommand(genRemoveKeystoreCmd(settings)) + keystoreCmd.AddCommand(genListKeystoreCmd(settings)) + + return &keystoreCmd +} diff --git a/libbeat/cmd/root.go b/libbeat/cmd/root.go index cf0a379716fc..1bd4f694ddab 100644 --- a/libbeat/cmd/root.go +++ b/libbeat/cmd/root.go @@ -105,7 +105,9 @@ func GenRootCmdWithSettings(beatCreator beat.Creator, settings instance.Settings rootCmd.AddCommand(rootCmd.CompletionCmd) rootCmd.AddCommand(rootCmd.ExportCmd) rootCmd.AddCommand(rootCmd.TestCmd) - rootCmd.AddCommand(rootCmd.KeystoreCmd) + if rootCmd.KeystoreCmd != nil { + rootCmd.AddCommand(rootCmd.KeystoreCmd) + } return rootCmd } From 849e95d9e19ec5dde497bc7e5b45d001f3d3f793 Mon Sep 17 00:00:00 2001 From: Yehor Shvedov <146825775+ev1yehor@users.noreply.github.com> Date: Thu, 27 Feb 2025 13:08:51 +0200 Subject: [PATCH 31/41] Disable interpolation for monorepo plugin (#42936) * Disable interpolation for monorepo plugin * Changes to trigger pipelines for test * Revert "Changes to trigger pipelines for test" This reverts commit 6592ba1662131b29bac2f20c9b59f7d9bd49b46a. 
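
Note: per the monorepo-diff plugin's docs, `interpolation: false` should make the
generated pipeline be uploaded with `buildkite-agent pipeline upload
--no-interpolation`, so `$`-sequences in commit messages or watched-step commands
are not expanded by Buildkite. A minimal sketch of one affected stanza (the watch
path and upload command are illustrative, not copied from this pipeline):

    steps:
      - plugins:
          - monorepo-diff#v1.0.1:
              diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD"
              interpolation: false
              watch:
                - path:
                    - auditbeat/
                  config:
                    command: "buildkite-agent pipeline upload .buildkite/auditbeat/pipeline.yml"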
--- .buildkite/pipeline.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index a4e204fb2a41..7bc0916239c4 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -6,6 +6,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - auditbeat/ @@ -40,6 +41,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - heartbeat/ @@ -74,6 +76,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - filebeat/ @@ -109,6 +112,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - filebeat/ @@ -146,6 +150,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - x-pack/dockerlogbeat/ @@ -180,6 +185,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - metricbeat/ @@ -214,6 +220,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - metricbeat/ @@ -251,6 +258,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - x-pack/osquerybeat/** @@ -288,6 +296,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - winlogbeat/ @@ -323,6 +332,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - .buildkite/deploy/kubernetes/** @@ -353,6 +363,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - libbeat/ @@ -386,6 +397,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - libbeat/ @@ -421,6 +433,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - auditbeat/ @@ -457,6 +470,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - heartbeat/ @@ -493,6 +507,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - packetbeat/ @@ -529,6 +544,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - winlogbeat/ @@ -563,6 +579,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - packetbeat/ @@ -597,6 +614,7 @@ steps: plugins: - monorepo-diff#v1.0.1: diff: "git diff --name-only origin/${GITHUB_PR_TARGET_BRANCH}...HEAD" + interpolation: false watch: - path: - auditbeat/ From 
1a120cc21cf03ca3434300452f4e0e4dd2b87cfa Mon Sep 17 00:00:00 2001 From: Ishleen Kaur <102962586+ishleenk17@users.noreply.github.com> Date: Thu, 27 Feb 2025 18:14:05 +0530 Subject: [PATCH 32/41] Update Prometheus Module Codeowner (#42935) * Update codeowner * Update owners --- .github/CODEOWNERS | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 896f67f8f98e..087d464cdc95 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -99,7 +99,10 @@ CHANGELOG* /metricbeat/module/nats/ @elastic/obs-ds-hosted-services /metricbeat/module/nginx @elastic/obs-infraobs-integrations /metricbeat/module/php_fpm @elastic/obs-infraobs-integrations -/metricbeat/module/prometheus/ @elastic/obs-ds-hosted-services +/metricbeat/module/prometheus @elastic/obs-infraobs-integrations +/metricbeat/module/prometheus/collector @elastic/obs-infraobs-integrations +/metricbeat/module/prometheus/query @elastic/obs-infraobs-integrations +/metricbeat/module/prometheus/remote_write @elastic/obs-ds-hosted-services /metricbeat/module/postgresql @elastic/obs-infraobs-integrations /metricbeat/module/rabbitmq @elastic/obs-infraobs-integrations /metricbeat/module/redis @elastic/obs-infraobs-integrations @@ -228,7 +231,9 @@ CHANGELOG* /x-pack/metricbeat/module/openai @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/oracle @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/panw @elastic/obs-infraobs-integrations -/x-pack/metricbeat/module/prometheus/ @elastic/obs-ds-hosted-services +/x-pack/metricbeat/module/prometheus @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/prometheus/collector @elastic/obs-infraobs-integrations +/x-pack/metricbeat/module/prometheus/remote_write @elastic/obs-ds-hosted-services /x-pack/metricbeat/module/redisenterprise @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/sql @elastic/obs-infraobs-integrations /x-pack/metricbeat/module/statsd @elastic/obs-infraobs-integrations From 11ee01ad823c1a43692afc8993c86b30a832be24 Mon Sep 17 00:00:00 2001 From: Baptiste Turquet <28805281+bturquet@users.noreply.github.com> Date: Thu, 27 Feb 2025 15:07:14 +0100 Subject: [PATCH 33/41] Update CODEOWNERS for AWS filebeat and metricbeat modules (#42852) * Update CODEOWNERS for AWS filebeat and metricbeat modules Align with Hosted services and Integrations teams ownerships * fix typo for transitgateway fix typo for transitgateway --- .github/CODEOWNERS | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 087d464cdc95..b8a0ef3a69f2 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -142,6 +142,12 @@ CHANGELOG* /x-pack/filebeat/input/streaming/ @elastic/security-service-integrations /x-pack/filebeat/module/activemq @elastic/obs-infraobs-integrations /x-pack/filebeat/module/aws @elastic/obs-ds-hosted-services +/x-pack/filebeat/module/aws/cloudtrail @elastic/obs-infraobs-integrations +/x-pack/filebeat/module/aws/cloudwatch @elastic/obs-ds-hosted-services +/x-pack/filebeat/module/aws/ec2 @elastic/obs-ds-hosted-services +/x-pack/filebeat/module/aws/elb @elastic/obs-infraobs-integrations +/x-pack/filebeat/module/aws/s3access @elastic/obs-ds-hosted-services @elastic/security-service-integrations +/x-pack/filebeat/module/aws/vpcflow @elastic/security-service-integrations /x-pack/filebeat/module/awsfargate @elastic/obs-ds-hosted-services /x-pack/filebeat/module/azure @elastic/obs-ds-hosted-services /x-pack/filebeat/module/barracuda 
@elastic/security-service-integrations
@@ -199,6 +205,24 @@ CHANGELOG*
 /x-pack/metricbeat/module/activemq @elastic/obs-infraobs-integrations
 /x-pack/metricbeat/module/airflow @elastic/obs-infraobs-integrations
 /x-pack/metricbeat/module/aws @elastic/obs-ds-hosted-services
+/x-pack/metricbeat/module/aws/billing @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/cloudwatch @elastic/obs-ds-hosted-services
+/x-pack/metricbeat/module/aws/dynamodb @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/ebs @elastic/obs-ds-hosted-services
+/x-pack/metricbeat/module/aws/ec2 @elastic/obs-ds-hosted-services
+/x-pack/metricbeat/module/aws/elb @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/kinesis @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/lambda @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/natgateway @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/rds @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/s3_daily_storage @elastic/obs-ds-hosted-services
+/x-pack/metricbeat/module/aws/s3_request @elastic/obs-ds-hosted-services
+/x-pack/metricbeat/module/aws/sns @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/sqs @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/transitgateway @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/usage @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/vpn @elastic/obs-infraobs-integrations
+/x-pack/metricbeat/module/aws/sqs @elastic/obs-infraobs-integrations
 /x-pack/metricbeat/module/awsfargate @elastic/obs-infraobs-integrations
 /x-pack/metricbeat/module/azure @elastic/obs-ds-hosted-services
 /x-pack/metricbeat/module/azure/billing @elastic/obs-infraobs-integrations

From 9ece1e3afcfb4401171374d9d46413e0295f0c3f Mon Sep 17 00:00:00 2001
From: Tiago Queiroz
Date: Thu, 27 Feb 2025 09:40:38 -0500
Subject: [PATCH 34/41] Skip Flaky test TestTranslateGUIDWithLDAP (#42617)

---
 filebeat/tests/integration/translate_ldap_attribute_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/filebeat/tests/integration/translate_ldap_attribute_test.go b/filebeat/tests/integration/translate_ldap_attribute_test.go
index d7c4f129593e..b4c74c20e03b 100644
--- a/filebeat/tests/integration/translate_ldap_attribute_test.go
+++ b/filebeat/tests/integration/translate_ldap_attribute_test.go
@@ -79,6 +79,7 @@ processors:
 `

 func TestTranslateGUIDWithLDAP(t *testing.T) {
+	t.Skip("Flaky Test: https://github.com/elastic/beats/issues/42616")
 	startOpenldapContainer(t)

 	var entryUUID string

From 0ea31fd849122b917c713919cdc4d18250e7fe01 Mon Sep 17 00:00:00 2001
From: stefans-elastic
Date: Thu, 27 Feb 2025 15:44:45 +0000
Subject: [PATCH 35/41] fix: don't fail metricbeat/windows/perfmon when no data is available (#42803)

* fix: don't fail metricbeat/windows/perfmon when no data is available

* add CHANGELOG.next.asciidoc entry

* fix: linter issue

* fix: linter issue

* fix: fix code according to PR comments

* refactor: make CollectData error handling testable

* fix: add missing imports to windows/perfmon/reader_test.go

* fix: add missing imports to windows/perfmon/reader_test.go

---------

Co-authored-by: Pierre HILBERT
---
 CHANGELOG.next.asciidoc | 1 +
 metricbeat/module/windows/perfmon/reader.go | 22 ++++++---
 .../module/windows/perfmon/reader_test.go | 45 +++++++++++++++++++
 3 files changed, 61 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index e8235a24a645..e0a94383ec83 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -269,6 +269,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403]
 - Fixed errors in the `elasticsearch.index` metricset when index settings are missing. {issue}42424[42424] {pull}42426[42426]
 - Fixed panic caused by uninitialized meraki device wifi0 and wifi1 struct pointers in the device WiFi data fetching. {issue}42745[42745] {pull}42746[42746]
 - Only fetch cluster-level index stats summary {issue}36019[36019] {pull}42901[42901]
+- Fixed an issue in Metricbeat's Windows module where data collection would fail if the data was unavailable. {issue}42802[42802] {pull}42803[42803]

 *Osquerybeat*

diff --git a/metricbeat/module/windows/perfmon/reader.go b/metricbeat/module/windows/perfmon/reader.go
index cc156f843e5b..a158be266670 100644
--- a/metricbeat/module/windows/perfmon/reader.go
+++ b/metricbeat/module/windows/perfmon/reader.go
@@ -96,13 +96,8 @@ func (re *Reader) Read() ([]mb.Event, error) {
 	// Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue.
 	// For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data).
 	if err := re.query.CollectData(); err != nil {
-		// users can encounter the case no counters are found (services/processes stopped), this should not generate an event with the error message,
-		//could be the case the specific services are started after and picked up by the next RefreshCounterPaths func
-		if err == pdh.PDH_NO_COUNTERS { //nolint:errorlint // Bad linter! This is always errno or nil.
-			re.log.Warnf("%s %v", collectFailedMsg, err)
-		} else {
-			return nil, fmt.Errorf("%v: %w", collectFailedMsg, err)
-		}
+		err = re.handleCollectDataError(err)
+		return nil, err
 	}

 	// Get the values.
@@ -121,6 +116,19 @@
 	return events, nil
 }

+func (re *Reader) handleCollectDataError(err error) error {
+	// Users can encounter the case where no counters are found (services/processes stopped); this should not generate an event with the error message.
+	// It could be that the specific services are started later and get picked up by the next RefreshCounterPaths run.
+	if err == pdh.PDH_NO_COUNTERS || err == pdh.PDH_NO_DATA { //nolint:errorlint // linter complains about comparing error using '==' operator but here error is always of type pdh.PdhErrno (or nil) so `errors.Is` is redundant here
+		re.log.Warnf("%s %v", collectFailedMsg, err)
+
+		// Ensure the returned error is nil to prevent the Elastic Agent from transitioning to an UNHEALTHY state.
+ return nil + } + + return fmt.Errorf("%v: %w", collectFailedMsg, err) +} + func (re *Reader) getValues() (map[string][]pdh.CounterValue, error) { var val map[string][]pdh.CounterValue // Sleep for one second before collecting the second raw value- diff --git a/metricbeat/module/windows/perfmon/reader_test.go b/metricbeat/module/windows/perfmon/reader_test.go index 55f29d404f10..9a26b7c87285 100644 --- a/metricbeat/module/windows/perfmon/reader_test.go +++ b/metricbeat/module/windows/perfmon/reader_test.go @@ -20,11 +20,13 @@ package perfmon import ( + "errors" "testing" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/metricbeat/helper/windows/pdh" + "github.com/elastic/elastic-agent-libs/logp" ) func TestGetCounter(t *testing.T) { @@ -153,3 +155,46 @@ func TestIsWildcard(t *testing.T) { result = isWildcard(queries, instance) assert.False(t, result) } + +func Test_handleCollectDataError(t *testing.T) { + tests := []struct { + name string + + mockErr error + expectedErrMsg string + }{ + { + name: "no counters error", + + mockErr: pdh.PDH_NO_COUNTERS, + expectedErrMsg: "", + }, + { + name: "no data error", + + mockErr: pdh.PDH_NO_DATA, + expectedErrMsg: "", + }, + { + name: "unexpected error", + + mockErr: errors.New("test error"), + expectedErrMsg: "failed collecting counter values: test error", + }, + } + + reader := &Reader{ + log: logp.NewLogger("perfmon"), + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := reader.handleCollectDataError(tt.mockErr) + if tt.expectedErrMsg == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tt.expectedErrMsg) + } + }) + } +} From 1ff5c49bf39960690b2cd2f1cd2c6ab8879f55a9 Mon Sep 17 00:00:00 2001 From: Valentin Crettaz Date: Thu, 27 Feb 2025 19:25:38 +0100 Subject: [PATCH 36/41] [Stack Monitoring] Remove `kibana.settings` metricset (#42937) * Remove kibana.settings metricset --- CHANGELOG.next.asciidoc | 1 + metricbeat/docs/fields.asciidoc | 93 ----------- metricbeat/docs/modules/kibana.asciidoc | 4 - .../docs/modules/kibana/settings.asciidoc | 26 ---- metricbeat/docs/modules_list.asciidoc | 3 +- metricbeat/include/list_common.go | 1 - metricbeat/module/kibana/_meta/README.md | 9 -- metricbeat/module/kibana/fields.go | 2 +- metricbeat/module/kibana/kibana.go | 10 -- .../module/kibana/settings/_meta/data.json | 35 ----- .../kibana/settings/_meta/docs.asciidoc | 8 - .../module/kibana/settings/_meta/fields.yml | 34 ----- .../settings/_meta/test/settings.700.json | 18 --- .../settings/_meta/test/stats-legacy.700.json | 122 --------------- .../kibana/settings/_meta/test/stats.700.json | 144 ------------------ metricbeat/module/kibana/settings/data.go | 70 --------- metricbeat/module/kibana/settings/settings.go | 103 ------------- .../module/kibana/settings/settings_test.go | 79 ---------- 18 files changed, 3 insertions(+), 759 deletions(-) delete mode 100644 metricbeat/docs/modules/kibana/settings.asciidoc delete mode 100644 metricbeat/module/kibana/settings/_meta/data.json delete mode 100644 metricbeat/module/kibana/settings/_meta/docs.asciidoc delete mode 100644 metricbeat/module/kibana/settings/_meta/fields.yml delete mode 100644 metricbeat/module/kibana/settings/_meta/test/settings.700.json delete mode 100644 metricbeat/module/kibana/settings/_meta/test/stats-legacy.700.json delete mode 100644 metricbeat/module/kibana/settings/_meta/test/stats.700.json delete mode 100644 metricbeat/module/kibana/settings/data.go delete mode 100644 metricbeat/module/kibana/settings/settings.go delete mode 
100644 metricbeat/module/kibana/settings/settings_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e0a94383ec83..42cf55b071f5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -81,6 +81,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] - Add GCP organization and project details to ECS cloud fields. {pull}40461[40461] - Add support for specifying a custom endpoint for GCP service clients. {issue}40848[40848] {pull}40918[40918] - Fix incorrect handling of types in SQL module. {issue}40090[40090] {pull}41607[41607] +- Remove kibana.settings metricset since the API was removed in 8.0 {issue}30592[30592] {pull}42937[42937] *Osquerybeat* diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index c09e2a5bb87c..3031dd08ba9c 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -44787,99 +44787,6 @@ type: long -- -[float] -=== settings - -Kibana stats and run-time metrics. - - - -*`kibana.settings.uuid`*:: -+ --- -Kibana instance UUID - -type: keyword - --- - -*`kibana.settings.name`*:: -+ --- -Kibana instance name - -type: keyword - --- - -*`kibana.settings.index`*:: -+ --- -Name of Kibana's internal index - -type: keyword - --- - -*`kibana.settings.host`*:: -+ --- -Kibana instance hostname - -type: keyword - --- - -*`kibana.settings.transport_address`*:: -+ --- -Kibana server's hostname and port - -type: keyword - --- - -*`kibana.settings.version`*:: -+ --- -Kibana version - -type: keyword - --- - -*`kibana.settings.snapshot`*:: -+ --- -Whether the Kibana build is a snapshot build - -type: boolean - --- - -*`kibana.settings.status`*:: -+ --- -Kibana instance's health status - -type: keyword - --- - -*`kibana.settings.locale`*:: -+ --- -type: keyword - --- - -*`kibana.settings.port`*:: -+ --- -type: integer - --- - [float] === stats diff --git a/metricbeat/docs/modules/kibana.asciidoc b/metricbeat/docs/modules/kibana.asciidoc index 70c1054f463f..0ecf5cfe16b2 100644 --- a/metricbeat/docs/modules/kibana.asciidoc +++ b/metricbeat/docs/modules/kibana.asciidoc @@ -76,8 +76,6 @@ The following metricsets are available: * <> -* <> - * <> * <> @@ -90,8 +88,6 @@ include::kibana/node_actions.asciidoc[] include::kibana/node_rules.asciidoc[] -include::kibana/settings.asciidoc[] - include::kibana/stats.asciidoc[] include::kibana/status.asciidoc[] diff --git a/metricbeat/docs/modules/kibana/settings.asciidoc b/metricbeat/docs/modules/kibana/settings.asciidoc deleted file mode 100644 index 8f3bfd90469a..000000000000 --- a/metricbeat/docs/modules/kibana/settings.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -//// -This file is generated! See scripts/mage/docs_collector.go -//// -:edit_url: https://github.com/elastic/beats/edit/main/metricbeat/module/kibana/settings/_meta/docs.asciidoc - - -[[metricbeat-metricset-kibana-settings]] -=== Kibana settings metricset - -include::../../../module/kibana/settings/_meta/docs.asciidoc[] - - -:edit_url: - -==== Fields - -For a description of each field in the metricset, see the -<> section. - -Here is an example document generated by this metricset: - -[source,json] ----- -include::../../../module/kibana/settings/_meta/data.json[] ----- -:edit_url!: \ No newline at end of file diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index 1acf29688dc7..89c634032ef0 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -168,11 +168,10 @@ This file is generated! 
See scripts/mage/docs_collector.go |<> |<> beta[] |<> |image:./images/icon-no.png[No prebuilt dashboards] | -.7+| .7+| |<> beta[] +.6+| .6+| |<> beta[] |<> beta[] |<> beta[] |<> beta[] -|<> |<> |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | diff --git a/metricbeat/include/list_common.go b/metricbeat/include/list_common.go index c8957a4443a0..e461c7f6dfa6 100644 --- a/metricbeat/include/list_common.go +++ b/metricbeat/include/list_common.go @@ -93,7 +93,6 @@ import ( _ "github.com/elastic/beats/v7/metricbeat/module/kibana/cluster_rules" _ "github.com/elastic/beats/v7/metricbeat/module/kibana/node_actions" _ "github.com/elastic/beats/v7/metricbeat/module/kibana/node_rules" - _ "github.com/elastic/beats/v7/metricbeat/module/kibana/settings" _ "github.com/elastic/beats/v7/metricbeat/module/kibana/stats" _ "github.com/elastic/beats/v7/metricbeat/module/kibana/status" _ "github.com/elastic/beats/v7/metricbeat/module/kvm" diff --git a/metricbeat/module/kibana/_meta/README.md b/metricbeat/module/kibana/_meta/README.md index 47addd8292a9..27dfdedfaa33 100644 --- a/metricbeat/module/kibana/_meta/README.md +++ b/metricbeat/module/kibana/_meta/README.md @@ -7,15 +7,6 @@ Metricbeat will call the following Kibana API endpoints corresponding to each metricset. -### settings - -- `/api/settings` -- [mb exported fields](https://www.elastic.co/guide/en/beats/metricbeat/current/exported-fields-kibana.html#_settings_2) - -This endpoint provides some basic information about the Kibana instance and how it's configured (uuids, local settings, status). - -The endpoint was removed from kibana in 8.0.0-beta1 ([changelog](https://www.elastic.co/guide/en/kibana/master/release-notes-8.0.0-beta1.html#rest-api-breaking-changes-8.0.0-beta1)) but the metricset can still be used on older kibana versions. - ### stats [xpack] - `/api/stats` diff --git a/metricbeat/module/kibana/fields.go b/metricbeat/module/kibana/fields.go index 2a1db1d08e31..2d38a0cae6fd 100644 --- a/metricbeat/module/kibana/fields.go +++ b/metricbeat/module/kibana/fields.go @@ -32,5 +32,5 @@ func init() { // AssetKibana returns asset data. // This is the base64 encoded zlib format compressed contents of module/kibana. 
func AssetKibana() string { - return "eJzsW0uP3LgRvs+vKMzFG8AjZA5GsnMIEngXSBDYCbxr5BAEQrVU3eKaIrUssj29vz6gHj16UI9Wq3eNhftgDCTxq49VZD1Y9AN8otMTfBI7VHgHYIWV9AT3/ywf3N8BpMSJEYUVWj3BX+4AAKqXkOvUSboD4EwbGyda7cXhCfYo2T81JAmZnuDggZmsFerAT/Dfe2Z5/xruM2uL+//dAewFyZSfSuwHUJhTwyhmi5bLFwD2VHg0o11RP2kPbA+2Iie2mBfnN81olAK59bRAmz3B/V/PI+4HYBWTyBAXWjHF/tMox+eF2PXwciJDkCjnMYF+hOM1UsIADbzmKKdcm1O0N0SxUPHuZGmVoDmoRmRhdELMkSv8vP13uZBSrJLZxQrpr/mippYRFhGLXyiWIhf2GplhxCg86USrxBlDqtwaihK/hVZNeQZpZN6GWKR+DJONS7bXmHoOfEQJmiOpMY0e85Xrqz+6D/zmKuA348CP1yE/BqAbHdLRa05qXcQpSTxdY48+1tSGaH3rrJDiF/QLKcLEiiNtRGICeCEdkcpbkOnALqTS+vsGjELoDTFDPztiy5HVFuW6UFMi9AAG+Kng2qescguNlBDMi6xW0OObhs6eKDySwcOq1dQVh8fDRKR2TqQLZTCZo0goqgd0gS7Oc0giW5EwoUmyKJGOLZkoQOYTnT5rkw5jVDUkxpHg1Cbhf4FksPnVSWGNCDUi5GSNSDhqfXtODHdksfW8P8tRBU1zHINqww1yq2l1tYfqI5nU0WZUEu2UHWUitTqMDu0Hj3kyU4TayMWbPwbfN9h7qTFEuoXw7bcXIPTXo3EykKasX40e7+tS/LoUFyA0o5ROaUO/6OF+v05xj0I6Q/3BIwvnHL6eKXF9BS8Y50OydnbBqI4xt3IqpSl/nx7lS7djc4q0gRXLNA9QpWCcevBUJs15WGrMXjI4rfce0ZqaUGxRJQQfP/7ju6AQ/+9WQgZYjRChUnpeK+U95gR6X0t7xSCUJaNQBmAbgZnmvidfPSuPNToza1BxoY2NMU0NcXgFLpfqs3oyr/gstVxXXkBQ/JEMd0vKNUJDKOd9orDgTIe1udNaEvbFd0T8JyObkQGbUSNu54RMQTDgGb16FiYQ8kFrjekVSyhtFoJtJEqdoFy+Kc7JQd9IzSC/Xg9khi6odSINX6D/+XKCieNu7b2GyJQvWsRhzBv3y3MYK9Fffrk4GKwsas0g7Z6wOLxY/baufV7sbZz9UO5a9x/ddvobRIX5hRMevcXq2TDOhKaxjEQYtauC8DdTKtgqcg1p/8axbHZRLopuOix/jUdPBVujL/Low+Efqri0GqWQaPfa5FcDXEuk6meNDr/m5GKsvTqUEjxaaUOVh/gbYTmm9Aqo82FR4TBJxo+LrtFcopU1WsZjGDBrXujnIbFCNdhEl0y35BLfcLk0Ld5trMyfsYivh3zJrjGk5Otn/RhyAS/QGcp9PH/E92YTlMd1MIsb/jCl67mEyuU7Mj6lSqQgZaElAnJMCawuQ1yVI0TwXlsCm6GFndGfmQxDggqYVAq5k1YUkoCF/xMVaccdRKubFmHr4xIZkJl85WJBq4RelymIzehUwht6cExAbHEnBWeUtmGjcAlWtUkXB7gZTf27gmuqqYvKjIW3G2aN2cNDY/AU79x+T4avxKLnKpFeCzO8N7N46NRdgy4I+1o8HdlyM/YD+N7LAS+n6iuAUFBdGKJEq7QvNEAv3LhvMwz7rFlqP2YEZQ7rd2JZtPs9Ry+EBYPSFkQqhTp44i/vC6OPIvWZaLVTy6A+BmKcUkIdogGHufRucJvjAsssUADAd65K4s8aaBHPkGFHpGoWwMJXXRK9GyFkZygnZYeTOleh6eAI51en7jlcTHxqwW3H/63OC2cpbYuDI0pHYKgwxKSsX3bnFVqbwepqVuWcl02t53UywuFuuWonNW7aI4/46vnFXm6hEU84kyr54gHtE4wNXmCOH8sNXE4ApdQJetNY3QQ0v/vD6O1U/Ddj/3fP2zOA3Wk55dFrj78e8Xf4HIGWKXCBCZWMuvp/r1OKfuJmUq8XGGI0KM5MZwHdjyW2340tJY+Gs94Fra2SIoAPdTpXNvkvSo46tX/gsthWinpJcZvbaVX++pkMtUTTeDHfvyy3FbNqo6sBvxHLtW6OrbPfhxqiusi+0li9O2trtPG36v7ceUpVAJnOxlqho3tHbw2Bd/gscpdfQCB4NS6uaqZ1xvi+jQRvS6RXPOgDLTFJdYxTBeWYdfKJJjZSvwE1Qq+/NpvMawS9d6KUypsRqfKoRTR+duQojYPbagMmFfxw144e8F7c0/uhRKjt33pzefvu9u2fcBH+xTbKPLEw5brJEFWGXkz+oi5HvYr62fFm3Y5y3UTa+1kpy6vH21u/Ri+FEXxz/FOdhP0hrNYeJ0lH6sfU7TiV6PDN8c8XcWKX5zg4hq1YWXruO/o1ahIKEDKXo3owhCnuJNUUJxkm2lDUvZZ9GwV2o1LSjkojLaRxhjdR5yTBq9TLeKT0X7uffBp6I+3+4EVALeNi7XYI3kS5k/wuVm44v1xda7yr4IbREBbdQV5wiA4b5PRvKyktgeMn1RD4nzOjlMa6IQs4NTWat6Mod8+Qx7K2ynSlBvN9pEV8IVAbtQu1aY3Cgsrtlkw75P4fAAD//xNN+qo=" + return 
"eJzsWkuP5LYRvu+vKMxlHWBHyBwWifsQJFgbSBDsJlh7kUMQCNVSdYteipRZZO+0f31APXr0oB6tVseO4T4MBpL41VdVJKuKxUf4TOcdfBZ7VPgKwAoraQcPfy8fPLwCSIkTIwortNrBn14BAFQvIdepk/QKgDNtbJxodRDHHRxQsn9qSBIy7eDogZmsFerIO/j3A7N8eAMPmbXFw39eARwEyZR3JfYjKMypYRSzRcvlCwB7Ljya0a6on7QHtgdbkRNbzIvLm2Y0SoHcelqgzXbw8OfLiIcBWMUkMsSFVkyx/zTK8Xkhdj28VGQIEuU8JtCPcLxGShiggdcc5ZRrc44OhigWKt6fLa0SNAfViCyMTog5coXX23+XCynFKpldrJD9mi9qahlhEbH4iWIpcmFvkRlGjMJKJ1olzhhS5dJQlPgltErlGaQRvQ2xSP0YJhuXbG9x9Rz4iBE0R1JjGj3lK+dXf3Qf+O1NwG/HgZ9uQ34KQDc2pJO3nNS6iFOSeL7FH32sqQXR+tZZIcVP6CdShIkVJ9qIxATwQjoilfcg04FdSKX1/x0YhdAbYoZ+dMSWI6stynWhpkToAQzwU8H1nrJqW2ikhGBeZLWCHt81dPZE4YkMHlfNpq44PB0nIrVzIl0og8mcREJRPaALdHWeQxLZioQJTZJFiXRsyUQBMp/p/EWbdBijqiExjgSnNgn/CySDza9OCmtEqBEhJ2tEwlHr20tiuCeLred9LUcNNM1xDKoNN8itps3VHqpPZFJHm1FJtFN2lInU6jg6tB885slMEWojF29/H3zfYB+kxhDpFsLXX1+B0J+PxslAmrJ+Nnq836bib1NxAUIzSumUNtwXPdyvd1M8oJDOUH/wyMS5hK9nSlzfwAvG+ZCsnV0wquPMrTaV0pW/zh3ll+7H9nHQuJHmXVjiAKoUjFOPnsekL4//f5503E181xBpsIRK6XnAYgGHbl4Mo7kxjOXHL79cHA1WHrVmEPMmPA4vXheKLaqE4NOnv30TZOz/BhmH1Lxa7AB9ysA3yP2AOYE+1PJfMwhlySiUAUENhUyzje6rvhcxagJrUHGhjY0xTQ1xeFOYnzjh0VvMHi+BzGu+6FHuH55yUKETGe6eG0yrsYxEGLVrgvA3Uya47DcKC850P52qSO+1loR92Bna/8rIZmTAZtSosHdCpiAY8CKvehamFNoCt5uU3p2E0mYhQS9Hgpvt6Klga/RVO/pw+McqLq1GKSTagzb5zQC3EqkOk+9SNoz1NoZSgnVNG6o8QdsIyzGlN0BdKrXCYZKM12q3WC7Ryhot4zEMmHUv9POQWKEaLKJr1C25xHecLk1/ZRsv8xcs4tshGzipMWTk27V+Cm0BL9AZykM8X1+/3QTlaR3M4m4bTNl6LqFy+Z6MT6kSKUhZaImAHFMCq8sQV+UIEXzQlsBmaGFv9Bcmw5CgAiaVQu6kFYUkYOH/RUXacQfR6uZ8vvVxiQzITL5ysaBVQm/KFMRmdC7hDT06JiC2uJeCM0rbsFHQbnWPYnGAm7HUPyu4ppq6qsxY2FqcdWYPD43Bc7x3hwMZvhGLnqtEei3MsGm9eOhUo68LwglKSkeW3Iz/AL71csDLqQ71QCiouvWUaJX2hQbohbtmbYbhPWuW2vcZQZnD+pVYFu1+zdELYcGgtAWRSqGOnvjL+8Lok0h9Jlqt1DKoj4EYp5RQx2jAYS69G7RSr/DMAgMAfOOqJP5igRbxDBn2RKpmASx81SXRbyOE7AzlpOxQqUsV2m28/izUPYeriU9NuO34v9N54SylbXFwQukIDBWGmJT10+4yQ2s3WF1pVeq8TLXerpMRDlfLTSup2aY98shePT/ZyyU0shPOpEq+eEC7g7HBC9zxfbmASwVQSp2gd43VTUDzqz+M3k7Ffzb2f/W8PQPYn5dTHr1z9L8j/h6fI9AyBS4woZJR1/4fdErRD9wo9WaBI0aD4ow6C+h+KrH9amwZeTSc9W5HbJUUAXys07myw3ZVctSp/QM3NbYy1EuK21wNqfLXL2SoJZrGi/n+TZWtmFULXQ34jXiudW1jnf8+1hDVLdKVzupdGFljjb9Ul1cuKlUBZDoba4WO7gWZNQTe47PIXX4FgeC9lLiqmdY549s2ErwrkV7zoA+0xCXVMU4VlGPWyWeaWEhCWTqSmTNRf242mdcIeu9EKZV3I1LlUYto/OjIURoHl9UGTCr44aodPeC9uqf3XYlQ+7/15vr23f3bP+Ei/BfbKPPEwpTrJkNUOXox+au6HPUs6mfHm3U7ynkTab/PSlne+9ve+zV6KYzgq9Mf6iTsd2Gz9jhJOlE/pm7HqUSHr05/vIoTuzzHwTFsxcrSc3+jX2MmoQAhczmqR0OY4l5STXGSYaINRd07kfcxYDcqJe2oNNJCGmd4F3NOErzJvIwnSv+x/8GnoXey7ndeBNQyrrZuh+BdjDvJ72rjhvPL1bXG+wpuGA1h0QXABYfosEFO/66S0hI4flINgWvro5TGuiELODU1mvejKFfPkMeytsp0pQbzfaRFfCFQG7ULtWmLwoLK7Z5MO+T+GwAA///VGcLz" } diff --git a/metricbeat/module/kibana/kibana.go b/metricbeat/module/kibana/kibana.go index 499f5c5ede57..48bc5ca391af 100644 --- a/metricbeat/module/kibana/kibana.go +++ b/metricbeat/module/kibana/kibana.go @@ -43,12 +43,10 @@ const ( NodeRulesPath = "api/monitoring_collection/node_rules" ClusterActionsPath = "api/monitoring_collection/cluster_actions" NodeActionsPath = "api/monitoring_collection/node_actions" - SettingsPath = "api/settings" ) var ( v6_4_0 = version.MustNew("6.4.0") - v6_5_0 = version.MustNew("6.5.0") v6_7_2 = version.MustNew("6.7.2") v7_0_0 = version.MustNew("7.0.0") v7_0_1 = version.MustNew("7.0.1") @@ -57,9 +55,6 @@ var ( // StatsAPIAvailableVersion is the version of Kibana since when the stats API is available StatsAPIAvailableVersion = v6_4_0 - // SettingsAPIAvailableVersion is the version of Kibana since when the settings API is available - SettingsAPIAvailableVersion = v6_5_0 - // Version of Kibana since when the rules and task 
manager APIs are available RulesAPIAvailableVersion = v8_2_0 ActionsAPIAvailableVersion = v8_2_0 @@ -121,11 +116,6 @@ func IsStatsAPIAvailable(currentKibanaVersion *version.V) bool { return elastic.IsFeatureAvailable(currentKibanaVersion, StatsAPIAvailableVersion) } -// IsSettingsAPIAvailable returns whether the settings API is available in the given version of Kibana -func IsSettingsAPIAvailable(currentKibanaVersion *version.V) bool { - return elastic.IsFeatureAvailable(currentKibanaVersion, SettingsAPIAvailableVersion) -} - // IsRulesAPIAvailable returns whether the rules API is available in the given version of Kibana func IsRulesAPIAvailable(currentKibanaVersion *version.V) bool { return elastic.IsFeatureAvailable(currentKibanaVersion, RulesAPIAvailableVersion) diff --git a/metricbeat/module/kibana/settings/_meta/data.json b/metricbeat/module/kibana/settings/_meta/data.json deleted file mode 100644 index 892115643069..000000000000 --- a/metricbeat/module/kibana/settings/_meta/data.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "@timestamp": "2017-10-12T08:05:34.853Z", - "event": { - "dataset": "kibana.settings", - "duration": 115000, - "module": "kibana" - }, - "kibana": { - "elasticsearch": { - "cluster": { - "id": "WocBBA0QRma0sGpdQ7vLfQ" - } - }, - "settings": { - "host": "0.0.0.0", - "index": ".kibana", - "locale": "en", - "name": "b04775fa6831", - "port": 5601, - "snapshot": false, - "status": "green", - "transport_address": "0.0.0.0:5601", - "uuid": "f87393a7-e8ae-48fd-af1e-91f229fd93fd", - "version": "7.14.0" - } - }, - "metricset": { - "name": "settings", - "period": 10000 - }, - "service": { - "address": "172.19.0.3:5601", - "type": "kibana" - } -} \ No newline at end of file diff --git a/metricbeat/module/kibana/settings/_meta/docs.asciidoc b/metricbeat/module/kibana/settings/_meta/docs.asciidoc deleted file mode 100644 index 93ba8cf495ac..000000000000 --- a/metricbeat/module/kibana/settings/_meta/docs.asciidoc +++ /dev/null @@ -1,8 +0,0 @@ -This is the `settings` metricset of the Kibana module. This stats endpoint is available in 6.4 by default. - -The intention of the Kibana module is to have a minimal data set that works across Kibana versions. - -[float] -=== Module-specific configuration notes - -If the Kibana instance is using a basepath in its URL, you must set the `basepath` setting for this module with the same value. diff --git a/metricbeat/module/kibana/settings/_meta/fields.yml b/metricbeat/module/kibana/settings/_meta/fields.yml deleted file mode 100644 index f602bdc6a73e..000000000000 --- a/metricbeat/module/kibana/settings/_meta/fields.yml +++ /dev/null @@ -1,34 +0,0 @@ -- name: settings - type: group - description: > - Kibana stats and run-time metrics. 
- release: ga - fields: - - name: uuid - type: keyword - description: Kibana instance UUID - - name: name - type: keyword - description: Kibana instance name - - name: index - type: keyword - description: Name of Kibana's internal index - - name: host - type: keyword - description: Kibana instance hostname - - name: transport_address - type: keyword - description: Kibana server's hostname and port - - name: version - type: keyword - description: Kibana version - - name: snapshot - type: boolean - description: Whether the Kibana build is a snapshot build - - name: status - type: keyword - description: Kibana instance's health status - - name: locale - type: keyword - - name: port - type: integer diff --git a/metricbeat/module/kibana/settings/_meta/test/settings.700.json b/metricbeat/module/kibana/settings/_meta/test/settings.700.json deleted file mode 100644 index 845ae9f6a155..000000000000 --- a/metricbeat/module/kibana/settings/_meta/test/settings.700.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "cluster_uuid":"u5ii0pnQRka_P0gimfmthg", - "settings":{ - "xpack":{ - "default_admin_email":"jane@doe.com" - }, - "kibana":{ - "uuid":"5b2de169-2785-441b-ae8c-186a1936b17d", - "name":"Janes-MBP-2", - "index":".kibana", - "host":"localhost", - "transport_address":"localhost:5601", - "version":"7.0.0-alpha1", - "snapshot":false, - "status":"green" - } - } - } diff --git a/metricbeat/module/kibana/settings/_meta/test/stats-legacy.700.json b/metricbeat/module/kibana/settings/_meta/test/stats-legacy.700.json deleted file mode 100644 index cb7865256994..000000000000 --- a/metricbeat/module/kibana/settings/_meta/test/stats-legacy.700.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "kibana":{ - "uuid":"5b2de169-2785-441b-ae8c-186a1936b17d", - "name":"Shaunaks-MBP-2", - "index":".kibana", - "host":"localhost", - "transport_address":"localhost:5601", - "version":"7.0.0-alpha1", - "snapshot":false, - "status":"green" - }, - "last_updated":"2018-07-31T17:53:38.890Z", - "collection_interval_ms":5000, - "process":{ - "memory":{ - "heap":{ - "total_bytes":219418624, - "used_bytes":189963144, - "size_limit":1501560832 - }, - "resident_set_size_bytes":267689984 - }, - "event_loop_delay":0.4890279769897461, - "pid":23445, - "uptime_ms":749417 - }, - "os":{ - "load":{ - "1m":5.03515625, - "5m":3.56787109375, - "15m":3.45654296875 - }, - "memory":{ - "total_bytes":17179869184, - "free_bytes":32022528, - "used_bytes":17147846656 - }, - "uptime_ms":1115347000 - }, - "response_times":{ - "avg_ms":16, - "max_ms":19 - }, - "requests":{ - "total":2, - "disconnects":0, - "status_codes":{ - "200":1, - "404":1 - } - }, - "concurrent_connections":5, - "usage":{ - "index":".kibana", - "dashboard":{ - "total":0 - }, - "visualization":{ - "total":0 - }, - "search":{ - "total":0 - }, - "index_pattern":{ - "total":0 - }, - "graph_workspace":{ - "total":0 - }, - "timelion_sheet":{ - "total":0 - }, - "xpack":{ - "reporting":{ - "available":true, - "enabled":true, - "browser_type":"phantom", - "_all":0, - "csv":{ - "available":true, - "total":0 - }, - "printable_pdf":{ - "available":false, - "total":0 - }, - "status":{ - - }, - "lastDay":{ - "_all":0, - "csv":{ - "available":true, - "total":0 - }, - "printable_pdf":{ - "available":false, - "total":0 - }, - "status":{ - - } - }, - "last7Days":{ - "_all":0, - "csv":{ - "available":true, - "total":0 - }, - "printable_pdf":{ - "available":false, - "total":0 - }, - "status":{ - - } - } - } - } - }, - "clusterUuid":"cCe7_34NRpuCug1ZX1l3ug" - } diff --git 
a/metricbeat/module/kibana/settings/_meta/test/stats.700.json b/metricbeat/module/kibana/settings/_meta/test/stats.700.json deleted file mode 100644 index 0f74c8b8dbd9..000000000000 --- a/metricbeat/module/kibana/settings/_meta/test/stats.700.json +++ /dev/null @@ -1,144 +0,0 @@ -{ - "kibana":{ - "uuid":"5b2de169-2785-441b-ae8c-186a1936b17d", - "name":"Shaunaks-MBP-2", - "index":".kibana", - "host":"localhost", - "transport_address":"localhost:5601", - "version":"7.0.0-alpha1", - "snapshot":false, - "status":"green" - }, - "last_updated":"2018-07-18T00:32:00.948Z", - "collection_interval_ms":5000, - "process":{ - "memory":{ - "heap":{ - "total_bytes":223391744, - "used_bytes":198413592, - "size_limit":1501560832 - }, - "resident_set_size_bytes":347242496 - }, - "event_loop_delay":0.25226891040802, - "pid":46426, - "uptime_ms":1753889 - }, - "os":{ - "load":{ - "1m":3.50634765625, - "5m":3.76904296875, - "15m":3.54833984375 - }, - "memory":{ - "total_bytes":17179869184, - "free_bytes":31711232, - "used_bytes":17148157952 - }, - "uptime_ms":2187246000 - }, - "response_times":{ - "max_ms":0 - }, - "requests":{ - "total":0, - "disconnects":0, - "status_codes":{ - - } - }, - "concurrent_connections":3, - "usage":{ - "kibana":{ - "index":".kibana", - "dashboard":{ - "total":0 - }, - "visualization":{ - "total":0 - }, - "search":{ - "total":0 - }, - "index_pattern":{ - "total":0 - }, - "graph_workspace":{ - "total":0 - }, - "timelion_sheet":{ - "total":0 - } - }, - "reporting":{ - "available":true, - "enabled":true, - "browser_type":"phantom", - "all":0, - "csv":{ - "available":true, - "total":0 - }, - "printable_pdf":{ - "available":true, - "total":0, - "app":{ - "visualization":0, - "dashboard":0 - }, - "layout":{ - "print":0, - "preserve_layout":0 - } - }, - "status":{ - - }, - "last_day":{ - "all":0, - "csv":{ - "available":true, - "total":0 - }, - "printable_pdf":{ - "available":true, - "total":0, - "app":{ - "visualization":0, - "dashboard":0 - }, - "layout":{ - "print":0, - "preserve_layout":0 - } - }, - "status":{ - - } - }, - "last_7_days":{ - "all":0, - "csv":{ - "available":true, - "total":0 - }, - "printable_pdf":{ - "available":true, - "total":0, - "app":{ - "visualization":0, - "dashboard":0 - }, - "layout":{ - "print":0, - "preserve_layout":0 - } - }, - "status":{ - - } - } - } - }, - "cluster_uuid":"NkfU5AinRnyFnqBD36zhEw" - } diff --git a/metricbeat/module/kibana/settings/data.go b/metricbeat/module/kibana/settings/data.go deleted file mode 100644 index fc47e8a4839b..000000000000 --- a/metricbeat/module/kibana/settings/data.go +++ /dev/null @@ -1,70 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package settings - -import ( - "encoding/json" - "fmt" - - "github.com/elastic/beats/v7/metricbeat/helper/elastic" - "github.com/elastic/elastic-agent-libs/mapstr" - - s "github.com/elastic/beats/v7/libbeat/common/schema" - c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" - "github.com/elastic/beats/v7/metricbeat/mb" -) - -func eventMapping(r mb.ReporterV2, content []byte) error { - var data map[string]interface{} - err := json.Unmarshal(content, &data) - if err != nil { - return fmt.Errorf("failure parsing Kibana API response: %w", err) - } - - schema := s.Schema{ - "elasticsearch": s.Object{ - "cluster": s.Object{ - "id": c.Str("cluster_uuid"), - }, - }, - "settings": c.Ifc("settings.kibana"), - } - - res, err := schema.Apply(data) - if err != nil { - return err - } - - event := mb.Event{ - ModuleFields: res, - MetricSetFields: nil, - RootFields: make(mapstr.M), - } - - // Set service address - serviceAddress, err := res.GetValue("settings.transport_address") - if err != nil { - event.Error = elastic.MakeErrorForMissingField("kibana.transport_address", elastic.Kibana) - return event.Error - } - event.RootFields.Put("service.address", serviceAddress) - - r.Event(event) - - return nil -} diff --git a/metricbeat/module/kibana/settings/settings.go b/metricbeat/module/kibana/settings/settings.go deleted file mode 100644 index 2d8b9f943ea7..000000000000 --- a/metricbeat/module/kibana/settings/settings.go +++ /dev/null @@ -1,103 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package settings - -import ( - "fmt" - - "github.com/elastic/beats/v7/libbeat/common/productorigin" - "github.com/elastic/beats/v7/metricbeat/helper" - "github.com/elastic/beats/v7/metricbeat/mb" - "github.com/elastic/beats/v7/metricbeat/mb/parse" - "github.com/elastic/beats/v7/metricbeat/module/kibana" -) - -// init registers the MetricSet with the central registry. 
-// The New method will be called after the setup of the module and before starting to fetch data -func init() { - mb.Registry.MustAddMetricSet(kibana.ModuleName, "settings", New, - mb.WithHostParser(hostParser), - ) -} - -var ( - hostParser = parse.URLHostParserBuilder{ - DefaultScheme: "http", - DefaultPath: kibana.SettingsPath, - QueryParams: "extended=true", // make Kibana fetch the cluster_uuid - }.Build() -) - -// MetricSet type defines all fields of the MetricSet -type MetricSet struct { - *kibana.MetricSet - mb.BaseMetricSet - settingsHTTP *helper.HTTP -} - -// New create a new instance of the MetricSet -func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - ms, err := kibana.NewMetricSet(base) - if err != nil { - return nil, err - } - return &MetricSet{ - MetricSet: ms, - BaseMetricSet: base, - }, nil -} - -// Fetch methods implements the data gathering and data conversion to the right format -// It returns the event which is then forward to the output. In case of an error, a -// descriptive error must be returned. -func (m *MetricSet) Fetch(r mb.ReporterV2) (err error) { - if err = m.init(); err != nil { - return err - } - - content, err := m.settingsHTTP.FetchContent() - if err != nil { - return err - } - - return eventMapping(r, content) -} - -func (m *MetricSet) init() (err error) { - httpHelper, err := helper.NewHTTP(m.BaseMetricSet) - if err != nil { - return err - } - - httpHelper.SetHeaderDefault(productorigin.Header, productorigin.Beats) - - kibanaVersion, err := kibana.GetVersion(httpHelper, kibana.SettingsPath, m.ApiKey) - if err != nil { - return err - } - - isSettingsAPIAvailable := kibana.IsSettingsAPIAvailable(kibanaVersion) - if !isSettingsAPIAvailable { - const errorMsg = "the %v metricset is only supported with Kibana >= %v. You are currently running Kibana %v" - return fmt.Errorf(errorMsg, m.FullyQualifiedName(), kibana.SettingsAPIAvailableVersion, kibanaVersion) - } - - m.settingsHTTP, err = helper.NewHTTP(m.BaseMetricSet) - - return err -} diff --git a/metricbeat/module/kibana/settings/settings_test.go b/metricbeat/module/kibana/settings/settings_test.go deleted file mode 100644 index a4e10fb99ea1..000000000000 --- a/metricbeat/module/kibana/settings/settings_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -//go:build !integration - -package settings - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/stretchr/testify/require" - - mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" - "github.com/elastic/beats/v7/metricbeat/module/kibana/mtest" -) - -func TestFetchExcludeUsage(t *testing.T) { - // Spin up mock Kibana server - numStatsRequests := 0 - kib := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/api/status": - _, err := w.Write([]byte("{ \"version\": { \"number\": \"7.5.0\" }}")) - if err != nil { - t.Fatal("write", err) - } - - case "/api/stats": - excludeUsage := r.FormValue("exclude_usage") - - // Make GET /api/stats return 503 for first call, 200 for subsequent calls - switch numStatsRequests { - case 0: // first call - require.Equal(t, "true", excludeUsage) // exclude_usage is always true - w.WriteHeader(503) - - case 1: // second call - require.Equal(t, "true", excludeUsage) // exclude_usage is always true - w.WriteHeader(200) - - case 2: // third call - require.Equal(t, "true", excludeUsage) // exclude_usage is always true - w.WriteHeader(200) - } - - numStatsRequests++ - } - })) - defer kib.Close() - - config := mtest.GetConfig("settings", kib.URL) - - f := mbtest.NewReportingMetricSetV2Error(t, config) - - // First fetch - mbtest.ReportingFetchV2Error(f) - - // Second fetch - mbtest.ReportingFetchV2Error(f) - - // Third fetch - mbtest.ReportingFetchV2Error(f) -} From e532bbe8d6c1daa74ce837203dfbcc89aacd7f7e Mon Sep 17 00:00:00 2001 From: kruskall <99559985+kruskall@users.noreply.github.com> Date: Thu, 27 Feb 2025 21:22:56 +0100 Subject: [PATCH 37/41] feat(fips): return an error when validating kerberos cfg (#42887) * feat(fips): return an error when validating kerberos cfg setting kerberos config options should return an error * Update config.go * Update config_nofips.go * Update client_nofips.go --- .../transport/kerberos/client_nofips.go | 2 +- libbeat/common/transport/kerberos/config.go | 23 +--------- .../common/transport/kerberos/config_fips.go | 28 ++++++++++++ .../transport/kerberos/config_fips_test.go | 32 ++++++++++++++ .../transport/kerberos/config_nofips.go | 43 +++++++++++++++++++ .../transport/kerberos/config_nofips_test.go | 36 ++++++++++++++++ 6 files changed, 141 insertions(+), 23 deletions(-) create mode 100644 libbeat/common/transport/kerberos/config_fips.go create mode 100644 libbeat/common/transport/kerberos/config_fips_test.go create mode 100644 libbeat/common/transport/kerberos/config_nofips.go create mode 100644 libbeat/common/transport/kerberos/config_nofips_test.go diff --git a/libbeat/common/transport/kerberos/client_nofips.go b/libbeat/common/transport/kerberos/client_nofips.go index f734cb750164..798aa4579560 100644 --- a/libbeat/common/transport/kerberos/client_nofips.go +++ b/libbeat/common/transport/kerberos/client_nofips.go @@ -46,7 +46,7 @@ func NewClient(config *Config, httpClient *http.Client, esurl string) (Client, e case authPassword: krbClient = krbclient.NewWithPassword(config.Username, config.Realm, config.Password, krbConf) default: - return nil, InvalidAuthType + return nil, ErrInvalidAuthType } return spnego.NewClient(krbClient, httpClient, ""), nil diff --git a/libbeat/common/transport/kerberos/config.go b/libbeat/common/transport/kerberos/config.go index abea183f4d18..07ed3b68383a 100644 --- a/libbeat/common/transport/kerberos/config.go +++ b/libbeat/common/transport/kerberos/config.go @@ -33,7 +33,7 @@ const ( ) var ( - 
InvalidAuthType = errors.New("invalid authentication type") + ErrInvalidAuthType = errors.New("invalid authentication type") authTypes = map[string]AuthType{ authPasswordStr: authPassword, @@ -69,24 +69,3 @@ func (t *AuthType) Unpack(value string) error { return nil } - -func (c *Config) Validate() error { - switch c.AuthType { - case authPassword: - if c.Username == "" { - return fmt.Errorf("password authentication is selected for Kerberos, but username is not configured") - } - if c.Password == "" { - return fmt.Errorf("password authentication is selected for Kerberos, but password is not configured") - } - - case authKeytab: - if c.KeyTabPath == "" { - return fmt.Errorf("keytab authentication is selected for Kerberos, but path to keytab is not configured") - } - default: - return InvalidAuthType - } - - return nil -} diff --git a/libbeat/common/transport/kerberos/config_fips.go b/libbeat/common/transport/kerberos/config_fips.go new file mode 100644 index 000000000000..92cf2314630b --- /dev/null +++ b/libbeat/common/transport/kerberos/config_fips.go @@ -0,0 +1,28 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build requirefips + +package kerberos + +import ( + "errors" +) + +func (c *Config) Validate() error { + return errors.New("kerberos is not supported in fips mode") +} diff --git a/libbeat/common/transport/kerberos/config_fips_test.go b/libbeat/common/transport/kerberos/config_fips_test.go new file mode 100644 index 000000000000..d8f9bedd88fa --- /dev/null +++ b/libbeat/common/transport/kerberos/config_fips_test.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build requirefips + +package kerberos + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConfigValidate(t *testing.T) { + cfg := &Config{} + err := cfg.Validate() + require.EqualError(t, err, "kerberos is not supported in fips mode") +} diff --git a/libbeat/common/transport/kerberos/config_nofips.go b/libbeat/common/transport/kerberos/config_nofips.go new file mode 100644 index 000000000000..161f4c4a7beb --- /dev/null +++ b/libbeat/common/transport/kerberos/config_nofips.go @@ -0,0 +1,43 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build !requirefips + +package kerberos + +import "fmt" + +func (c *Config) Validate() error { + switch c.AuthType { + case authPassword: + if c.Username == "" { + return fmt.Errorf("password authentication is selected for Kerberos, but username is not configured") + } + if c.Password == "" { + return fmt.Errorf("password authentication is selected for Kerberos, but password is not configured") + } + + case authKeytab: + if c.KeyTabPath == "" { + return fmt.Errorf("keytab authentication is selected for Kerberos, but path to keytab is not configured") + } + default: + return ErrInvalidAuthType + } + + return nil +} diff --git a/libbeat/common/transport/kerberos/config_nofips_test.go b/libbeat/common/transport/kerberos/config_nofips_test.go new file mode 100644 index 000000000000..5262f4c5f735 --- /dev/null +++ b/libbeat/common/transport/kerberos/config_nofips_test.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +//go:build !requirefips + +package kerberos + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConfigValidate(t *testing.T) { + cfg := &Config{ + AuthType: authPassword, + Username: "username", + Password: "password", + } + err := cfg.Validate() + require.NoError(t, err) +} From c0fdd97028aedb671202165d78f89c01c5d29e7d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 28 Feb 2025 11:51:48 +0200 Subject: [PATCH 38/41] build(deps): bump updatecli/updatecli-action in the github-actions group (#42850) Bumps the github-actions group with 1 update: [updatecli/updatecli-action](https://github.com/updatecli/updatecli-action). Updates `updatecli/updatecli-action` from 2.77.0 to 2.78.1 - [Release notes](https://github.com/updatecli/updatecli-action/releases) - [Commits](https://github.com/updatecli/updatecli-action/compare/79983ec58a76fe0c87fc76f5a5c7ef8df0bb36c4...d2e5d2667ba67a8599e636531baef731f54858bc) --- .github/workflows/bump-elastic-stack-snapshot.yml | 2 +- .github/workflows/bump-golang.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bump-elastic-stack-snapshot.yml b/.github/workflows/bump-elastic-stack-snapshot.yml index 7dd51d684db7..5b3196af63b8 100644 --- a/.github/workflows/bump-elastic-stack-snapshot.yml +++ b/.github/workflows/bump-elastic-stack-snapshot.yml @@ -32,7 +32,7 @@ jobs: - uses: actions/checkout@v4 - name: Install Updatecli in the runner - uses: updatecli/updatecli-action@79983ec58a76fe0c87fc76f5a5c7ef8df0bb36c4 # v2.77.0 + uses: updatecli/updatecli-action@d2e5d2667ba67a8599e636531baef731f54858bc # v2.78.1 - name: Run Updatecli in Apply mode run: updatecli --experimental apply --config .github/workflows/updatecli.d/bump-elastic-stack-snapshot.yml --values .github/workflows/updatecli.d/values.d/scm.yml diff --git a/.github/workflows/bump-golang.yml b/.github/workflows/bump-golang.yml index 47f34ede802f..68651773d9ad 100644 --- a/.github/workflows/bump-golang.yml +++ b/.github/workflows/bump-golang.yml @@ -23,7 +23,7 @@ jobs: - uses: actions/checkout@v4 - name: Install Updatecli in the runner - uses: updatecli/updatecli-action@79983ec58a76fe0c87fc76f5a5c7ef8df0bb36c4 # v2.77.0 + uses: updatecli/updatecli-action@d2e5d2667ba67a8599e636531baef731f54858bc # v2.78.1 - name: Run Updatecli in Apply mode run: updatecli --experimental apply --config .github/workflows/updatecli.d/${{ matrix.file }} --values .github/workflows/updatecli.d/values.d/scm.yml From 527716cc3f11389aaaf0a782a6d687afffaf992d Mon Sep 17 00:00:00 2001 From: Victor Martinez Date: Fri, 28 Feb 2025 16:17:35 +0100 Subject: [PATCH 39/41] github-actions: enable 9 branches for the pre-commit (#42605) --- .github/workflows/pre-commit.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 18b6e56de837..ce6acd91c193 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ -7,6 +7,7 @@ on: - main - 7.1* - 8.* + - 9.* permissions: contents: read From e41d266fdd5d801473372ed1ed7d03afd65c8f83 Mon Sep 17 00:00:00 2001 From: "Mark J. 
Hoy" Date: Fri, 28 Feb 2025 10:50:13 -0500 Subject: [PATCH 40/41] [9.x] Fully remove Enterprise Search from Metricbeat (#42915) * fully remove Enterprise Search from Metricbeat 9.x * Also remove the `ent-search` monitoring index * test ensure enterprise search directory is removed * try full relative path; ensure removed pre-test * be explicit about ignoring enterprisesearch tests * remove explicit ignore; use git clean -fd * explicitly remove directory before git clean * ignore removed files * add note to Changelog next --------- Co-authored-by: Valentin Crettaz --- CHANGELOG.next.asciidoc | 1 + dev-tools/mage/module_changeset.go | 6 + metricbeat/docs/fields.asciidoc | 1209 ---------- .../docs/modules/enterprisesearch.asciidoc | 73 - .../modules/enterprisesearch/health.asciidoc | 24 - .../modules/enterprisesearch/stats.asciidoc | 24 - metricbeat/docs/modules_list.asciidoc | 4 - metricbeat/helper/elastic/elastic.go | 5 - x-pack/agentbeat/agentbeat.spec.yml | 5 - x-pack/metricbeat/include/list.go | 3 - x-pack/metricbeat/metricbeat.reference.yml | 77 +- .../module/enterprisesearch/_meta/Dockerfile | 12 - .../enterprisesearch/_meta/config-xpack.yml | 8 - .../module/enterprisesearch/_meta/config.yml | 7 - .../_meta/docker-entrypoint-dependencies.sh | 14 - .../enterprisesearch/_meta/docs.asciidoc | 20 - .../module/enterprisesearch/_meta/fields.yml | 15 - ...Metricbeat-enterprise-search-overview.json | 1954 ----------------- .../metricbeat/module/enterprisesearch/doc.go | 6 - .../enterprisesearch/docker-compose.yml | 51 - .../module/enterprisesearch/fields.go | 23 - .../health/_meta/docs.asciidoc | 3 - .../enterprisesearch/health/_meta/fields.yml | 147 -- .../health/_meta/testdata/config.yml | 2 - .../health/_meta/testdata/health.json | 76 - .../_meta/testdata/health.json-expected.json | 83 - .../module/enterprisesearch/health/data.go | 135 -- .../enterprisesearch/health/data_test.go | 21 - .../module/enterprisesearch/health/health.go | 83 - .../health/health_integration_test.go | 53 - .../module/enterprisesearch/module.yml | 3 - .../stats/_meta/docs.asciidoc | 3 - .../enterprisesearch/stats/_meta/fields.yml | 407 ---- .../stats/_meta/testdata/config.yml | 2 - .../stats/_meta/testdata/stats.json | 154 -- .../_meta/testdata/stats.json-expected.json | 154 -- .../module/enterprisesearch/stats/data.go | 205 -- .../enterprisesearch/stats/data_test.go | 21 - .../module/enterprisesearch/stats/stats.go | 83 - .../stats/stats_integration_test.go | 53 - .../enterprisesearch/test_enterprisesearch.py | 81 - .../enterprisesearch-xpack.yml.disabled | 11 - .../modules.d/enterprisesearch.yml.disabled | 10 - 43 files changed, 41 insertions(+), 5290 deletions(-) delete mode 100644 metricbeat/docs/modules/enterprisesearch.asciidoc delete mode 100644 metricbeat/docs/modules/enterprisesearch/health.asciidoc delete mode 100644 metricbeat/docs/modules/enterprisesearch/stats.asciidoc delete mode 100644 x-pack/metricbeat/module/enterprisesearch/_meta/Dockerfile delete mode 100644 x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/_meta/config.yml delete mode 100755 x-pack/metricbeat/module/enterprisesearch/_meta/docker-entrypoint-dependencies.sh delete mode 100644 x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc delete mode 100644 x-pack/metricbeat/module/enterprisesearch/_meta/fields.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/_meta/kibana/7/dashboard/Metricbeat-enterprise-search-overview.json delete mode 100644 
x-pack/metricbeat/module/enterprisesearch/doc.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/docker-compose.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/fields.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/_meta/docs.asciidoc delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/_meta/fields.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/config.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/health.json delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/health.json-expected.json delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/data.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/data_test.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/health.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/health/health_integration_test.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/module.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/_meta/docs.asciidoc delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/_meta/fields.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/config.yml delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json-expected.json delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/data.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/data_test.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/stats.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/stats/stats_integration_test.go delete mode 100644 x-pack/metricbeat/module/enterprisesearch/test_enterprisesearch.py delete mode 100644 x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled delete mode 100644 x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 42cf55b071f5..8234289871a3 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -82,6 +82,7 @@ otherwise no tag is added. {issue}42208[42208] {pull}42403[42403] - Add support for specifying a custom endpoint for GCP service clients. {issue}40848[40848] {pull}40918[40918] - Fix incorrect handling of types in SQL module. 
{issue}40090[40090] {pull}41607[41607] - Remove kibana.settings metricset since the API was removed in 8.0 {issue}30592[30592] {pull}42937[42937] +- Removed support for the Enterprise Search module {pull}42915[42915] *Osquerybeat* diff --git a/dev-tools/mage/module_changeset.go b/dev-tools/mage/module_changeset.go index c7ffdbfd9d93..a04a68ddd836 100644 --- a/dev-tools/mage/module_changeset.go +++ b/dev-tools/mage/module_changeset.go @@ -91,6 +91,12 @@ func shouldIgnore(file string) bool { return true } } + + // if the file has been removed, we should ignore it + if _, err := os.Stat(file); os.IsNotExist(err) { + return true + } + return false } diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 3031dd08ba9c..5c965f89db31 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -39,7 +39,6 @@ grouped in the following categories: * <> * <> * <> -* <> * <> * <> * <> @@ -35103,1214 +35102,6 @@ type: keyword -- -[[exported-fields-enterprisesearch]] -== Enterprise Search fields - -Enterprise Search module - - - -[float] -=== enterprisesearch - -`enterprisesearch` contains metrics and health information for Enterprise Search - - - -*`enterprisesearch.cluster_uuid`*:: -+ --- -Cluster UUID for the Elasticsearch cluster used as the data store for Enterprise Search. - -type: keyword - --- - -[float] -=== health - -Enterprise Search health - - -*`enterprisesearch.health.name`*:: -+ --- -Host name for the Enterprise Search node - -type: keyword - --- - -[float] -=== version - -Enterprise Search version information - - -*`enterprisesearch.health.version.number`*:: -+ --- -Enterprise Search version number using the semantic versioning format - -type: keyword - --- - -*`enterprisesearch.health.version.build_hash`*:: -+ --- -A unique build hash for the Enterprise Search package - -type: keyword - --- - -[float] -=== process - -Enterprise Search process information - - -*`enterprisesearch.health.process.pid`*:: -+ --- -Process ID for the Enterprise Search instance - -type: long - --- - -*`enterprisesearch.health.process.uptime.sec`*:: -+ --- -Process uptime for the Enterprise Search instance - -type: long - --- - -[float] -=== filebeat - -Health information for the embedded Filebeat instance - - -*`enterprisesearch.health.process.filebeat.pid`*:: -+ --- -Process ID for the embedded Filebeat instance - -type: long - --- - -*`enterprisesearch.health.process.filebeat.restart_count`*:: -+ --- -Number of times embedded Filebeat instance had to be restarted due to some issues - -type: long - --- - -*`enterprisesearch.health.process.filebeat.time_since_last_restart.sec`*:: -+ --- -Time since the last embedded Filebeat instance restart (-1 if never restarted) - -type: long - --- - -[float] -=== jvm - -JVM health - - -*`enterprisesearch.health.jvm.version`*:: -+ --- -JVM version used to run Enterprise Search - -type: keyword - --- - -[float] -=== gc - -Java garbage collection metrics - - -*`enterprisesearch.health.jvm.gc.collection_count`*:: -+ --- -Total number of Java garbage collector invocations since the start of the process - -type: long - --- - -*`enterprisesearch.health.jvm.gc.collection_time.ms`*:: -+ --- -Total time spent running Java garbage collector since the start of the process - -type: long - --- - -[float] -=== memory_usage - -Memory usage - - -*`enterprisesearch.health.jvm.memory_usage.heap_init.bytes`*:: -+ --- -Heap init used by the JVM in bytes. 
- -type: long - -format: bytes - --- - -*`enterprisesearch.health.jvm.memory_usage.heap_used.bytes`*:: -+ --- -Heap used by the JVM in bytes. - -type: long - -format: bytes - --- - -*`enterprisesearch.health.jvm.memory_usage.heap_committed.bytes`*:: -+ --- -Committed heap to the JVM in bytes. - -type: long - -format: bytes - --- - -*`enterprisesearch.health.jvm.memory_usage.heap_max.bytes`*:: -+ --- -Max heap used by the JVM in bytes - -type: long - -format: bytes - --- - -*`enterprisesearch.health.jvm.memory_usage.non_heap_init.bytes`*:: -+ --- -Non-Heap initial memory used by the JVM in bytes. - -type: long - -format: bytes - --- - -*`enterprisesearch.health.jvm.memory_usage.non_heap_committed.bytes`*:: -+ --- -Non-Heap committed memory used by the JVM in bytes. - -type: long - -format: bytes - --- - -*`enterprisesearch.health.jvm.memory_usage.object_pending_finalization_count`*:: -+ --- -Displays the approximate number of objects for which finalization is pending. - - -type: long - --- - -[float] -=== threads - -Threads information - - -*`enterprisesearch.health.jvm.threads.current`*:: -+ --- -Current number of live threads. - -type: long - --- - -*`enterprisesearch.health.jvm.threads.daemon`*:: -+ --- -Current number of live daemon threads. - -type: long - --- - -*`enterprisesearch.health.jvm.threads.max`*:: -+ --- -Peak live thread count since the JVM started or the peak was reset. - -type: long - --- - -*`enterprisesearch.health.jvm.threads.total_started`*:: -+ --- -Total number of threads created and/or started since the JVM started. - -type: long - --- - -[float] -=== crawler - -Crawler health - - -[float] -=== workers - -Crawler workers - - -*`enterprisesearch.health.crawler.workers.pool_size`*:: -+ --- -Workers pool size. - -type: long - --- - -*`enterprisesearch.health.crawler.workers.active`*:: -+ --- -Number of active workers. - -type: long - --- - -*`enterprisesearch.health.crawler.workers.available`*:: -+ --- -Number of available workers. - -type: long - --- - -[float] -=== stats - -Enterprise Search stats. - - -[float] -=== connectors - -Workplace Search connectors subsystem stats. - - -[float] -=== job_store - -Workplace Search connectors job store stats. - - -*`enterprisesearch.stats.connectors.job_store.waiting`*:: -+ --- -Number of connectors jobs waiting to be processed. - -type: long - --- - -*`enterprisesearch.stats.connectors.job_store.working`*:: -+ --- -Number of connectors jobs currently being processed. - -type: long - --- - -[float] -=== job_types - -Breakdown of connectors jobs by types. - - -*`enterprisesearch.stats.connectors.job_store.job_types.delete`*:: -+ --- -Number of delete jobs in the jobs store. - -type: long - --- - -*`enterprisesearch.stats.connectors.job_store.job_types.full`*:: -+ --- -Number of full sync jobs in the jobs store. - -type: long - --- - -*`enterprisesearch.stats.connectors.job_store.job_types.incremental`*:: -+ --- -Number of incremental sync jobs in the jobs store. - -type: long - --- - -*`enterprisesearch.stats.connectors.job_store.job_types.permissions`*:: -+ --- -Number of permissions sync jobs in the jobs store. - -type: long - --- - -[float] -=== pool - -Workplace Search worker pools stats. - - -[float] -=== extract_worker_pool - -Status information for the extractor workers pool. - - -*`enterprisesearch.stats.connectors.pool.extract_worker_pool.size`*:: -+ --- -Worker pool size. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.extract_worker_pool.busy`*:: -+ --- -Number of busy workers. 
- -type: long - --- - -*`enterprisesearch.stats.connectors.pool.extract_worker_pool.queue_depth`*:: -+ --- -Number of items waiting to be processed. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.extract_worker_pool.idle`*:: -+ --- -Number of idle workers. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.extract_worker_pool.total_completed`*:: -+ --- -Number of jobs completed since the start. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.extract_worker_pool.total_scheduled`*:: -+ --- -Number of jobs scheduled since the start. - -type: long - --- - -[float] -=== subextract_worker_pool - -Status information for the sub-extractor workers pool. - - -*`enterprisesearch.stats.connectors.pool.subextract_worker_pool.size`*:: -+ --- -Worker pool size. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.subextract_worker_pool.busy`*:: -+ --- -Number of busy workers. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.subextract_worker_pool.queue_depth`*:: -+ --- -Number of items waiting to be processed. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.subextract_worker_pool.idle`*:: -+ --- -Number of idle workers. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.subextract_worker_pool.total_completed`*:: -+ --- -Number of jobs completed since the start. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.subextract_worker_pool.total_scheduled`*:: -+ --- -Number of jobs scheduled since the start. - -type: long - --- - -[float] -=== publish_worker_pool - -Status information for the publish workers pool. - - -*`enterprisesearch.stats.connectors.pool.publish_worker_pool.size`*:: -+ --- -Worker pool size. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.publish_worker_pool.busy`*:: -+ --- -Number of busy workers. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.publish_worker_pool.queue_depth`*:: -+ --- -Number of items waiting to be processed. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.publish_worker_pool.idle`*:: -+ --- -Number of idle workers. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.publish_worker_pool.total_completed`*:: -+ --- -Number of jobs completed since the start. - -type: long - --- - -*`enterprisesearch.stats.connectors.pool.publish_worker_pool.total_scheduled`*:: -+ --- -Number of jobs scheduled since the start. - -type: long - --- - -[float] -=== http - -Incoming HTTP request metrics. - - -[float] -=== connections - -Incoming HTTP connection statistics. - - -*`enterprisesearch.stats.http.connections.current`*:: -+ --- -Current number of HTTP connections opened to the Enterprise Search instance. - -type: long - --- - -*`enterprisesearch.stats.http.connections.max`*:: -+ --- -Maximum number of concurrent HTTP connections open to the Enterprise Search instance since the start. - -type: long - --- - -*`enterprisesearch.stats.http.connections.total`*:: -+ --- -Total number of HTTP connections opened to the Enterprise Search instance since the start. - -type: long - --- - -[float] -=== network - -Network traffic metrics. - - -*`enterprisesearch.stats.http.network.received.bytes`*:: -+ --- -Total number of bytes received by the Enterprise Search instance since the start. - -type: long - -format: bytes - --- - -*`enterprisesearch.stats.http.network.sent.bytes`*:: -+ --- -Total number of bytes sent by the Enterprise Search instance since the start. 
- -type: long - -format: bytes - --- - -*`enterprisesearch.stats.http.network.received.bytes_per_sec`*:: -+ --- -Average number of bytes received by the Enterprise Search instance per second since the start. - -type: long - -format: bytes - --- - -*`enterprisesearch.stats.http.network.sent.bytes_per_sec`*:: -+ --- -Average number of bytes sent by the Enterprise Search instance per second since the start. - -type: long - -format: bytes - --- - -[float] -=== request_duration - -Aggregate HTTP request duration statistics. - - -*`enterprisesearch.stats.http.request_duration.max.ms`*:: -+ --- -Longest HTTP connection duration since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.http.request_duration.mean.ms`*:: -+ --- -Average HTTP connection duration since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.http.request_duration.std_dev.ms`*:: -+ --- -Standard deviation for HTTP connection duration values since the start of the instance. - -type: long - --- - -[float] -=== responses - -Aggregate HTTP response counts broken down by HTTP status type. - - -*`enterprisesearch.stats.http.responses.1xx`*:: -+ --- -Total number of HTTP requests finished with a 1xx response code since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.http.responses.2xx`*:: -+ --- -Total number of HTTP requests finished with a 2xx response code since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.http.responses.3xx`*:: -+ --- -Total number of HTTP requests finished with a 3xx response code since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.http.responses.4xx`*:: -+ --- -Total number of HTTP requests finished with a 4xx response code since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.http.responses.5xx`*:: -+ --- -Total number of HTTP requests finished with a 5xx response code since the start of the instance. - -type: long - --- - -[float] -=== queues - -Aggregate stats on the functioning of the background jobs processing pipeline within Enterprise Search. - - -*`enterprisesearch.stats.queues.engine_destroyer.count`*:: -+ --- -Total number of jobs processed via the engine_destroyer queue since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.queues.mailer.count`*:: -+ --- -Total number of jobs processed via the mailer queue since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.queues.process_crawl.count`*:: -+ --- -Total number of jobs processed via the process_crawl queue since the start of the instance. - -type: long - --- - -*`enterprisesearch.stats.queues.failed.count`*:: -+ --- -Total number of jobs waiting in the failed queue. - -type: long - --- - -[float] -=== crawler - -Aggregate stats on the functioning of the crawler subsystem within Enterprise Search. - - -[float] -=== global - -Global deployment-wide metrics for the crawler. - - -[float] -=== crawl_requests - -Crawl request summary for the deployment. - - -*`enterprisesearch.stats.crawler.global.crawl_requests.pending`*:: -+ --- -Total number of crawl requests waiting to be processed. - -type: long - --- - -*`enterprisesearch.stats.crawler.global.crawl_requests.active`*:: -+ --- -Total number of crawl requests currently being processed (running crawls). - -type: long - --- - -*`enterprisesearch.stats.crawler.global.crawl_requests.successful`*:: -+ --- -Total number of crawl requests that have succeeded. 
- -type: long - --- - -*`enterprisesearch.stats.crawler.global.crawl_requests.failed`*:: -+ --- -Total number of failed crawl requests. - -type: long - --- - -[float] -=== node - -Node-level statistics for the crawler. - - -*`enterprisesearch.stats.crawler.node.pages_visited`*:: -+ --- -Total number of pages visited by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.urls_allowed`*:: -+ --- -Total number of URLs allowed by the crawler during discovery since the instance start. - -type: long - --- - -[float] -=== urls_denied - -Total number of URLs denied by the crawler during discovery since the instance start, broken down by deny reason. - - -*`enterprisesearch.stats.crawler.node.urls_denied.already_seen`*:: -+ --- -Total number of URLs not followed because of URL de-duplication (each URL is visited only once). - -type: long - --- - -*`enterprisesearch.stats.crawler.node.urls_denied.domain_filter_denied`*:: -+ --- -Total number of URLs denied because of an unknown domain. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.urls_denied.incorrect_protocol`*:: -+ --- -Total number of URLs with incorrect/invalid/unsupported protocols. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.urls_denied.link_too_deep`*:: -+ --- -Total number of URLs not followed due to crawl depth limits. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.urls_denied.nofollow`*:: -+ --- -Total number of URLs denied due to a nofollow meta tag or an HTML link attribute. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.urls_denied.unsupported_content_type`*:: -+ --- -Total number of URLs denied due to an unsupported content type. - -type: long - --- - -[float] -=== status_codes - -HTTP request result counts, by status code. - - -*`enterprisesearch.stats.crawler.node.status_codes.200`*:: -+ --- -Total number of HTTP 200 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.301`*:: -+ --- -Total number of HTTP 301 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.302`*:: -+ --- -Total number of HTTP 302 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.400`*:: -+ --- -Total number of HTTP 400 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.401`*:: -+ --- -Total number of HTTP 401 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.402`*:: -+ --- -Total number of HTTP 402 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.403`*:: -+ --- -Total number of HTTP 403 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.404`*:: -+ --- -Total number of HTTP 404 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.405`*:: -+ --- -Total number of HTTP 405 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.410`*:: -+ --- -Total number of HTTP 410 responses seen by the crawler since the instance start. 
- -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.422`*:: -+ --- -Total number of HTTP 422 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.429`*:: -+ --- -Total number of HTTP 429 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.500`*:: -+ --- -Total number of HTTP 500 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.501`*:: -+ --- -Total number of HTTP 501 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.502`*:: -+ --- -Total number of HTTP 502 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.503`*:: -+ --- -Total number of HTTP 503 responses seen by the crawler since the instance start. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.status_codes.504`*:: -+ --- -Total number of HTTP 504 responses seen by the crawler since the instance start. - -type: long - --- - -[float] -=== queue_size - -Total current URL queue size for the instance. - - -*`enterprisesearch.stats.crawler.node.queue_size.primary`*:: -+ --- -Total number of URLs waiting to be crawled by the instance. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.queue_size.purge`*:: -+ --- -Total number of URLs waiting to be checked by the purge crawl phase. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.active_threads`*:: -+ --- -Total number of crawler worker threads currently active on the instance. - -type: long - --- - -[float] -=== workers - -Crawler workers information for the instance. - - -*`enterprisesearch.stats.crawler.node.workers.pool_size`*:: -+ --- -Total size of the crawl workers pool (number of concurrent crawls possible) for the instance. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.workers.active`*:: -+ --- -Total number of currently active crawl workers (running crawls) for the instance. - -type: long - --- - -*`enterprisesearch.stats.crawler.node.workers.available`*:: -+ --- -Total number of currently available (free) crawl workers for the instance. - -type: long - --- - -[float] -=== product_usage - -Aggregate product usage statistics for the Enterprise Search deployment. - - -[float] -=== app_search - -App Search product usage statistics. - - -*`enterprisesearch.stats.product_usage.app_search.total_engines`*:: -+ --- -Current number of App Search engines within the deployment. - -type: long - --- - -[float] -=== workplace_search - -Workplace Search product usage statistics. - - -*`enterprisesearch.stats.product_usage.workplace_search.total_org_sources`*:: -+ --- -Current number of Workplace Search org-wide content sources within the deployment. - -type: long - --- - -*`enterprisesearch.stats.product_usage.workplace_search.total_private_sources`*:: -+ --- -Current number of Workplace Search private content sources within the deployment. - -type: long - --- - [[exported-fields-envoyproxy]] == Envoyproxy fields diff --git a/metricbeat/docs/modules/enterprisesearch.asciidoc b/metricbeat/docs/modules/enterprisesearch.asciidoc deleted file mode 100644 index a9aea72ff575..000000000000 --- a/metricbeat/docs/modules/enterprisesearch.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -//// -This file is generated! 
See scripts/mage/docs_collector.go -//// - -:modulename: enterprisesearch -:edit_url: https://github.com/elastic/beats/edit/main/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc - - -[[metricbeat-module-enterprisesearch]] -[role="xpack"] -== Enterprise Search module - -beta[] - -This module periodically fetches metrics and health information from Elastic Enterprise Search instances using HTTP APIs. - -[float] -=== Compatibility -The module has been tested with Enterprise Search versions 7.16.0 and higher. Versions below 7.16.0 are not expected to work due to a number of API-level incompatibilities. - -[float] -=== Usage -The Enterprise Search module requires a set of credentials (a username and a password) for an Elasticserch user for a user that has a `monitor` https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-cluster[cluster privilege]. - -[float] -=== Usage for {stack} Monitoring - -The Enterprise Search module can be used to collect metrics shown in our {stack-monitor-app} -UI in {kib}. To enable this usage, set `xpack.enabled: true` in configuration. - -NOTE: When this module is used for {stack} Monitoring, it sends metrics to the -monitoring index instead of the default index typically used by {metricbeat}. -For more details about the monitoring index, see -{ref}/config-monitoring-indices.html[Configuring indices for monitoring]. - - -:edit_url: - -[float] -=== Example configuration - -The Enterprise Search module supports the standard configuration options that are described -in <>. Here is an example configuration: - -[source,yaml] ----- -metricbeat.modules: -- module: enterprisesearch - metricsets: ["health", "stats"] - enabled: true - period: 10s - hosts: ["http://localhost:3002"] - #username: "user" - #password: "secret" ----- - -This module supports TLS connections when using `ssl` config field, as described in <>. -It also supports the options described in <>. - -[float] -=== Metricsets - -The following metricsets are available: - -* <> - -* <> - -include::enterprisesearch/health.asciidoc[] - -include::enterprisesearch/stats.asciidoc[] - -:edit_url!: diff --git a/metricbeat/docs/modules/enterprisesearch/health.asciidoc b/metricbeat/docs/modules/enterprisesearch/health.asciidoc deleted file mode 100644 index 171a9a52a64c..000000000000 --- a/metricbeat/docs/modules/enterprisesearch/health.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -//// -This file is generated! See scripts/mage/docs_collector.go -//// -:edit_url: https://github.com/elastic/beats/edit/main/x-pack/metricbeat/module/enterprisesearch/health/_meta/docs.asciidoc - - -[[metricbeat-metricset-enterprisesearch-health]] -[role="xpack"] -=== Enterprise Search health metricset - -beta[] - -include::../../../../x-pack/metricbeat/module/enterprisesearch/health/_meta/docs.asciidoc[] - -This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. - -:edit_url: - -==== Fields - -For a description of each field in the metricset, see the -<> section. - -:edit_url!: \ No newline at end of file diff --git a/metricbeat/docs/modules/enterprisesearch/stats.asciidoc b/metricbeat/docs/modules/enterprisesearch/stats.asciidoc deleted file mode 100644 index d585c80f1bee..000000000000 --- a/metricbeat/docs/modules/enterprisesearch/stats.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -//// -This file is generated! 
See scripts/mage/docs_collector.go -//// -:edit_url: https://github.com/elastic/beats/edit/main/x-pack/metricbeat/module/enterprisesearch/stats/_meta/docs.asciidoc - - -[[metricbeat-metricset-enterprisesearch-stats]] -[role="xpack"] -=== Enterprise Search stats metricset - -beta[] - -include::../../../../x-pack/metricbeat/module/enterprisesearch/stats/_meta/docs.asciidoc[] - -This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. - -:edit_url: - -==== Fields - -For a description of each field in the metricset, see the -<> section. - -:edit_url!: \ No newline at end of file diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index 89c634032ef0..ee8055b298c8 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -113,9 +113,6 @@ This file is generated! See scripts/mage/docs_collector.go |<> |<> |<> -|<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.2+| .2+| |<> beta[] -|<> beta[] |<> |image:./images/icon-no.png[No prebuilt dashboards] | .1+| .1+| |<> |<> |image:./images/icon-no.png[No prebuilt dashboards] | @@ -360,7 +357,6 @@ include::modules/couchdb.asciidoc[] include::modules/docker.asciidoc[] include::modules/dropwizard.asciidoc[] include::modules/elasticsearch.asciidoc[] -include::modules/enterprisesearch.asciidoc[] include::modules/envoyproxy.asciidoc[] include::modules/etcd.asciidoc[] include::modules/gcp.asciidoc[] diff --git a/metricbeat/helper/elastic/elastic.go b/metricbeat/helper/elastic/elastic.go index a952497d9f83..4af3871356c4 100644 --- a/metricbeat/helper/elastic/elastic.go +++ b/metricbeat/helper/elastic/elastic.go @@ -47,9 +47,6 @@ const ( // Beats product Beats - - // Enterprise Search product - EnterpriseSearch ) func (p Product) xPackMonitoringIndexString() string { @@ -58,7 +55,6 @@ func (p Product) xPackMonitoringIndexString() string { "kibana", "logstash", "beats", - "ent-search", } if int(p) < 0 || int(p) > len(indexProductNames) { @@ -74,7 +70,6 @@ func (p Product) String() string { "kibana", "logstash", "beats", - "enterprisesearch", } if int(p) < 0 || int(p) > len(productNames) { diff --git a/x-pack/agentbeat/agentbeat.spec.yml b/x-pack/agentbeat/agentbeat.spec.yml index d553152d35b6..11b4ebbf185a 100644 --- a/x-pack/agentbeat/agentbeat.spec.yml +++ b/x-pack/agentbeat/agentbeat.spec.yml @@ -355,11 +355,6 @@ inputs: platforms: *platforms outputs: *outputs command: *metricbeat_command - - name: enterprisesearch/metrics - description: "Enterprise search metrics" - platforms: *platforms - outputs: *outputs - command: *metricbeat_command - name: kibana/metrics description: "Kibana metrics" platforms: *platforms diff --git a/x-pack/metricbeat/include/list.go b/x-pack/metricbeat/include/list.go index af3b1e9425d8..cac57f05aa71 100644 --- a/x-pack/metricbeat/include/list.go +++ b/x-pack/metricbeat/include/list.go @@ -34,9 +34,6 @@ import ( _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/containerd/memory" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/coredns" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/coredns/stats" - _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/enterprisesearch" - _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/enterprisesearch/health" - _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/enterprisesearch/stats" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp" _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp/billing" _ 
"github.com/elastic/beats/v7/x-pack/metricbeat/module/gcp/carbon" diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index dfb8246bc5ea..40aeb4742c17 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -576,15 +576,6 @@ metricbeat.modules: #xpack.enabled: false #scope: node -#-------------------------- Enterprise Search Module -------------------------- -- module: enterprisesearch - metricsets: ["health", "stats"] - enabled: true - period: 10s - hosts: ["http://localhost:3002"] - #username: "user" - #password: "secret" - #------------------------------ Envoyproxy Module ------------------------------ - module: envoyproxy metricsets: ["server"] @@ -1408,11 +1399,9 @@ metricbeat.modules: # Password to use when connecting to PostgreSQL. Empty by default. #password: pass -#------------------------------ Prometheus Module ------------------------------ -# Metrics collected from a Prometheus endpoint +#----------------------- Prometheus Typed Metrics Module ----------------------- - module: prometheus period: 10s - metricsets: ["collector"] hosts: ["localhost:9090"] metrics_path: /metrics #metrics_filters: @@ -1421,14 +1410,20 @@ metricbeat.modules: #username: "user" #password: "secret" - # Count number of metrics present in Elasticsearch document (default: false) - #metrics_count: false - # This can be used for service account based authorization: #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token #ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + # Count number of metrics present in Elasticsearch document (default: false) + #metrics_count: false + + # Use Elasticsearch histogram type to store histograms (beta, default: false) + # This will change the default layout and put metric type in the field name + #use_types: true + + # Store counter rates instead of original cumulative counters (experimental, default: false) + #rate_counters: true # Metrics sent by a Prometheus server using remote_write option #- module: prometheus @@ -1436,13 +1431,25 @@ metricbeat.modules: # host: "localhost" # port: "9201" - # Count number of metrics present in Elasticsearch document (default: false) - #metrics_count: false - # Secure settings for the server using TLS/SSL: #ssl.certificate: "/etc/pki/server/cert.pem" #ssl.key: "/etc/pki/server/cert.key" + # Count number of metrics present in Elasticsearch document (default: false) + #metrics_count: false + + # Use Elasticsearch histogram type to store histograms (beta, default: false) + # This will change the default layout and put metric type in the field name + #use_types: true + + # Store counter rates instead of original cumulative counters (experimental, default: false) + #rate_counters: true + + # Define patterns for counter and histogram types so as to identify metrics' types according to these patterns + #types_patterns: + # counter_patterns: [] + # histogram_patterns: [] + # Metrics that will be collected using a PromQL #- module: prometheus # metricsets: ["query"] @@ -1469,9 +1476,11 @@ metricbeat.modules: # params: # query: "some_value" -#----------------------- Prometheus Typed Metrics Module ----------------------- +#------------------------------ Prometheus Module ------------------------------ +# Metrics collected from a Prometheus endpoint - module: prometheus period: 10s + metricsets: ["collector"] hosts: ["localhost:9090"] metrics_path: /metrics #metrics_filters: @@ -1480,20 
+1489,14 @@ metricbeat.modules: #username: "user" #password: "secret" + # Count number of metrics present in Elasticsearch document (default: false) + #metrics_count: false + # This can be used for service account based authorization: #bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token #ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - # Count number of metrics present in Elasticsearch document (default: false) - #metrics_count: false - - # Use Elasticsearch histogram type to store histograms (beta, default: false) - # This will change the default layout and put metric type in the field name - #use_types: true - - # Store counter rates instead of original cumulative counters (experimental, default: false) - #rate_counters: true # Metrics sent by a Prometheus server using remote_write option #- module: prometheus @@ -1501,24 +1504,12 @@ metricbeat.modules: # host: "localhost" # port: "9201" - # Secure settings for the server using TLS/SSL: - #ssl.certificate: "/etc/pki/server/cert.pem" - #ssl.key: "/etc/pki/server/cert.key" - # Count number of metrics present in Elasticsearch document (default: false) #metrics_count: false - # Use Elasticsearch histogram type to store histograms (beta, default: false) - # This will change the default layout and put metric type in the field name - #use_types: true - - # Store counter rates instead of original cumulative counters (experimental, default: false) - #rate_counters: true - - # Define patterns for counter and histogram types so as to identify metrics' types according to these patterns - #types_patterns: - # counter_patterns: [] - # histogram_patterns: [] + # Secure settings for the server using TLS/SSL: + #ssl.certificate: "/etc/pki/server/cert.pem" + #ssl.key: "/etc/pki/server/cert.key" # Metrics that will be collected using a PromQL #- module: prometheus diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/Dockerfile b/x-pack/metricbeat/module/enterprisesearch/_meta/Dockerfile deleted file mode 100644 index 57546590cf63..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -ARG ENT_VERSION -FROM docker.elastic.co/enterprise-search/enterprise-search:${ENT_VERSION} - -COPY docker-entrypoint-dependencies.sh /usr/local/bin/ -# We need to explicitly specify tini here or Docker will use /bin/sh to run the script and -# on Debian-based images (which we use for ARM64 images) it runs dash, which does not -# support environment variables with dots and it leads to all config options being dropped -# See https://github.com/docker-library/openjdk/issues/135#issuecomment-318495067 -ENTRYPOINT ["tini", "--", "/usr/local/bin/docker-entrypoint-dependencies.sh"] - -HEALTHCHECK --interval=1s --retries=300 --start-period=60s \ - CMD curl --user elastic:changeme --fail --silent http://localhost:3002/api/ent/v1/internal/health diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml b/x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml deleted file mode 100644 index d80e6d349b6e..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/config-xpack.yml +++ /dev/null @@ -1,8 +0,0 @@ -- module: enterprisesearch - xpack.enabled: true - metricsets: ["health", "stats"] - enabled: true - period: 10s - hosts: ["http://localhost:3002"] - #username: "user" - #password: "secret" diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/config.yml 
b/x-pack/metricbeat/module/enterprisesearch/_meta/config.yml deleted file mode 100644 index e90fa79f9ff4..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/config.yml +++ /dev/null @@ -1,7 +0,0 @@ -- module: enterprisesearch - metricsets: ["health", "stats"] - enabled: true - period: 10s - hosts: ["http://localhost:3002"] - #username: "user" - #password: "secret" diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/docker-entrypoint-dependencies.sh b/x-pack/metricbeat/module/enterprisesearch/_meta/docker-entrypoint-dependencies.sh deleted file mode 100755 index bdfe80f627fc..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/docker-entrypoint-dependencies.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -set -e - -# Bash is not good at retrieving env variables with dot; parse them with grep from env -export USERNAME=`env | grep elasticsearch.\\username= | cut -d= -f2-` -export PASSWORD=`env | grep elasticsearch.\\password= | cut -d= -f2-` - -until curl -u $USERNAME:$PASSWORD -f -s "http://elasticsearch:9200/_license"; do - echo "Elasticsearch not available yet". - sleep 1 -done - -/usr/local/bin/docker-entrypoint.sh "$@" diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc b/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc deleted file mode 100644 index e417e57b682f..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/docs.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -This module periodically fetches metrics and health information from Elastic Enterprise Search instances using HTTP APIs. - -[float] -=== Compatibility -The module has been tested with Enterprise Search versions 7.16.0 and higher. Versions below 7.16.0 are not expected to work due to a number of API-level incompatibilities. - -[float] -=== Usage -The Enterprise Search module requires a set of credentials (a username and a password) for an Elasticserch user for a user that has a `monitor` https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-cluster[cluster privilege]. - -[float] -=== Usage for {stack} Monitoring - -The Enterprise Search module can be used to collect metrics shown in our {stack-monitor-app} -UI in {kib}. To enable this usage, set `xpack.enabled: true` in configuration. - -NOTE: When this module is used for {stack} Monitoring, it sends metrics to the -monitoring index instead of the default index typically used by {metricbeat}. -For more details about the monitoring index, see -{ref}/config-monitoring-indices.html[Configuring indices for monitoring]. diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/fields.yml b/x-pack/metricbeat/module/enterprisesearch/_meta/fields.yml deleted file mode 100644 index abe269d31b18..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/fields.yml +++ /dev/null @@ -1,15 +0,0 @@ -- key: enterprisesearch - title: "Enterprise Search" - release: beta - settings: ["ssl", "http"] - description: > - Enterprise Search module - fields: - - name: enterprisesearch - type: group - description: > - `enterprisesearch` contains metrics and health information for Enterprise Search - fields: - - name: cluster_uuid - type: keyword - description: Cluster UUID for the Elasticsearch cluster used as the data store for Enterprise Search. 
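A detail worth noting in the deleted docker-entrypoint-dependencies.sh above: because dash (the /bin/sh on the Debian-based ARM64 images) cannot handle environment variables whose names contain dots, the script recovered elasticsearch.username and elasticsearch.password by grepping the output of env, then polled the Elasticsearch _license endpoint with curl until it answered before handing control to the stock entrypoint. For comparison, here is a hypothetical Go sketch of the same readiness gate; the host, endpoint, variable names, and entrypoint path mirror the script, and everything else is assumed:

    package main

    import (
    	"fmt"
    	"net/http"
    	"os"
    	"os/exec"
    	"time"
    )

    func main() {
    	// Unlike dash, Go can read dotted variable names directly, so the
    	// env | grep | cut workaround from the script is unnecessary here.
    	user := os.Getenv("elasticsearch.username")
    	pass := os.Getenv("elasticsearch.password")

    	for {
    		req, err := http.NewRequest(http.MethodGet, "http://elasticsearch:9200/_license", nil)
    		if err != nil {
    			panic(err)
    		}
    		req.SetBasicAuth(user, pass)

    		resp, err := http.DefaultClient.Do(req)
    		if err == nil {
    			resp.Body.Close()
    			// curl -f treats any status below 400 as success.
    			if resp.StatusCode < 400 {
    				break
    			}
    		}
    		fmt.Println("Elasticsearch not available yet.")
    		time.Sleep(time.Second)
    	}

    	// Hand off to the original entrypoint, as the script did.
    	cmd := exec.Command("/usr/local/bin/docker-entrypoint.sh", os.Args[1:]...)
    	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    	if err := cmd.Run(); err != nil {
    		os.Exit(1)
    	}
    }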
diff --git a/x-pack/metricbeat/module/enterprisesearch/_meta/kibana/7/dashboard/Metricbeat-enterprise-search-overview.json b/x-pack/metricbeat/module/enterprisesearch/_meta/kibana/7/dashboard/Metricbeat-enterprise-search-overview.json deleted file mode 100644 index b31750048d3d..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/_meta/kibana/7/dashboard/Metricbeat-enterprise-search-overview.json +++ /dev/null @@ -1,1954 +0,0 @@ -{ - "attributes": { - "description": "Enterprise Search deployment overview: product usage, network metrics, low-level resource utilization, etc", - "hits": 0, - "kibanaSavedObjectMeta": { - "searchSourceJSON": { - "filter": [ - { - "$state": { - "store": "appState" - }, - "meta": { - "alias": null, - "disabled": false, - "indexRefName": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index", - "key": "event.module", - "negate": false, - "params": { - "query": "enterprisesearch" - }, - "type": "phrase" - }, - "query": { - "match_phrase": { - "event.module": "enterprisesearch" - } - } - } - ], - "query": { - "language": "kuery", - "query": "" - } - } - }, - "optionsJSON": { - "hidePanelTitles": false, - "syncColors": false, - "useMargins": true - }, - "panelsJSON": [ - { - "embeddableConfig": { - "enhancements": {}, - "savedVis": { - "data": { - "aggs": [], - "searchSource": { - "filter": [], - "query": { - "language": "kuery", - "query": "" - } - } - }, - "description": "", - "id": "", - "params": { - "fontSize": 12, - "markdown": "# Product Usage", - "openLinksInNewTab": false - }, - "title": "", - "type": "markdown", - "uiState": {} - } - }, - "gridData": { - "h": 4, - "i": "7f957d85-d635-4c60-9449-01175e6bc122", - "w": 48, - "x": 0, - "y": 0 - }, - "panelIndex": "7f957d85-d635-4c60-9449-01175e6bc122", - "type": "visualization", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-0d0ae348-91e8-44a0-b442-f409a093178a", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "0d0ae348-91e8-44a0-b442-f409a093178a": { - "columnOrder": [ - "ade6560c-7f95-4f3f-a01e-154a3139ff55", - "c5c8d5c3-4bd7-4689-b3a9-7d6d00cd24f2" - ], - "columns": { - "ade6560c-7f95-4f3f-a01e-154a3139ff55": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "c5c8d5c3-4bd7-4689-b3a9-7d6d00cd24f2": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Active Nodes", - "operationType": "unique_count", - "scale": "ratio", - "sourceField": "enterprisesearch.cluster_uuid" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "c5c8d5c3-4bd7-4689-b3a9-7d6d00cd24f2" - ], - "layerId": "0d0ae348-91e8-44a0-b442-f409a093178a", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": 
"ade6560c-7f95-4f3f-a01e-154a3139ff55" - } - ], - "legend": { - "isVisible": true, - "position": "right" - }, - "preferredSeriesType": "bar_stacked", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - } - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 11, - "i": "1534ebcc-1209-4d1a-829e-b92128b025e9", - "w": 24, - "x": 0, - "y": 4 - }, - "panelIndex": "1534ebcc-1209-4d1a-829e-b92128b025e9", - "title": "Active Enterprise Search Nodes", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-84b53681-6661-4b25-80df-4b4662b8d685", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "84b53681-6661-4b25-80df-4b4662b8d685": { - "columnOrder": [ - "1b101cee-67f3-4bcc-83cb-47bdb826d22c", - "d0eec076-f510-436a-9868-898e63f16266", - "8a7f1cb1-97c6-4de7-a3b6-eeed5a93e9bd", - "52e0322a-25ad-42db-aa63-f12574a01a09" - ], - "columns": { - "1b101cee-67f3-4bcc-83cb-47bdb826d22c": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "52e0322a-25ad-42db-aa63-f12574a01a09": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Private Sources", - "operationType": "median", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.product_usage.workplace_search.total_private_sources" - }, - "8a7f1cb1-97c6-4de7-a3b6-eeed5a93e9bd": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Org Sources", - "operationType": "median", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.product_usage.workplace_search.total_org_sources" - }, - "d0eec076-f510-436a-9868-898e63f16266": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "App Search Engines", - "operationType": "average", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.product_usage.app_search.total_engines" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": false, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "d0eec076-f510-436a-9868-898e63f16266", - "8a7f1cb1-97c6-4de7-a3b6-eeed5a93e9bd", - "52e0322a-25ad-42db-aa63-f12574a01a09" - ], - "layerId": "84b53681-6661-4b25-80df-4b4662b8d685", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "1b101cee-67f3-4bcc-83cb-47bdb826d22c" - } - ], - "legend": { - "isInside": false, - "isVisible": true, - "position": "bottom", - "showSingleSeries": true - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - 
"valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - } - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 11, - "i": "b330f58a-2b26-432a-b359-747c23654557", - "w": 24, - "x": 24, - "y": 4 - }, - "panelIndex": "b330f58a-2b26-432a-b359-747c23654557", - "title": "Product Usage", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "enhancements": {}, - "savedVis": { - "data": { - "aggs": [], - "searchSource": { - "filter": [], - "query": { - "language": "kuery", - "query": "" - } - } - }, - "description": "", - "id": "", - "params": { - "fontSize": 12, - "markdown": "# Network Metrics", - "openLinksInNewTab": false - }, - "title": "", - "type": "markdown", - "uiState": {} - } - }, - "gridData": { - "h": 4, - "i": "aeb9e2f9-f281-45b3-9e2a-ee516d62126a", - "w": 48, - "x": 0, - "y": 15 - }, - "panelIndex": "aeb9e2f9-f281-45b3-9e2a-ee516d62126a", - "type": "visualization", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-5e24b58b-47b0-4f0e-bea0-df51c7af4896", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "5e24b58b-47b0-4f0e-bea0-df51c7af4896": { - "columnOrder": [ - "1079c9fa-6120-49b1-9d62-af75ecc00525", - "4ef7aabc-0fa2-4a89-89c4-2a7d908fddfe", - "4831819f-66f7-45b0-824f-cba088840eee", - "e8965f7f-bdb0-418b-8e23-ee84c42518b0", - "790cae75-ce3f-4ccd-a767-84632c4a1dae" - ], - "columns": { - "1079c9fa-6120-49b1-9d62-af75ecc00525": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "4831819f-66f7-45b0-824f-cba088840eee": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.stats.http.network.received.bytes", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.network.received.bytes" - }, - "4ef7aabc-0fa2-4a89-89c4-2a7d908fddfe": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Incoming", - "operationType": "counter_rate", - "references": [ - "4831819f-66f7-45b0-824f-cba088840eee" - ], - "scale": "ratio", - "timeScale": "s" - }, - "790cae75-ce3f-4ccd-a767-84632c4a1dae": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.stats.http.network.sent.bytes", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.network.sent.bytes" - }, - "e8965f7f-bdb0-418b-8e23-ee84c42518b0": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Outgoing", - "operationType": "counter_rate", - "references": [ - "790cae75-ce3f-4ccd-a767-84632c4a1dae" - ], - "scale": "ratio", - "timeScale": "s" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 
0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "4ef7aabc-0fa2-4a89-89c4-2a7d908fddfe", - "e8965f7f-bdb0-418b-8e23-ee84c42518b0" - ], - "layerId": "5e24b58b-47b0-4f0e-bea0-df51c7af4896", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "1079c9fa-6120-49b1-9d62-af75ecc00525" - } - ], - "legend": { - "isInside": true, - "isVisible": true, - "position": "right" - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - }, - "yTitle": "bytes/sec" - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 10, - "i": "117a974f-7477-4bc5-951c-ad50e617ef68", - "w": 24, - "x": 0, - "y": 19 - }, - "panelIndex": "117a974f-7477-4bc5-951c-ad50e617ef68", - "title": "Network Traffic", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-ab0dfa29-0a04-448e-ae11-edf8a0427c48", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "ab0dfa29-0a04-448e-ae11-edf8a0427c48": { - "columnOrder": [ - "1baeaecc-cd8d-4793-b3f9-2981ad03418b", - "476c8804-4014-461d-aa46-1b006658dcca", - "cc2f4b02-bcf2-49b2-a95d-1718db2e04fe", - "c1821e7b-d24b-4a1c-9afe-b44842c966d8", - "10f51273-2d6f-4f34-b23d-3a28726352c8", - "631736a3-f9d2-49d1-a45f-e2bc3796fc32", - "7ced77f8-34c2-430c-b11d-45b366734863", - "e4bfadc8-b049-4e8d-a6fc-6cbf84d6df40", - "aa9f33f1-d530-4e0d-af2c-23672edd56fa" - ], - "columns": { - "10f51273-2d6f-4f34-b23d-3a28726352c8": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "4xx", - "operationType": "median", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.responses.4xx" - }, - "1baeaecc-cd8d-4793-b3f9-2981ad03418b": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "476c8804-4014-461d-aa46-1b006658dcca": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "1xx", - "operationType": "counter_rate", - "references": [ - "7ced77f8-34c2-430c-b11d-45b366734863" - ], - "scale": "ratio", - "timeScale": "s" - }, - "631736a3-f9d2-49d1-a45f-e2bc3796fc32": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "5xx", - "operationType": "median", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.responses.5xx" - }, - "7ced77f8-34c2-430c-b11d-45b366734863": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.stats.http.responses.1xx", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.responses.1xx" - }, - "aa9f33f1-d530-4e0d-af2c-23672edd56fa": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.stats.http.responses.3xx", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.responses.3xx" - }, - "c1821e7b-d24b-4a1c-9afe-b44842c966d8": { 
- "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "3xx", - "operationType": "counter_rate", - "references": [ - "aa9f33f1-d530-4e0d-af2c-23672edd56fa" - ], - "scale": "ratio", - "timeScale": "s" - }, - "cc2f4b02-bcf2-49b2-a95d-1718db2e04fe": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "2xx", - "operationType": "counter_rate", - "references": [ - "e4bfadc8-b049-4e8d-a6fc-6cbf84d6df40" - ], - "scale": "ratio", - "timeScale": "s" - }, - "e4bfadc8-b049-4e8d-a6fc-6cbf84d6df40": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.stats.http.responses.2xx", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.responses.2xx" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "Linear", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "476c8804-4014-461d-aa46-1b006658dcca", - "cc2f4b02-bcf2-49b2-a95d-1718db2e04fe", - "c1821e7b-d24b-4a1c-9afe-b44842c966d8", - "10f51273-2d6f-4f34-b23d-3a28726352c8", - "631736a3-f9d2-49d1-a45f-e2bc3796fc32" - ], - "layerId": "ab0dfa29-0a04-448e-ae11-edf8a0427c48", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "1baeaecc-cd8d-4793-b3f9-2981ad03418b", - "yConfig": [ - { - "axisMode": "left", - "color": "#6092c0", - "forAccessor": "476c8804-4014-461d-aa46-1b006658dcca" - }, - { - "axisMode": "left", - "color": "#54b399", - "forAccessor": "cc2f4b02-bcf2-49b2-a95d-1718db2e04fe" - }, - { - "axisMode": "left", - "forAccessor": "c1821e7b-d24b-4a1c-9afe-b44842c966d8" - }, - { - "axisMode": "left", - "forAccessor": "10f51273-2d6f-4f34-b23d-3a28726352c8" - }, - { - "axisMode": "left", - "forAccessor": "631736a3-f9d2-49d1-a45f-e2bc3796fc32" - } - ] - } - ], - "legend": { - "isInside": true, - "isVisible": true, - "position": "right" - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - }, - "yTitle": "responses/sec" - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 10, - "i": "00eb0f89-4a3b-4ebe-8c1f-2d9fc6fc13b0", - "w": 24, - "x": 24, - "y": 19 - }, - "panelIndex": "00eb0f89-4a3b-4ebe-8c1f-2d9fc6fc13b0", - "title": "HTTP Responses", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-0efbdd59-a39c-42d6-a596-115019496378", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "0efbdd59-a39c-42d6-a596-115019496378": { - "columnOrder": [ - "125132ed-117e-426b-b29e-ffa870c7cf74", - "312cbb39-bd19-4cc3-906a-96d2811cb1a3", - "6160010f-d025-46ca-aed5-40d9c117bf04", - "1108c89d-b2bf-4f3a-8afd-aec8b0176adf" - ], - "columns": { - 
"1108c89d-b2bf-4f3a-8afd-aec8b0176adf": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.stats.http.connections.total", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.connections.total" - }, - "125132ed-117e-426b-b29e-ffa870c7cf74": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "312cbb39-bd19-4cc3-906a-96d2811cb1a3": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Current Connections", - "operationType": "average", - "scale": "ratio", - "sourceField": "enterprisesearch.stats.http.connections.current" - }, - "6160010f-d025-46ca-aed5-40d9c117bf04": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Connection Rate", - "operationType": "counter_rate", - "references": [ - "1108c89d-b2bf-4f3a-8afd-aec8b0176adf" - ], - "scale": "ratio", - "timeScale": "s" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "312cbb39-bd19-4cc3-906a-96d2811cb1a3", - "6160010f-d025-46ca-aed5-40d9c117bf04" - ], - "layerId": "0efbdd59-a39c-42d6-a596-115019496378", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "125132ed-117e-426b-b29e-ffa870c7cf74", - "yConfig": [ - { - "axisMode": "right", - "forAccessor": "6160010f-d025-46ca-aed5-40d9c117bf04" - }, - { - "axisMode": "left", - "forAccessor": "312cbb39-bd19-4cc3-906a-96d2811cb1a3" - } - ] - } - ], - "legend": { - "isInside": true, - "isVisible": true, - "position": "right" - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - }, - "yRightTitle": "Connections/sec", - "yTitle": "Connections" - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 11, - "i": "f58b85e4-0623-4eda-8a22-4645b4658825", - "w": 24, - "x": 0, - "y": 29 - }, - "panelIndex": "f58b85e4-0623-4eda-8a22-4645b4658825", - "title": "Active HTTP Connections", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "enhancements": {}, - "savedVis": { - "data": { - "aggs": [], - "searchSource": { - "filter": [], - "query": { - "language": "kuery", - "query": "" - } - } - }, - "description": "", - "id": "", - "params": { - "fontSize": 12, - "markdown": "# Network Metrics", - "openLinksInNewTab": false - }, - "title": "", - "type": "markdown", - "uiState": {} - } - }, - "gridData": { - "h": 4, - "i": "803dcfc0-874d-45f6-81e6-e7822f3e4757", - "w": 48, - "x": 0, - "y": 40 - }, - "panelIndex": "803dcfc0-874d-45f6-81e6-e7822f3e4757", - "type": "visualization", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": 
"indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-beaaf733-314f-497d-ae53-6bd8cacc159a", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "beaaf733-314f-497d-ae53-6bd8cacc159a": { - "columnOrder": [ - "805cf0b8-0d01-40fe-a8e1-5dbc69ce1070", - "1d2594e3-33b9-4b8e-986f-268cdbfe3dfb", - "a8b8a7a9-684e-4d7e-b675-b988edc90aac", - "e5ad4665-d717-4bed-81fe-6d11001817d4", - "b5eac894-ebbc-4417-9da3-eb3aed9c3998" - ], - "columns": { - "1d2594e3-33b9-4b8e-986f-268cdbfe3dfb": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Total", - "operationType": "median", - "params": { - "format": { - "id": "bytes", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.memory_usage.heap_max.bytes" - }, - "805cf0b8-0d01-40fe-a8e1-5dbc69ce1070": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "a8b8a7a9-684e-4d7e-b675-b988edc90aac": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Allocated", - "operationType": "median", - "params": { - "format": { - "id": "bytes", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.memory_usage.heap_committed.bytes" - }, - "b5eac894-ebbc-4417-9da3-eb3aed9c3998": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Off-heap", - "operationType": "median", - "params": { - "format": { - "id": "bytes", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.memory_usage.non_heap_committed.bytes" - }, - "e5ad4665-d717-4bed-81fe-6d11001817d4": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Used", - "operationType": "median", - "params": { - "format": { - "id": "bytes", - "params": { - "decimals": 2 - } - } - }, - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.memory_usage.heap_used.bytes" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "1d2594e3-33b9-4b8e-986f-268cdbfe3dfb", - "e5ad4665-d717-4bed-81fe-6d11001817d4", - "b5eac894-ebbc-4417-9da3-eb3aed9c3998", - "a8b8a7a9-684e-4d7e-b675-b988edc90aac" - ], - "layerId": "beaaf733-314f-497d-ae53-6bd8cacc159a", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "805cf0b8-0d01-40fe-a8e1-5dbc69ce1070", - "yConfig": [ - { - "axisMode": "left", - "forAccessor": "1d2594e3-33b9-4b8e-986f-268cdbfe3dfb" - }, - { - "axisMode": "left", - "forAccessor": "a8b8a7a9-684e-4d7e-b675-b988edc90aac" - }, - { - "axisMode": "left", - "forAccessor": "e5ad4665-d717-4bed-81fe-6d11001817d4" - } - ] - } - ], - "legend": { - "horizontalAlignment": "right", - "isInside": true, - "isVisible": true, - "position": "bottom", - "verticalAlignment": "top" - }, - "preferredSeriesType": "line", - 
"tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - }, - "yTitle": "Bytes" - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 11, - "i": "c72f1e47-34f8-41a2-91f5-452da0a8d052", - "w": 24, - "x": 0, - "y": 44 - }, - "panelIndex": "c72f1e47-34f8-41a2-91f5-452da0a8d052", - "title": "Memory Usage", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-1d99fbfc-3f4b-47ac-9a91-cec22a18f7c4", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "1d99fbfc-3f4b-47ac-9a91-cec22a18f7c4": { - "columnOrder": [ - "ba3f6710-7ba4-4e33-b865-aa3b29a611cc", - "c92a9912-3cdb-403a-b686-87c8c58cac43" - ], - "columns": { - "ba3f6710-7ba4-4e33-b865-aa3b29a611cc": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "c92a9912-3cdb-403a-b686-87c8c58cac43": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Objects", - "operationType": "average", - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.memory_usage.object_pending_finalization_count" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "c92a9912-3cdb-403a-b686-87c8c58cac43" - ], - "layerId": "1d99fbfc-3f4b-47ac-9a91-cec22a18f7c4", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "ba3f6710-7ba4-4e33-b865-aa3b29a611cc" - } - ], - "legend": { - "isVisible": true, - "position": "bottom", - "showSingleSeries": false - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - } - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 11, - "i": "a381a1ed-82a0-4b43-9dd5-8172e4dc4a92", - "w": 24, - "x": 24, - "y": 44 - }, - "panelIndex": "a381a1ed-82a0-4b43-9dd5-8172e4dc4a92", - "title": "JVM Object Finalizer Queue", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-50da9c38-fcc3-4c43-a74e-85672a6ce4b0", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - 
"50da9c38-fcc3-4c43-a74e-85672a6ce4b0": { - "columnOrder": [ - "5bc7d0eb-20e7-4846-a3d0-4f8d82317623", - "30bcd973-ce24-4101-bf9f-8e43715b2cd9", - "e2c87a95-f4a1-41e6-9848-6d2bb31d53da", - "2d04b3ab-8d59-4ec0-831c-b9228a1c62b5", - "dcd14020-52ef-40b2-ba74-30c56176b5f8" - ], - "columns": { - "2d04b3ab-8d59-4ec0-831c-b9228a1c62b5": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.health.jvm.gc.collection_count", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.gc.collection_count" - }, - "30bcd973-ce24-4101-bf9f-8e43715b2cd9": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "GC Rate (per second)", - "operationType": "counter_rate", - "references": [ - "2d04b3ab-8d59-4ec0-831c-b9228a1c62b5" - ], - "scale": "ratio", - "timeScale": "s" - }, - "5bc7d0eb-20e7-4846-a3d0-4f8d82317623": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "dcd14020-52ef-40b2-ba74-30c56176b5f8": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.health.jvm.gc.collection_time.ms", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.gc.collection_time.ms" - }, - "e2c87a95-f4a1-41e6-9848-6d2bb31d53da": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "GC Time (msec)", - "operationType": "counter_rate", - "references": [ - "dcd14020-52ef-40b2-ba74-30c56176b5f8" - ], - "scale": "ratio", - "timeScale": "s" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "curveType": "LINEAR", - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "30bcd973-ce24-4101-bf9f-8e43715b2cd9", - "e2c87a95-f4a1-41e6-9848-6d2bb31d53da" - ], - "layerId": "50da9c38-fcc3-4c43-a74e-85672a6ce4b0", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "5bc7d0eb-20e7-4846-a3d0-4f8d82317623", - "yConfig": [ - { - "axisMode": "left", - "forAccessor": "30bcd973-ce24-4101-bf9f-8e43715b2cd9" - }, - { - "axisMode": "right", - "forAccessor": "e2c87a95-f4a1-41e6-9848-6d2bb31d53da" - } - ] - } - ], - "legend": { - "isVisible": true, - "position": "bottom" - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - } - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 12, - "i": "0a6dec60-0d76-4d56-8de7-bbc11b056dd4", - "w": 24, - "x": 0, - "y": 55 - }, - "panelIndex": "0a6dec60-0d76-4d56-8de7-bbc11b056dd4", - "title": "JVM Garbage Collection", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": 
"indexpattern-datasource-layer-525cd9ab-aabd-4650-9fd3-49b273d63dfd", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "525cd9ab-aabd-4650-9fd3-49b273d63dfd": { - "columnOrder": [ - "e357cbf6-3e7b-4a1a-ab01-8bf1158d8c9e", - "ebf70775-2697-4596-9a54-4c9dd8729dca", - "82b0a7eb-7726-47ef-aaf8-1559ce454967", - "6906ad8f-3215-47d0-9861-2d183269ce10", - "23c8da16-6db9-442b-bd8f-a39786c37047" - ], - "columns": { - "23c8da16-6db9-442b-bd8f-a39786c37047": { - "dataType": "number", - "isBucketed": false, - "label": "Maximum of enterprisesearch.health.jvm.threads.total_started", - "operationType": "max", - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.threads.total_started" - }, - "6906ad8f-3215-47d0-9861-2d183269ce10": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Creation Rate", - "operationType": "counter_rate", - "references": [ - "23c8da16-6db9-442b-bd8f-a39786c37047" - ], - "scale": "ratio", - "timeScale": "s" - }, - "82b0a7eb-7726-47ef-aaf8-1559ce454967": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Daemon Threads", - "operationType": "median", - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.threads.daemon" - }, - "e357cbf6-3e7b-4a1a-ab01-8bf1158d8c9e": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - }, - "ebf70775-2697-4596-9a54-4c9dd8729dca": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Active Threads", - "operationType": "average", - "scale": "ratio", - "sourceField": "enterprisesearch.health.jvm.threads.current" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "ebf70775-2697-4596-9a54-4c9dd8729dca", - "82b0a7eb-7726-47ef-aaf8-1559ce454967", - "6906ad8f-3215-47d0-9861-2d183269ce10" - ], - "layerId": "525cd9ab-aabd-4650-9fd3-49b273d63dfd", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "e357cbf6-3e7b-4a1a-ab01-8bf1158d8c9e", - "yConfig": [ - { - "axisMode": "right", - "forAccessor": "6906ad8f-3215-47d0-9861-2d183269ce10" - }, - { - "axisMode": "left", - "forAccessor": "82b0a7eb-7726-47ef-aaf8-1559ce454967" - }, - { - "axisMode": "left", - "forAccessor": "ebf70775-2697-4596-9a54-4c9dd8729dca" - } - ] - } - ], - "legend": { - "isVisible": true, - "position": "bottom" - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - }, - "yRightTitle": "Threads/sec", - "yTitle": "Threads" - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 12, - "i": "49705139-1a81-4a84-bb21-d8e48643ed88", - "w": 24, - "x": 24, - "y": 55 - }, - "panelIndex": "49705139-1a81-4a84-bb21-d8e48643ed88", - 
"title": "JVM Threads", - "type": "lens", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "enhancements": {}, - "savedVis": { - "data": { - "aggs": [], - "searchSource": { - "filter": [], - "query": { - "language": "kuery", - "query": "" - } - } - }, - "description": "", - "id": "", - "params": { - "fontSize": 12, - "markdown": "# Background Workers", - "openLinksInNewTab": false - }, - "title": "", - "type": "markdown", - "uiState": {} - } - }, - "gridData": { - "h": 4, - "i": "a03d7633-3463-4c56-9a3d-a02f8b39772c", - "w": 48, - "x": 0, - "y": 67 - }, - "panelIndex": "a03d7633-3463-4c56-9a3d-a02f8b39772c", - "type": "visualization", - "version": "7.16.0-SNAPSHOT" - }, - { - "embeddableConfig": { - "attributes": { - "references": [ - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "indexpattern-datasource-layer-d7ce3f00-e90b-429d-af0d-91651c3ec1c0", - "type": "index-pattern" - } - ], - "state": { - "datasourceStates": { - "indexpattern": { - "layers": { - "d7ce3f00-e90b-429d-af0d-91651c3ec1c0": { - "columnOrder": [ - "f9190822-6861-4d63-94d1-52e9c3cfd223", - "c190f356-68d2-47ab-a465-07c32958d727", - "4da78b25-82c4-48ed-a954-bdb991ad6c23" - ], - "columns": { - "4da78b25-82c4-48ed-a954-bdb991ad6c23": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Active Workers", - "operationType": "average", - "scale": "ratio", - "sourceField": "enterprisesearch.health.crawler.workers.active" - }, - "c190f356-68d2-47ab-a465-07c32958d727": { - "customLabel": true, - "dataType": "number", - "isBucketed": false, - "label": "Total Workers", - "operationType": "average", - "scale": "ratio", - "sourceField": "enterprisesearch.health.crawler.workers.pool_size" - }, - "f9190822-6861-4d63-94d1-52e9c3cfd223": { - "dataType": "date", - "isBucketed": true, - "label": "@timestamp", - "operationType": "date_histogram", - "params": { - "interval": "auto" - }, - "scale": "interval", - "sourceField": "@timestamp" - } - }, - "incompleteColumns": {} - } - } - } - }, - "filters": [], - "query": { - "language": "kuery", - "query": "" - }, - "visualization": { - "axisTitlesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "fittingFunction": "None", - "gridlinesVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "labelsOrientation": { - "x": 0, - "yLeft": 0, - "yRight": 0 - }, - "layers": [ - { - "accessors": [ - "c190f356-68d2-47ab-a465-07c32958d727", - "4da78b25-82c4-48ed-a954-bdb991ad6c23" - ], - "layerId": "d7ce3f00-e90b-429d-af0d-91651c3ec1c0", - "layerType": "data", - "position": "top", - "seriesType": "line", - "showGridlines": false, - "xAccessor": "f9190822-6861-4d63-94d1-52e9c3cfd223" - } - ], - "legend": { - "isInside": true, - "isVisible": true, - "position": "right" - }, - "preferredSeriesType": "line", - "tickLabelsVisibilitySettings": { - "x": true, - "yLeft": true, - "yRight": true - }, - "valueLabels": "hide", - "valuesInLegend": true, - "yLeftExtent": { - "mode": "full" - }, - "yRightExtent": { - "mode": "full" - }, - "yTitle": "Workers" - } - }, - "title": "", - "type": "lens", - "visualizationType": "lnsXY" - }, - "enhancements": {}, - "hidePanelTitles": false - }, - "gridData": { - "h": 12, - "i": "40cbf119-cfab-48a5-b37e-a6c8edf4b9ba", - "w": 24, - "x": 0, - "y": 71 - }, - "panelIndex": "40cbf119-cfab-48a5-b37e-a6c8edf4b9ba", - "title": "Crawler Workers", - "type": "lens", - "version": 
"7.16.0-SNAPSHOT" - } - ], - "timeRestore": false, - "title": "[Metricbeat Enterprise Search] Overview", - "version": 1 - }, - "coreMigrationVersion": "7.16.0", - "id": "e179f130-2c4a-11ec-9ab7-4fbdf1480ffc", - "migrationVersion": { - "dashboard": "7.16.0" - }, - "references": [ - { - "id": "metricbeat-*", - "name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "1534ebcc-1209-4d1a-829e-b92128b025e9:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "1534ebcc-1209-4d1a-829e-b92128b025e9:indexpattern-datasource-layer-0d0ae348-91e8-44a0-b442-f409a093178a", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "b330f58a-2b26-432a-b359-747c23654557:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "b330f58a-2b26-432a-b359-747c23654557:indexpattern-datasource-layer-84b53681-6661-4b25-80df-4b4662b8d685", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "117a974f-7477-4bc5-951c-ad50e617ef68:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "117a974f-7477-4bc5-951c-ad50e617ef68:indexpattern-datasource-layer-5e24b58b-47b0-4f0e-bea0-df51c7af4896", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "00eb0f89-4a3b-4ebe-8c1f-2d9fc6fc13b0:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "00eb0f89-4a3b-4ebe-8c1f-2d9fc6fc13b0:indexpattern-datasource-layer-ab0dfa29-0a04-448e-ae11-edf8a0427c48", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "f58b85e4-0623-4eda-8a22-4645b4658825:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "f58b85e4-0623-4eda-8a22-4645b4658825:indexpattern-datasource-layer-0efbdd59-a39c-42d6-a596-115019496378", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "c72f1e47-34f8-41a2-91f5-452da0a8d052:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "c72f1e47-34f8-41a2-91f5-452da0a8d052:indexpattern-datasource-layer-beaaf733-314f-497d-ae53-6bd8cacc159a", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "a381a1ed-82a0-4b43-9dd5-8172e4dc4a92:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "a381a1ed-82a0-4b43-9dd5-8172e4dc4a92:indexpattern-datasource-layer-1d99fbfc-3f4b-47ac-9a91-cec22a18f7c4", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "0a6dec60-0d76-4d56-8de7-bbc11b056dd4:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "0a6dec60-0d76-4d56-8de7-bbc11b056dd4:indexpattern-datasource-layer-50da9c38-fcc3-4c43-a74e-85672a6ce4b0", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "49705139-1a81-4a84-bb21-d8e48643ed88:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "49705139-1a81-4a84-bb21-d8e48643ed88:indexpattern-datasource-layer-525cd9ab-aabd-4650-9fd3-49b273d63dfd", - "type": "index-pattern" - }, - { - "id": "metricbeat-*", - "name": "40cbf119-cfab-48a5-b37e-a6c8edf4b9ba:indexpattern-datasource-current-indexpattern", - "type": "index-pattern" - }, - { - "id": 
"metricbeat-*", - "name": "40cbf119-cfab-48a5-b37e-a6c8edf4b9ba:indexpattern-datasource-layer-d7ce3f00-e90b-429d-af0d-91651c3ec1c0", - "type": "index-pattern" - } - ], - "type": "dashboard", - "updated_at": "2021-10-19T17:06:49.093Z", - "version": "WzIzMjksMV0=" -} \ No newline at end of file diff --git a/x-pack/metricbeat/module/enterprisesearch/doc.go b/x-pack/metricbeat/module/enterprisesearch/doc.go deleted file mode 100644 index 183425da0890..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// Package enterprisesearch is a Metricbeat module that contains MetricSets. -package enterprisesearch diff --git a/x-pack/metricbeat/module/enterprisesearch/docker-compose.yml b/x-pack/metricbeat/module/enterprisesearch/docker-compose.yml deleted file mode 100644 index 09d7addb71c2..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/docker-compose.yml +++ /dev/null @@ -1,51 +0,0 @@ -version: '2.3' - -services: - enterprise_search: - image: docker.elastic.co/integrations-ci/beats-enterprisesearch:${ENT_VERSION:-8.0.0-SNAPSHOT} - build: - context: ./_meta - args: - ENT_VERSION: ${ENT_VERSION:-8.0.0-SNAPSHOT} - depends_on: - - "elasticsearch" - environment: - - "ENT_SEARCH_DEFAULT_PASSWORD=changeme" - - "elasticsearch.username=elastic" - - "elasticsearch.password=changeme" - - "elasticsearch.host=http://elasticsearch:9200" - - "allow_es_settings_modification=true" - - "secret_management.encryption_keys=[4a2cd3f81d39bf28738c10db0ca782095ffac07279561809eecc722e0c20eb09]" - - "kibana.host=http://localhost:5601" - - "JAVA_OPTS=-Xms2g -Xmx2g" - # Make it possible to run against slightly older ES versions - - "elasticsearch.ignore_version_mismatch=true" - ports: - - 3002:3002 - - elasticsearch: - image: docker.elastic.co/integrations-ci/beats-elasticsearch:${ELASTICSEARCH_VERSION:-8.0.0-SNAPSHOT}-1 - build: - args: - ELASTICSEARCH_VERSION: ${ELASTICSEARCH_VERSION:-7.15.0} - extends: - file: ../../../metricbeat/docker-compose.yml - service: elasticsearch - ulimits: - memlock: - soft: -1 - hard: -1 - # Override healthcheck to add user / password - healthcheck: - test: ["CMD", "curl", "-u", "elastic:changeme", "-f", "http://localhost:9200/_license"] - interval: 30s - timeout: 10s - ports: - - 9200:9200 - environment: - - "bootstrap.memory_lock=true" - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - "discovery.type=single-node" - - "xpack.security.enabled=true" - - "xpack.security.authc.api_key.enabled=true" - - "ELASTIC_PASSWORD=changeme" diff --git a/x-pack/metricbeat/module/enterprisesearch/fields.go b/x-pack/metricbeat/module/enterprisesearch/fields.go deleted file mode 100644 index 061ec34b5cf1..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/fields.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// Code generated by beats/dev-tools/cmd/asset/asset.go - DO NOT EDIT. 
- -package enterprisesearch - -import ( - "github.com/elastic/beats/v7/libbeat/asset" -) - -func init() { - if err := asset.SetFields("metricbeat", "enterprisesearch", asset.ModuleFieldsPri, AssetEnterprisesearch); err != nil { - panic(err) - } -} - -// AssetEnterprisesearch returns asset data. -// This is the base64 encoded zlib format compressed contents of module/enterprisesearch. -func AssetEnterprisesearch() string { - return "eJzsXM2O4zYSvvdTFPo0A6Qn/edD+hAgO8nuJJgJgqRnc1gsFEos2xxTpEJS7naefkFSkmX92JIsqRe73YcgGNtV31csVpGsIq9gg7sHQGFQJYpp1EhUtL4AMMxwfIDLH4qP4Df32eUFgEKOROMDhGjIBYBGY5hY6Qf416XW/PIruFwbk1z++wKAoo4USwyT4gG+vQAAqMmEWNKU4wXAkiGn+sF97QoEibERnf0zuwQfYKVkmmT/0qDK/v1RFfAHRFIYwoSGGI1ikQYiKKyRcLMGJpZSxcRKgaVUdbSZ4DLUMtyIp9qgCtKU0eLDHO4Gd09Slf/9APR7/1v4/PnH751ys0b4gRNtWOSx5+Ih1UiBaPcNSgwBbaTCZsTvaiA91xq8sjXtX3WgG0HXh7MmvWqrMhT734MP2m1VU/1BauME7I1VAyMk3ctvRLBFpZkUjSCqFunAPhNXdqSKgCZzHJgkjUNUtY+PG6YHMi8fUs3EyllNY0yEYVH+DfvvHnyz5cpgw5RxGqyJXp8L+DtIBfszRS8SrMgj45qQaENWJ4Y2UTJCrcca2kzcGUObsCYzeEBcitUpG/2SISjHhxpMJrQhImqxTRlOmhgW4zuN0SiovLhRkC0ZxxBLDljF1TR8NWAfmmO6BYdxiJQihb9nmurg9n9twwonh/akIVuNWRriDliPQlOoDVEmiGQqmizaH+TPPoTIJdgR10cQwppQMBJCzGEgBZqi/TctYwSmdYq6GxGrLNBMRBjYrBhkElscuD+tR+u+Tr6zu9VxjFqmHt5c3QBbgsAtqj3Lt8dj05dtPCwu/fTPT/UUCx2CT3Oig75x2urPU4lbhhgJKhWtK6Ujs3zVHnc6ze+fyJbAiqiQrBAiyTlGbpJn67qBM3kvaMQZ8ygN4XnqlctG6FIBE1sZuUilS47o3czOtjXWklpXLi7Ux01GGcrGRXudoDDWAdyyoYXWUCo5jRhjqXZBqsspvwq+k8t8cpKgTVIX/1gjSQImmHkX7gwONqjPSQ9wTEg1myVg9fppF+6cDe10ZMJLedfNKRwBK+MlCIyBPZJxzIyZncD7XK+DYQPfOSxi8jwz/k/k2SNvG4Ru6IUUwUtNgp+luComAiM8Cw3nulVB6aVcq+BVABiJmQy/YGSCBAVlYhUsmSCc/UVGTm/ftnwJ4HumE052/qiCJImSzywmBku50EPUbr37tGbRGsoogWnI0B+hXCwR1woJbTJ5jyTx6IUc2ehB17VEqhSOZOP3XlbJcJxtMWfc0R0owbiRzWh4vIaesGLyPM7uCcmmbBZwLl5af9gZlG9Dsu1VYn/zRLRduqPpCNjYFVCQSZpkhZgZECKFxKIlgn5tl1IZ+EZKLeALb1TkideOljruO977Hw/bezxJtUF15rTMEbQL67RRl5IHmv3VtPyD3qP2u8fipIKV2tF/SGTYdhwI+824F5rbpyuSLWGchHx0MLncOp5ctTbElIdx1GNoJ/xd6fvHjqEjKYTbqgw8LLRukHASFdr3AkGnod5pg3EdUhusMrQvMgzc6f55k+cYwi8yzAoIjQiPoSwjfSLMsFY3GepHhzh1riU7VMq2kK2hr4ZRqs30GLO0z3cQosXaF6Udc4vl+PqzbdxrQP+mkGyofBJNWO3S0qpqGnU4MfJlzBQ5mrYgAl2MW8O9N7AX7gEz4dKe+3/ntSeMWga5TDmfCKIVDXonovNRMhEpjFEYMhXYkoaRMCeoYqY1k6LNac/FXNIwEHN5CTByNPU5zknW50VRfDaKRCbwEoMWrKfx1jD/ZohJdXM1xOuUxdrKETk3IhxZZMEgX/h9b+Quay04qFPq3UR+aUV3XHOVEf2ZYooBxaS2mB4LGDMYD82XZaSMti4Oz4ZIm9aHJxH53Vck48Smhbb917ngfCrPlVQPsXvD1dEaacqnhVso6Qm3mLNp+BIBSKfh1WsQeg1C7Uhfg9D/TxBK0pAzvZ41AmU6X2PPa+ypIX2NPf+7sacoyRpTDSEdz/5+FJGMrYN9eHz8BRT+maI2eQ9K39O+7HSmZRvdY4d6iGov1m1OmWspHrpDnbaoVcGrQSYofJvR8a7CmUtNn8gzi9O4hDySIjNNM4nTFAbmSze9RiFVLUINHoweTIoaPBobCM9z+5+9EDCKLJcsapuG0NHVFUbItrN3A1SHwf2wAJN3A4zuRxrF3P0czUwtkMlYHo5pkKAKzmgg7c34uy0qssJzRjdBBRojKQbv8ouB/q+h33HIB1Hfj7zLzQFNVVtTR49Q891qpXBFDB4m/lz4CIk2Js9jdWx+lGJl0VVXA3u0zS2afZMrEjEW5NxTJoasDQ0obsdC/ZshghJFgeKW7beYrSS2hKfY2uzbgUupyz+RQjfOwnO82kv1jTQaQiU3KMAVE8Od/472u2qrZKin3zyPsyhrXL9kM1PDkgmm10jhiZk1EKu1TJDWMspAl7p9ETa3E7G5exE2dxOxuX8RNvcTsVm8CJvFSGwOToIGdt7s45WruoL09eBlKlygtTvgDExIoo0VJ6g/KchOglx/BkuQM4GOIGu4y9J3G49ixQQGFLVRcofqXVurbfdbdtUhKnNACltGfCm3otkbd4T8EhPG5yTi9Y0GP1MQuB7I+VgcqB2NzNLahk7FIj8wzVorvDKPvQFb44w+q9O0+5TO1JRa7MaawCsuw8YzlR5LqX84GUAx4XIXozBXT4xi8epAXnnISAw+jbM/D/JYfTQbdK6TuE7bYkej0zgmalcA3hM6t0CS9fWPeh5cdemozKVDNaAN69GO3UmgtnYRwpv8vp37hX57Gr1OI/vbZTpuR9kJBmZNDKzJFr1+pF0M7ePNpDCzkHaItssRZfk1iSqgbueTkuIVxy3y0vnAWKEgISvUwZZpNtWlBKcBMg35cU0RhYu8tj+h63EolSquA8K5fJoI/OdfP2rIFFSx01TZ+USZjuQW1W4UMhQFO8Glc0huJOMVDObyVXU7T1HsQCHRUpwb2glXSOgu0Iht141glLnsDCGkgaXMhxYjkmrMPgSKVzRNOPP3quENkmjtPmB7T5aC70CKCN/2qGpSGRMmgiXjBtXxwR6Raj7me5JEQCo2wo6hh9Sv1Vgq5W4IKml
k1NppMSIDt4ssFH/NxJZwRr9OhU6TRLrbTTmYPjVxzsQmMFIGFLFtSo3I4sDlshctfEJxLQ3AWcyOJpUqfiG9uNlcKANNCs12aUrAkBVIZb3qw+Onj86sQIxRLExNn46T0ngGkRQGhXGXGmbnJ8pQIINSOa48cURMTKqDSNKxbmQcVAwU6pSb7IT1KxuHsxNVq/DcOHx5e319OanJHZfb6+v9CTTYoN9/ddBK4e76ZgYKd9c3U1K4nYXC7XQU7mdxpPspHel+Fke6n9KR7mdxpPtpHeluFgp3U1K4n4XC/ZQUFrNQWExI4WaWiHQzZUS6nWU63045nW+/mYXCN9NRWMyS2hZTprbFLKltMWVqW8yS2hZTprbFLKltMWVqW8yS2hYjpLajezZ/u+DkOx89z9/y/t7Pv34sinl/7Z88rZfwDv8610UUi4ka975G81HMQTXEm744TzzFpnxhRzU+pzct2jVGmz1ahyE7gUnWRPd7iSVofziqE4HTVZHi4Zr9yz5FfSd7uCUrdPbsAWl/DgcGVv72b+w0XpUazctPvMQDZ3iOm5blKvHBxS5409i176tpkEitWcjxbWfG81YJq25zyK9aGxxC4sSTQKPzKJ4KerNUiG8rjNoJXDShT5SkaWQaXwvt3YGQCfMPhjaV7Oq9yu3l8VNNByRJAn34dm4X6HX4SVJ6s7wRf78S4+E9Lt9i1BxwTrjFibs/JeCZkryho63voNWWT/kLHaNYtPbexzh2rVpWqlWgZaqi4TcwTli4RkSqlW9Lyc/FM/29DV9nkyi2JQbnZpSp7UroPwEAAP//yshTWw==" -} diff --git a/x-pack/metricbeat/module/enterprisesearch/health/_meta/docs.asciidoc b/x-pack/metricbeat/module/enterprisesearch/health/_meta/docs.asciidoc deleted file mode 100644 index 49bacfa68148..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/health/_meta/docs.asciidoc +++ /dev/null @@ -1,3 +0,0 @@ -This is the `health` metricset of the Enterprise Search module. - -This metricset allows users to fetch and ingest Enterprise Search solution health information from the https://www.elastic.co/guide/en/enterprise-search/current/monitoring-apis.html#health-api[Health API]. diff --git a/x-pack/metricbeat/module/enterprisesearch/health/_meta/fields.yml b/x-pack/metricbeat/module/enterprisesearch/health/_meta/fields.yml deleted file mode 100644 index 41038696c9f1..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/health/_meta/fields.yml +++ /dev/null @@ -1,147 +0,0 @@ -- name: health - type: group - release: beta - description: Enterprise Search health - fields: - - name: name - type: keyword - description: Host name for the Enterprise Search node - - - name: version - type: group - description: Enterprise Search version information - fields: - - name: number - type: keyword - description: Enterprise Search version number using the semantic versioning format - - - name: build_hash - type: keyword - description: A unique build hash for the Enterprise Search package - - - name: process - type: group - description: Enterprise Search process information - fields: - - name: pid - type: long - description: Process ID for the Enterprise Search instance - - - name: uptime.sec - type: long - description: Process uptime for the Enterprise Search instance - - - name: filebeat - type: group - description: Health information for the embedded Filebeat instance - fields: - - name: pid - type: long - description: Process ID for the embedded Filebeat instance - - - name: restart_count - type: long - description: Number of times embedded Filebeat instance had to be restarted due to some issues - - - name: time_since_last_restart.sec - type: long - description: Time since the last embedded Filebeat instance restart (-1 if never restarted) - - - name: jvm - type: group - description: JVM health - fields: - - name: version - type: keyword - description: JVM version used to run Enterprise Search - - - name: gc - type: group - description: Java garbage collection metrics - fields: - - name: collection_count - type: long - description: Total number of Java garbage collector invocations since the start of the process - - - name: collection_time.ms - type: long - description: Total time spent running Java garbage collector since the start of the process - - - name: memory_usage - 
-          type: group
-          description: Memory usage
-          fields:
-            - name: heap_init.bytes
-              type: long
-              format: bytes
-              description: Heap init used by the JVM in bytes.
-
-            - name: heap_used.bytes
-              type: long
-              format: bytes
-              description: Heap used by the JVM in bytes.
-
-            - name: heap_committed.bytes
-              type: long
-              format: bytes
-              description: Committed heap to the JVM in bytes.
-
-            - name: heap_max.bytes
-              type: long
-              format: bytes
-              description: Max heap used by the JVM in bytes
-
-            - name: non_heap_init.bytes
-              type: long
-              format: bytes
-              description: Non-Heap initial memory used by the JVM in bytes.
-
-            - name: non_heap_committed.bytes
-              type: long
-              format: bytes
-              description: Non-Heap committed memory used by the JVM in bytes.
-
-            - name: object_pending_finalization_count
-              type: long
-              description: >
-                Displays the approximate number of objects for which finalization is pending.
-
-        - name: threads
-          type: group
-          description: Threads information
-          fields:
-            - name: current
-              type: long
-              description: Current number of live threads.
-
-            - name: daemon
-              type: long
-              description: Current number of live daemon threads.
-
-            - name: max
-              type: long
-              description: Peak live thread count since the JVM started or the peak was reset.
-
-            - name: total_started
-              type: long
-              description: Total number of threads created and/or started since the JVM started.
-
-    - name: crawler
-      type: group
-      description: Crawler health
-      fields:
-        - name: workers
-          type: group
-          description: Crawler workers
-          fields:
-            - name: pool_size
-              type: long
-              description: Workers pool size.
-
-            - name: active
-              type: long
-              description: Number of active workers.
-
-            - name: available
-              type: long
-              description: Number of available workers.
diff --git a/x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/config.yml b/x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/config.yml
deleted file mode 100644
index 2f2263d262b5..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/config.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-type: http
-url: "/api/ent/v1/internal/health"
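The testdata config above names the endpoint the metricset polls. As a rough standalone illustration, not part of this patch, the raw document can be fetched with any HTTP client; host, port, and credentials here are assumptions taken from the docker-compose.yml removed earlier in this patch:

// Hypothetical client sketch, not part of this patch: fetch the Health API
// document by hand, the same shape as the captured testdata below.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Path from testdata/config.yml; host/port/credentials assumed from the
	// module's docker-compose.yml.
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:3002/api/ent/v1/internal/health", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("elastic", "changeme")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var health map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&health); err != nil {
		panic(err)
	}
	// Every monitoring event produced from this document gets tagged with the
	// cluster UUID (see data.go further down in this patch).
	fmt.Println("cluster_uuid:", health["cluster_uuid"])
}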
"seconds_since_last_restart":-1 - }, - "esqueues_me":{ - - }, - "crawler":{ - "running":true, - "workers":{ - "pool_size":16, - "active":0, - "available":16 - } - }, - "system":{ - "java_version":"1.8.0_292", - "jruby_version":"9.2.13.0", - "os_name":"Mac OS X", - "os_version":"10.16" - }, - "cluster_uuid":"MTD6fXYGTZylTMoW5AG5JA" -} diff --git a/x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/health.json-expected.json b/x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/health.json-expected.json deleted file mode 100644 index 2ec21d12edc3..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/health/_meta/testdata/health.json-expected.json +++ /dev/null @@ -1,83 +0,0 @@ -[ - { - "enterprisesearch": { - "cluster_uuid": "MTD6fXYGTZylTMoW5AG5JA", - "health": { - "crawler": { - "workers": { - "active": 0, - "available": 16, - "pool_size": 16 - } - }, - "jvm": { - "gc": { - "collection_count": 20, - "collection_time": { - "ms": 2303 - } - }, - "memory_usage": { - "heap_committed": { - "bytes": 1279262720 - }, - "heap_init": { - "bytes": 268435456 - }, - "heap_max": { - "bytes": 1908932608 - }, - "heap_used": { - "bytes": 736601248 - }, - "non_heap_committed": { - "bytes": 99745792 - }, - "non_heap_init": { - "bytes": 2555904 - }, - "object_pending_finalization_count": 0 - }, - "threads": { - "current": 27, - "daemon": 17, - "max": 32, - "total_started": 35 - }, - "version": "1.8.0_292" - }, - "name": "elastic-m1.local", - "process": { - "filebeat": { - "pid": 60067, - "restart_count": 0, - "time_since_last_restart": { - "sec": -1 - } - }, - "pid": 59623, - "uptime": { - "sec": 170377 - } - }, - "version": { - "build_hash": "unknown", - "number": "8.0.0" - } - } - }, - "event": { - "dataset": "enterprisesearch.health", - "duration": 115000, - "module": "enterprisesearch" - }, - "metricset": { - "name": "health", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "enterprisesearch" - } - } -] \ No newline at end of file diff --git a/x-pack/metricbeat/module/enterprisesearch/health/data.go b/x-pack/metricbeat/module/enterprisesearch/health/data.go deleted file mode 100644 index 318f9fa05981..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/health/data.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
diff --git a/x-pack/metricbeat/module/enterprisesearch/health/data.go b/x-pack/metricbeat/module/enterprisesearch/health/data.go
deleted file mode 100644
index 318f9fa05981..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/health/data.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package health
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-
-	"github.com/joeshaw/multierror"
-
-	s "github.com/elastic/beats/v7/libbeat/common/schema"
-	c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface"
-	"github.com/elastic/beats/v7/metricbeat/helper/elastic"
-	"github.com/elastic/beats/v7/metricbeat/mb"
-	"github.com/elastic/elastic-agent-libs/mapstr"
-)
-
-var (
-	schema = s.Schema{
-		"name": c.Str("name"),
-
-		"version": c.Dict("version", s.Schema{
-			"number":     c.Str("number"),
-			"build_hash": c.Str("build_hash"),
-		}),
-
-		"jvm": c.Dict("jvm", s.Schema{
-			"version": c.Str("version"),
-
-			"gc": c.Dict("gc", s.Schema{
-				"collection_count": c.Int("collection_count"),
-				"collection_time":  s.Object{"ms": c.Int("collection_time")},
-				// TODO: Add separate metrics for old and young generation collectors
-			}),
-
-			"memory_usage": c.Dict("memory_usage", s.Schema{
-				"heap_init":                         s.Object{"bytes": c.Int("heap_init")},
-				"heap_used":                         s.Object{"bytes": c.Int("heap_used")},
-				"heap_committed":                    s.Object{"bytes": c.Int("heap_committed")},
-				"heap_max":                          s.Object{"bytes": c.Int("heap_max")},
-				"non_heap_init":                     s.Object{"bytes": c.Int("non_heap_init")},
-				"non_heap_committed":                s.Object{"bytes": c.Int("non_heap_committed")},
-				"object_pending_finalization_count": c.Int("object_pending_finalization_count"),
-			}),
-
-			"threads": c.Dict("threads", s.Schema{
-				"current":       c.Int("thread_count"),
-				"daemon":        c.Int("daemon_thread_count"),
-				"max":           c.Int("peak_thread_count"),
-				"total_started": c.Int("total_started_thread_count"),
-			}),
-		}),
-
-		"process": c.Dict("process", s.Schema{
-			"pid":    c.Int("pid"),
-			"uptime": s.Object{"sec": c.Int("uptime")},
-
-			"filebeat": c.Dict("filebeat", s.Schema{
-				"pid":                      c.Int("pid"),
-				"restart_count":            c.Int("restart_count"),
-				"time_since_last_restart":  s.Object{"sec": c.Int("seconds_since_last_restart")},
-			}),
-		}),
-
-		"crawler": c.Dict("crawler", s.Schema{
-			"workers": c.Dict("workers", s.Schema{
-				"pool_size": c.Int("pool_size"),
-				"active":    c.Int("active"),
-				"available": c.Int("available"),
-			}),
-		}),
-	}
-)
-
-func eventMapping(report mb.ReporterV2, input []byte, isXpack bool) error {
-	var data map[string]interface{}
-	err := json.Unmarshal(input, &data)
-	if err != nil {
-		return err
-	}
-	var errs multierror.Errors
-
-	// All events need to have a cluster_uuid to work with Stack Monitoring
-	event := mb.Event{
-		ModuleFields:    mapstr.M{},
-		MetricSetFields: mapstr.M{},
-	}
-	event.ModuleFields.Put("cluster_uuid", data["cluster_uuid"])
-
-	// Collect process info in a form ready for mapping
-	process := make(map[string]interface{})
-	process["filebeat"] = data["filebeat"]
-
-	jvm, ok := data["jvm"].(map[string]interface{})
-	if ok {
-		if pid, ok := jvm["pid"]; ok {
-			process["pid"] = pid
-		}
-		if uptime, ok := jvm["uptime"]; ok {
-			process["uptime"] = uptime
-		}
-
-		// Add version info to the JVM section to help the schema mapper find it
-		system, ok := data["system"].(map[string]interface{})
-		if ok {
-			jvm["version"] = system["java_version"]
-		} else {
-			errs = append(errs, errors.New("system is not a map"))
-		}
-	} else {
-		errs = append(errs, errors.New("jvm is not a map"))
-	}
-
-	// Set the process info we have collected
-	data["process"] = process
-
-	// xpack.enabled in config using standalone metricbeat writes to `.monitoring` instead of `metricbeat-*`
-	// When using Agent, the index name is overwritten anyways.
-	if isXpack {
-		index := elastic.MakeXPackMonitoringIndexName(elastic.EnterpriseSearch)
-		event.Index = index
-	}
-
-	event.MetricSetFields, err = schema.Apply(data)
-	if err != nil {
-		errs = append(errs, fmt.Errorf("failure to apply health schema: %w", err))
-	} else {
-		report.Event(event)
-	}
-
-	return errs.Err()
-}
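The schema declarations above do the field renaming and unit nesting shown by the testdata pair. A minimal reproduction of just the gc mapping, not part of this patch, using the same libbeat packages the file imports:

// Illustrative sketch, not part of this patch: reproduce the gc sub-schema
// from data.go on a tiny hand-written input document.
package main

import (
	"encoding/json"
	"fmt"

	s "github.com/elastic/beats/v7/libbeat/common/schema"
	c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface"
)

var demo = s.Schema{
	"gc": c.Dict("gc", s.Schema{
		"collection_count": c.Int("collection_count"),
		// Nest the raw millisecond counter under an explicit "ms" key.
		"collection_time": s.Object{"ms": c.Int("collection_time")},
	}),
}

func main() {
	var input map[string]interface{}
	raw := []byte(`{"gc":{"collection_count":20,"collection_time":2303}}`)
	if err := json.Unmarshal(raw, &input); err != nil {
		panic(err)
	}
	out, err := demo.Apply(input)
	if err != nil {
		panic(err)
	}
	// Prints a nested map equivalent to
	// {"gc":{"collection_count":20,"collection_time":{"ms":2303}}}.
	fmt.Println(out)
}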
diff --git a/x-pack/metricbeat/module/enterprisesearch/health/data_test.go b/x-pack/metricbeat/module/enterprisesearch/health/data_test.go
deleted file mode 100644
index 17072d911e29..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/health/data_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-//go:build !integration
-
-package health
-
-import (
-	"testing"
-
-	mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing"
-	"github.com/elastic/elastic-agent-libs/logp"
-
-	_ "github.com/elastic/beats/v7/x-pack/metricbeat/module/enterprisesearch"
-)
-
-func TestEventMapping(t *testing.T) {
-	logp.TestingSetup()
-	mbtest.TestDataFiles(t, "enterprisesearch", "health")
-}
diff --git a/x-pack/metricbeat/module/enterprisesearch/health/health.go b/x-pack/metricbeat/module/enterprisesearch/health/health.go
deleted file mode 100644
index a8de3c90bc30..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/health/health.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package health
-
-import (
-	"fmt"
-
-	"github.com/elastic/beats/v7/libbeat/common/cfgwarn"
-	"github.com/elastic/beats/v7/metricbeat/helper"
-	"github.com/elastic/beats/v7/metricbeat/mb"
-	"github.com/elastic/beats/v7/metricbeat/mb/parse"
-)
-
-const (
-	// defaultScheme is the default scheme to use when it is not specified in
-	// the host config.
-	defaultScheme = "http"
-
-	// defaultPath is the default path to the Enterprise Search Health API
-	defaultPath = "/api/ent/v1/internal/health"
-)
-
-var (
-	hostParser = parse.URLHostParserBuilder{
-		DefaultScheme: defaultScheme,
-		DefaultPath:   defaultPath,
-	}.Build()
-)
-
-func init() {
-	mb.Registry.MustAddMetricSet("enterprisesearch", "health", New,
-		mb.WithHostParser(hostParser),
-		mb.DefaultMetricSet(),
-	)
-}
-
-type MetricSet struct {
-	mb.BaseMetricSet
-	http         *helper.HTTP
-	XPackEnabled bool
-}
-
-func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
-	cfgwarn.Beta("The Enterprise Search health metricset is currently in beta.")
-
-	http, err := helper.NewHTTP(base)
-	if err != nil {
-		return nil, err
-	}
-	config := struct {
-		XPackEnabled bool `config:"xpack.enabled"`
-	}{
-		XPackEnabled: false,
-	}
-	if err := base.Module().UnpackConfig(&config); err != nil {
-		return nil, err
-	}
-
-	return &MetricSet{
-		base,
-		http,
-		config.XPackEnabled,
-	}, nil
-}
-
-// Makes a GET request to Enterprise Search Health API (see defaultPath)
-// and generates a monitoring event based on the fetched metrics.
-// Returns nil or an error object.
-func (m *MetricSet) Fetch(report mb.ReporterV2) error {
-	content, err := m.http.FetchContent()
-	if err != nil {
-		return fmt.Errorf("error in fetch: %w", err)
-	}
-
-	err = eventMapping(report, content, m.XPackEnabled)
-	if err != nil {
-		return fmt.Errorf("error converting event: %w", err)
-	}
-
-	return nil
-}
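The integration test below needs the full Docker stack; a lighter-weight alternative is to point the metricset at a stub server that replays the captured testdata. This is a hypothetical sketch, not part of the patch, and it assumes the mbtest helpers behave with an httptest host the same way they do with a real one:

// Hypothetical test sketch, not part of this patch: run the health metricset's
// Fetch against a local stub that replays _meta/testdata/health.json.
package health_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing"

	_ "github.com/elastic/beats/v7/x-pack/metricbeat/module/enterprisesearch"
)

func TestFetchAgainstStub(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The metricset requests defaultPath; serve the canned document for
		// any path. Assumes the test runs from the health package directory.
		http.ServeFile(w, r, "_meta/testdata/health.json")
	}))
	defer srv.Close()

	f := mbtest.NewReportingMetricSetV2Error(t, map[string]interface{}{
		"module":     "enterprisesearch",
		"metricsets": []string{"health"},
		"hosts":      []string{srv.URL},
	})
	events, errs := mbtest.ReportingFetchV2Error(f)
	if len(errs) > 0 {
		t.Fatalf("fetch errors: %v", errs)
	}
	if len(events) == 0 {
		t.Fatal("expected at least one event")
	}
	t.Logf("event: %+v", events[0].MetricSetFields)
}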
diff --git a/x-pack/metricbeat/module/enterprisesearch/health/health_integration_test.go b/x-pack/metricbeat/module/enterprisesearch/health/health_integration_test.go
deleted file mode 100644
index a9baf47b158c..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/health/health_integration_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-//go:build integration
-
-package health
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/elastic/beats/v7/libbeat/tests/compose"
-	mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing"
-)
-
-func TestFetch(t *testing.T) {
-	service := compose.EnsureUpWithTimeout(t, 300, "enterprise_search")
-
-	config := getConfig("health", service.Host())
-	f := mbtest.NewReportingMetricSetV2Error(t, config)
-	events, errs := mbtest.ReportingFetchV2Error(f)
-	if len(errs) > 0 {
-		t.Fatalf("Expected 0 errors, had %d. %v\n", len(errs), errs)
-	}
-	assert.NotEmpty(t, events)
-	event := events[0].MetricSetFields
-	t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event)
-}
-
-func TestData(t *testing.T) {
-	service := compose.EnsureUpWithTimeout(t, 300, "enterprise_search")
-
-	config := getConfig("health", service.Host())
-
-	f := mbtest.NewReportingMetricSetV2Error(t, config)
-	err := mbtest.WriteEventsReporterV2Error(f, t, "")
-	if err != nil {
-		t.Fatal("write", err)
-	}
-}
-
-// GetConfig returns config for Enterprise Search module
-func getConfig(metricset string, host string) map[string]interface{} {
-	return map[string]interface{}{
-		"module":     "enterprisesearch",
-		"metricsets": []string{metricset},
-		"hosts":      []string{host},
-		"username":   "elastic",
-		"password":   "changeme",
-	}
-}
diff --git a/x-pack/metricbeat/module/enterprisesearch/module.yml b/x-pack/metricbeat/module/enterprisesearch/module.yml
deleted file mode 100644
index 320fd5445fc7..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/module.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-dashboards:
-  - id: e179f130-2c4a-11ec-9ab7-4fbdf1480ffc
-    file: Metricbeat-enterprise-search-overview.json
diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/docs.asciidoc b/x-pack/metricbeat/module/enterprisesearch/stats/_meta/docs.asciidoc
deleted file mode 100644
index 90fdec57ccf4..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/docs.asciidoc
+++ /dev/null
@@ -1,3 +0,0 @@
-This is the `stats` metricset of the Enterprise Search module.
-
-This metricset allows users to fetch and ingest Enterprise Search solution statistics information from the https://www.elastic.co/guide/en/enterprise-search/current/monitoring-apis.html#stats-api[Stats API].
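The response counters exposed by the Stats API only ever grow, which is why the dashboard deleted earlier in this patch derives per-second rates from them with Lens "counter_rate" columns. A hypothetical sketch of the same calculation by hand, not part of this patch; the /api/ent/v1/internal/stats path and the http.responses document shape are my assumptions based on the docs link above and the field definitions below:

// Hypothetical sketch, not part of this patch: derive a responses-per-second
// rate from two Stats API samples, mirroring the dashboard's counter_rate
// columns over enterprisesearch.stats.http.responses.*.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// fetchTotal2xx returns the cumulative 2xx response counter. Endpoint path,
// credentials, and response shape are assumptions, not defined by this patch.
func fetchTotal2xx() (float64, error) {
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:3002/api/ent/v1/internal/stats", nil)
	if err != nil {
		return 0, err
	}
	req.SetBasicAuth("elastic", "changeme")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	var doc struct {
		HTTP struct {
			Responses map[string]float64 `json:"responses"`
		} `json:"http"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil {
		return 0, err
	}
	return doc.HTTP.Responses["2xx"], nil
}

func main() {
	a, err := fetchTotal2xx()
	if err != nil {
		panic(err)
	}
	time.Sleep(10 * time.Second)
	b, err := fetchTotal2xx()
	if err != nil {
		panic(err)
	}
	// Counters are cumulative, so the delta over the interval is the rate.
	fmt.Printf("%.2f responses/sec (2xx)\n", (b-a)/10)
}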
diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/fields.yml b/x-pack/metricbeat/module/enterprisesearch/stats/_meta/fields.yml
deleted file mode 100644
index 74b36eb9b633..000000000000
--- a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/fields.yml
+++ /dev/null
@@ -1,407 +0,0 @@
-- name: stats
-  type: group
-  release: beta
-  description: Enterprise Search stats.
-  fields:
-    - name: connectors
-      type: group
-      description: Workplace Search connectors subsystem stats.
-      fields:
-        - name: job_store
-          type: group
-          description: Workplace Search connectors job store stats.
-          fields:
-            - name: waiting
-              type: long
-              description: Number of connectors jobs waiting to be processed.
-
-            - name: working
-              type: long
-              description: Number of connectors jobs currently being processed.
-
-            - name: job_types
-              type: group
-              description: Breakdown of connectors jobs by types.
-              fields:
-                - name: delete
-                  type: long
-                  description: Number of delete jobs in the jobs store.
-
-                - name: full
-                  type: long
-                  description: Number of full sync jobs in the jobs store.
-
-                - name: incremental
-                  type: long
-                  description: Number of incremental sync jobs in the jobs store.
-
-                - name: permissions
-                  type: long
-                  description: Number of permissions sync jobs in the jobs store.
-
-        - name: pool
-          type: group
-          description: Workplace Search worker pools stats.
-          fields:
-            - name: extract_worker_pool
-              type: group
-              description: Status information for the extractor workers pool.
-              fields:
-                - name: size
-                  type: long
-                  description: Worker pool size.
-
-                - name: busy
-                  type: long
-                  description: Number of busy workers.
-
-                - name: queue_depth
-                  type: long
-                  description: Number of items waiting to be processed.
-
-                - name: idle
-                  type: long
-                  description: Number of idle workers.
-
-                - name: total_completed
-                  type: long
-                  description: Number of jobs completed since the start.
-
-                - name: total_scheduled
-                  type: long
-                  description: Number of jobs scheduled since the start.
-
-            - name: subextract_worker_pool
-              type: group
-              description: Status information for the sub-extractor workers pool.
-              fields:
-                - name: size
-                  type: long
-                  description: Worker pool size.
-
-                - name: busy
-                  type: long
-                  description: Number of busy workers.
-
-                - name: queue_depth
-                  type: long
-                  description: Number of items waiting to be processed.
-
-                - name: idle
-                  type: long
-                  description: Number of idle workers.
-
-                - name: total_completed
-                  type: long
-                  description: Number of jobs completed since the start.
-
-                - name: total_scheduled
-                  type: long
-                  description: Number of jobs scheduled since the start.
-
-            - name: publish_worker_pool
-              type: group
-              description: Status information for the publish workers pool.
-              fields:
-                - name: size
-                  type: long
-                  description: Worker pool size.
-
-                - name: busy
-                  type: long
-                  description: Number of busy workers.
-
-                - name: queue_depth
-                  type: long
-                  description: Number of items waiting to be processed.
-
-                - name: idle
-                  type: long
-                  description: Number of idle workers.
-
-                - name: total_completed
-                  type: long
-                  description: Number of jobs completed since the start.
-
-                - name: total_scheduled
-                  type: long
-                  description: Number of jobs scheduled since the start.
-
-    - name: http
-      type: group
-      description: Incoming HTTP request metrics.
-      fields:
-        - name: connections
-          type: group
-          description: Incoming HTTP connection statistics.
-          fields:
-            - name: current
-              type: long
-              description: Current number of HTTP connections opened to the Enterprise Search instance.
-
-            - name: max
-              type: long
-              description: Maximum number of concurrent HTTP connections open to the Enterprise Search instance since the start.
-
-            - name: total
-              type: long
-              description: Total number of HTTP connections opened to the Enterprise Search instance since the start.
-
-        - name: network
-          type: group
-          description: Network traffic metrics.
-          fields:
-            - name: received.bytes
-              type: long
-              format: bytes
-              description: Total number of bytes received by the Enterprise Search instance since the start.
-
-            - name: sent.bytes
-              type: long
-              format: bytes
-              description: Total number of bytes sent by the Enterprise Search instance since the start.
-
-            - name: received.bytes_per_sec
-              type: long
-              format: bytes
-              description: Average number of bytes received by the Enterprise Search instance per second since the start.
-
-            - name: sent.bytes_per_sec
-              type: long
-              format: bytes
-              description: Average number of bytes sent by the Enterprise Search instance per second since the start.
-
-        - name: request_duration
-          type: group
-          description: Aggregate HTTP request duration statistics.
-          fields:
-            - name: max.ms
-              type: long
-              description: Longest HTTP connection duration since the start of the instance.
-
-            - name: mean.ms
-              type: long
-              description: Average HTTP connection duration since the start of the instance.
-
-            - name: std_dev.ms
-              type: long
-              description: Standard deviation for HTTP connection duration values since the start of the instance.
-
-        - name: responses
-          type: group
-          description: Aggregate HTTP response counts broken down by HTTP status type.
-          fields:
-            - name: 1xx
-              type: long
-              description: Total number of HTTP requests finished with a 1xx response code since the start of the instance.
-
-            - name: 2xx
-              type: long
-              description: Total number of HTTP requests finished with a 2xx response code since the start of the instance.
-
-            - name: 3xx
-              type: long
-              description: Total number of HTTP requests finished with a 3xx response code since the start of the instance.
-
-            - name: 4xx
-              type: long
-              description: Total number of HTTP requests finished with a 4xx response code since the start of the instance.
-
-            - name: 5xx
-              type: long
-              description: Total number of HTTP requests finished with a 5xx response code since the start of the instance.
-
-    - name: queues
-      type: group
-      description: Aggregate stats on the functioning of the background jobs processing pipeline within Enterprise Search.
-      fields:
-        - name: engine_destroyer.count
-          type: long
-          description: Total number of jobs processed via the engine_destroyer queue since the start of the instance.
-
-        - name: mailer.count
-          type: long
-          description: Total number of jobs processed via the mailer queue since the start of the instance.
-
-        - name: process_crawl.count
-          type: long
-          description: Total number of jobs processed via the process_crawl queue since the start of the instance.
-
-        - name: failed.count
-          type: long
-          description: Total number of jobs waiting in the failed queue.
-
-
-    - name: crawler
-      type: group
-      description: Aggregate stats on the functioning of the crawler subsystem within Enterprise Search.
-      fields:
-        - name: global
-          type: group
-          description: Global deployment-wide metrics for the crawler.
-          fields:
-            - name: crawl_requests
-              type: group
-              description: Crawl request summary for the deployment.
-              fields:
-                - name: pending
-                  type: long
-                  description: Total number of crawl requests waiting to be processed.
-                - name: active
-                  type: long
-                  description: Total number of crawl requests currently being processed (running crawls).
-                - name: successful
-                  type: long
-                  description: Total number of crawl requests that have succeeded.
-                - name: failed
-                  type: long
-                  description: Total number of failed crawl requests.
-
-        - name: node
-          type: group
-          description: Node-level statistics for the crawler.
-          fields:
-            - name: pages_visited
-              type: long
-              description: Total number of pages visited by the crawler since the instance start.
-
-            - name: urls_allowed
-              type: long
-              description: Total number of URLs allowed by the crawler during discovery since the instance start.
-
-            - name: urls_denied
-              type: group
-              description: Total number of URLs denied by the crawler during discovery since the instance start, broken down by deny reason.
-              fields:
-                - name: already_seen
-                  type: long
-                  description: Total number of URLs not followed because of URL de-duplication (each URL is visited only once).
-
-                - name: domain_filter_denied
-                  type: long
-                  description: Total number of URLs denied because of an unknown domain.
-
-                - name: incorrect_protocol
-                  type: long
-                  description: Total number of URLs with incorrect/invalid/unsupported protocols.
-
-                - name: link_too_deep
-                  type: long
-                  description: Total number of URLs not followed due to crawl depth limits.
-
-                - name: nofollow
-                  type: long
-                  description: Total number of URLs denied due to a nofollow meta tag or an HTML link attribute.
-
-                - name: unsupported_content_type
-                  type: long
-                  description: Total number of URLs denied due to an unsupported content type.
-
-            - name: status_codes
-              type: group
-              description: HTTP request result counts, by status code.
-              fields:
-                - name: "200"
-                  type: long
-                  description: Total number of HTTP 200 responses seen by the crawler since the instance start.
-                - name: "301"
-                  type: long
-                  description: Total number of HTTP 301 responses seen by the crawler since the instance start.
-                - name: "302"
-                  type: long
-                  description: Total number of HTTP 302 responses seen by the crawler since the instance start.
-                - name: "400"
-                  type: long
-                  description: Total number of HTTP 400 responses seen by the crawler since the instance start.
-                - name: "401"
-                  type: long
-                  description: Total number of HTTP 401 responses seen by the crawler since the instance start.
-                - name: "402"
-                  type: long
-                  description: Total number of HTTP 402 responses seen by the crawler since the instance start.
-                - name: "403"
-                  type: long
-                  description: Total number of HTTP 403 responses seen by the crawler since the instance start.
-                - name: "404"
-                  type: long
-                  description: Total number of HTTP 404 responses seen by the crawler since the instance start.
-                - name: "405"
-                  type: long
-                  description: Total number of HTTP 405 responses seen by the crawler since the instance start.
-                - name: "410"
-                  type: long
-                  description: Total number of HTTP 410 responses seen by the crawler since the instance start.
-                - name: "422"
-                  type: long
-                  description: Total number of HTTP 422 responses seen by the crawler since the instance start.
-                - name: "429"
-                  type: long
-                  description: Total number of HTTP 429 responses seen by the crawler since the instance start.
-                - name: "500"
-                  type: long
-                  description: Total number of HTTP 500 responses seen by the crawler since the instance start.
-                - name: "501"
-                  type: long
-                  description: Total number of HTTP 501 responses seen by the crawler since the instance start.
- - name: "502" - type: long - description: Total number of HTTP 502 responses seen by the crawler since the instance start. - - name: "503" - type: long - description: Total number of HTTP 503 responses seen by the crawler since the instance start. - - name: "504" - type: long - description: Total number of HTTP 504 responses seen by the crawler since the instance start. - - - name: queue_size - type: group - description: Total current URL queue size for the instance. - fields: - - name: primary - type: long - description: Total number of URLs waiting to be crawled by the instance. - - name: purge - type: long - description: Total number of URLs waiting to be checked by the purge crawl phase. - - - name: active_threads - type: long - description: Total number of crawler worker threads currently active on the instance. - - - name: workers - type: group - description: Crawler workers information for the instance. - fields: - - name: pool_size - type: long - description: Total size of the crawl workers pool (number of concurrent crawls possible) for the instance. - - name: active - type: long - description: Total number of currently active crawl workers (running crawls) for the instance. - - name: available - type: long - description: Total number of currently available (free) crawl workers for the instance. - - - name: product_usage - type: group - description: Aggregate product usage statistics for the Enterprise Search deployment. - fields: - - name: app_search - type: group - description: App Search product usage statistics. - fields: - - name: total_engines - type: long - description: Current number of App Search engines within the deployment. - - - name: workplace_search - type: group - description: Workplace Search product usage statistics. - fields: - - name: total_org_sources - type: long - description: Current number of Workplace Search org-wide content sources within the deployment. - - - name: total_private_sources - type: long - description: Current number of Workplace Search private content sources within the deployment. 
diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/config.yml b/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/config.yml deleted file mode 100644 index 766b2cc2d507..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/config.yml +++ /dev/null @@ -1,2 +0,0 @@ -type: http -url: "/api/ent/v1/internal/stats" diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json b/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json deleted file mode 100644 index 77741d831592..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json +++ /dev/null @@ -1,154 +0,0 @@ -{ - "cluster_uuid": "s7QHFAuAQmqeqnGZmAqXIA", - "http": { - "connections": { - "current": 1, - "max": 3, - "total": 29 - }, - "request_duration_ms": { - "max": 3528004, - "mean": 922170.1785714285, - "std_dev": 1068785.5936706674 - }, - "network_bytes": { - "received_total": 231403, - "received_rate": 42, - "sent_total": 704357, - "sent_rate": 5 - }, - "responses": { - "1xx": 1, - "2xx": 2, - "3xx": 3, - "4xx": 4, - "5xx": 5 - } - }, - "app": { - "pid": 41556, - "start": "2021-09-23T21:55:53+00:00", - "end": "2021-09-23T21:56:53+00:00", - "metrics": { - "timers.cron.local.cron-refresh_elasticsearch_license.total_job_time": { - "sum": 114.21483382582664, - "max": 1140.21, - "mean": 555.1 - }, - "timers.actastic.relation.search": { - "sum": 51.183209056034684, - "max": 35.318125039339066, - "mean": 25.591604528017342 - }, - "timers.cron.local.cron-keep_filebeat_alive.total_job_time": { - "sum": 52.856332855299115, - "max": 52.856332855299115, - "mean": 52.856332855299115 - } - } - }, - "queues": { - "engine_destroyer": { - "pending": 1 - }, - "process_crawl": { - "pending": 2 - }, - "mailer": { - "pending": 3 - }, - "failed": [ - { - "foo": 123 - } - ] - }, - "connectors": { - "alive": false, - "pool": { - "extract_worker_pool": { - "running": true, - "queue_depth": 1, - "size": 2, - "busy": 3, - "idle": 4, - "total_scheduled": 5, - "total_completed": 6 - }, - "subextract_worker_pool": { - "running": true, - "queue_depth": 7, - "size": 8, - "busy": 9, - "idle": 10, - "total_scheduled": 11, - "total_completed": 12 - }, - "publish_worker_pool": { - "running": true, - "queue_depth": 13, - "size": 14, - "busy": 15, - "idle": 16, - "total_scheduled": 17, - "total_completed": 18 - } - }, - "job_store": { - "waiting": 1, - "working": 2, - "job_types": { - "full": 3, - "incremental": 4, - "delete": 5, - "permissions": 6 - } - } - }, - "crawler": { - "global": { - "crawl_requests": { - "pending": 0, - "active": 2, - "successful": 2, - "failed": 0 - } - }, - "node": { - "pages_visited": 385, - "urls_allowed": 478, - "urls_denied": { - "nofollow": 98, - "already_seen": 8466, - "domain_filter_denied": 5286, - "incorrect_protocol": 23, - "unsupported_content_type": 4, - "link_too_deep": 45 - }, - "status_codes": { - "200": 367, - "301": 18 - }, - "queue_size": { - "primary": 91, - "purge": 0 - }, - "active_threads": 18, - "workers": { - "pool_size": 16, - "active": 2, - "available": 14 - } - } - }, - "product_usage": { - "app_search": { - "total_engines": 1 - }, - "workplace_search": { - "total_org_sources": 2, - "total_private_sources": 3, - "total_queries_last_30_days": 4 - } - } -} \ No newline at end of file diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json-expected.json b/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json-expected.json 
deleted file mode 100644 index 379ae1104796..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/stats/_meta/testdata/stats.json-expected.json +++ /dev/null @@ -1,154 +0,0 @@ -[ - { - "enterprisesearch": { - "cluster_uuid": "s7QHFAuAQmqeqnGZmAqXIA", - "stats": { - "connectors": { - "job_store": { - "job_types": { - "delete": 5, - "full": 3, - "incremental": 4, - "permissions": 6 - }, - "waiting": 1, - "working": 2 - }, - "pool": { - "extract_worker_pool": { - "busy": 3, - "idle": 4, - "queue_depth": 1, - "size": 2, - "total_completed": 5, - "total_scheduled": 5 - }, - "publish_worker_pool": { - "busy": 15, - "idle": 16, - "queue_depth": 13, - "size": 14, - "total_completed": 17, - "total_scheduled": 17 - }, - "subextract_worker_pool": { - "busy": 9, - "idle": 10, - "queue_depth": 7, - "size": 8, - "total_completed": 11, - "total_scheduled": 11 - } - } - }, - "crawler": { - "global": { - "crawl_requests": { - "active": 2, - "failed": 0, - "pending": 0, - "successful": 2 - } - }, - "node": { - "active_threads": 18, - "pages_visited": 385, - "queue_size": { - "primary": 91, - "purge": 0 - }, - "status_codes": { - "200": 367, - "301": 18 - }, - "urls_allowed": 478, - "urls_denied": { - "already_seen": 8466, - "domain_filter_denied": 5286, - "incorrect_protocol": 23, - "link_too_deep": 45, - "nofollow": 98, - "unsupported_content_type": 4 - }, - "workers": { - "active": 2, - "available": 14, - "pool_size": 16 - } - } - }, - "http": { - "connections": { - "current": 1, - "max": 3, - "total": 29 - }, - "network": { - "received": { - "bytes": 231403, - "bytes_per_sec": 42 - }, - "sent": { - "bytes": 704357, - "bytes_per_sec": 5 - } - }, - "request_duration": { - "max": { - "ms": 3528004 - }, - "mean": { - "ms": 922170 - }, - "std_dev": { - "ms": 1068785 - } - }, - "responses": { - "1xx": 1, - "2xx": 2, - "3xx": 3, - "4xx": 4, - "5xx": 5 - } - }, - "product_usage": { - "app_search": { - "total_engines": 1 - }, - "workplace_search": { - "total_org_sources": 2, - "total_private_sources": 3 - } - }, - "queues": { - "engine_destroyer": { - "count": 1 - }, - "failed": { - "count": 1 - }, - "mailer": { - "count": 3 - }, - "process_crawl": { - "count": 2 - } - } - } - }, - "event": { - "dataset": "enterprisesearch.stats", - "duration": 115000, - "module": "enterprisesearch" - }, - "metricset": { - "name": "stats", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "enterprisesearch" - } - } -] \ No newline at end of file diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/data.go b/x-pack/metricbeat/module/enterprisesearch/stats/data.go deleted file mode 100644 index cd1ebd13dc3d..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/stats/data.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package stats - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/joeshaw/multierror" - - s "github.com/elastic/beats/v7/libbeat/common/schema" - c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" - "github.com/elastic/beats/v7/metricbeat/helper/elastic" - "github.com/elastic/beats/v7/metricbeat/mb" - "github.com/elastic/elastic-agent-libs/mapstr" -) - -var ( - connectorsPoolSchema = s.Schema{ - "queue_depth": c.Int("queue_depth"), - "size": c.Int("size"), - "busy": c.Int("busy"), - "idle": c.Int("idle"), - "total_scheduled": c.Int("total_scheduled"), - "total_completed": c.Int("total_scheduled"), - } - - schema = s.Schema{ - "http": c.Dict("http", s.Schema{ - "connections": c.Dict("connections", s.Schema{ - "current": c.Int("current"), - "max": c.Int("max"), - "total": c.Int("total"), - }), - - "request_duration": c.Dict("request_duration_ms", s.Schema{ - "max": s.Object{"ms": c.Int("max")}, - "mean": s.Object{"ms": c.Int("mean")}, - "std_dev": s.Object{"ms": c.Int("std_dev")}, - }), - - "network": c.Dict("network_bytes", s.Schema{ - "received": s.Object{ - "bytes": c.Int("received_total"), - "bytes_per_sec": c.Int("received_rate"), - }, - "sent": s.Object{ - "bytes": c.Int("sent_total"), - "bytes_per_sec": c.Int("sent_rate"), - }, - }), - - "responses": c.Dict("responses", s.Schema{ - "1xx": c.Int("1xx"), - "2xx": c.Int("2xx"), - "3xx": c.Int("3xx"), - "4xx": c.Int("4xx"), - "5xx": c.Int("5xx"), - }), - }), - - "queues": c.Dict("queues", s.Schema{ - "engine_destroyer": s.Object{"count": c.Int("engine_destroyer.pending")}, - "process_crawl": s.Object{"count": c.Int("process_crawl.pending")}, - "mailer": s.Object{"count": c.Int("mailer.pending")}, - "failed": s.Object{"count": c.Int("failed.count")}, - }), - - "connectors": c.Dict("connectors", s.Schema{ - "pool": c.Dict("pool", s.Schema{ - "extract_worker_pool": c.Dict("extract_worker_pool", connectorsPoolSchema), - "subextract_worker_pool": c.Dict("subextract_worker_pool", connectorsPoolSchema), - "publish_worker_pool": c.Dict("publish_worker_pool", connectorsPoolSchema), - }), - - "job_store": c.Dict("job_store", s.Schema{ - "waiting": c.Int("waiting"), - "working": c.Int("working"), - "job_types": c.Dict("job_types", s.Schema{ - "full": c.Int("full"), - "incremental": c.Int("incremental"), - "delete": c.Int("delete"), - "permissions": c.Int("permissions"), - }), - }), - }), - - "crawler": c.Dict("crawler", s.Schema{ - "global": c.Dict("global", s.Schema{ - "crawl_requests": c.Dict("crawl_requests", s.Schema{ - "pending": c.Int("pending"), - "active": c.Int("active"), - "successful": c.Int("successful"), - "failed": c.Int("failed"), - }), - }), - - "node": c.Dict("node", s.Schema{ - "pages_visited": c.Int("pages_visited"), - "urls_allowed": c.Int("urls_allowed"), - "urls_denied": c.Dict("urls_denied", s.Schema{ - "already_seen": c.Int("already_seen", s.Optional), - "domain_filter_denied": c.Int("domain_filter_denied", s.Optional), - "incorrect_protocol": c.Int("incorrect_protocol", s.Optional), - "link_too_deep": c.Int("link_too_deep", s.Optional), - "nofollow": c.Int("nofollow", s.Optional), - "unsupported_content_type": c.Int("unsupported_content_type", s.Optional), - }), - - "status_codes": c.Dict("status_codes", s.Schema{ - "200": c.Int("200", s.Optional), - "301": c.Int("301", s.Optional), - "302": c.Int("302", s.Optional), - "304": c.Int("304", s.Optional), - "400": c.Int("400", s.Optional), - "401": c.Int("401", s.Optional), - "402": c.Int("402", s.Optional), - "403": c.Int("403", s.Optional), - 
"404": c.Int("404", s.Optional), - "405": c.Int("405", s.Optional), - "410": c.Int("410", s.Optional), - "422": c.Int("422", s.Optional), - "429": c.Int("429", s.Optional), - "500": c.Int("500", s.Optional), - "501": c.Int("501", s.Optional), - "502": c.Int("502", s.Optional), - "503": c.Int("503", s.Optional), - "504": c.Int("504", s.Optional), - }), - - "queue_size": c.Dict("queue_size", s.Schema{ - "primary": c.Int("primary"), - "purge": c.Int("purge"), - }), - - "active_threads": c.Int("active_threads"), - "workers": c.Dict("workers", s.Schema{ - "pool_size": c.Int("pool_size"), - "active": c.Int("active"), - "available": c.Int("available"), - }), - }), - }), - - "product_usage": c.Dict("product_usage", s.Schema{ - "app_search": c.Dict("app_search", s.Schema{ - "total_engines": c.Int("total_engines"), - }), - "workplace_search": c.Dict("workplace_search", s.Schema{ - "total_org_sources": c.Int("total_org_sources"), - "total_private_sources": c.Int("total_private_sources"), - }), - }), - } -) - -func eventMapping(report mb.ReporterV2, input []byte, isXpack bool) error { - var data map[string]interface{} - err := json.Unmarshal(input, &data) - if err != nil { - return err - } - var errs multierror.Errors - - // All events need to have a cluster_uuid to work with Stack Monitoring - event := mb.Event{ - ModuleFields: mapstr.M{}, - MetricSetFields: mapstr.M{}, - } - event.ModuleFields.Put("cluster_uuid", data["cluster_uuid"]) - - // Get queues information - queues, ok := data["queues"].(map[string]interface{}) - if ok { - // Get the list of failed items - failed, ok := queues["failed"].([]interface{}) - if ok { - // Use the failed items count as a metric - queues["failed.count"] = len(failed) - } else { - errs = append(errs, errors.New("queues.failed is not an array of maps")) - } - } else { - errs = append(errs, errors.New("queues is not a map")) - } - - // xpack.enabled in config using standalone metricbeat writes to `.monitoring` instead of `metricbeat-*` - // When using Agent, the index name is overwritten anyways. - if isXpack { - index := elastic.MakeXPackMonitoringIndexName(elastic.EnterpriseSearch) - event.Index = index - } - - event.MetricSetFields, err = schema.Apply(data) - if err != nil { - errs = append(errs, fmt.Errorf("failure to apply stats schema: %w", err)) - } else { - report.Event(event) - } - - return errs.Err() -} diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/data_test.go b/x-pack/metricbeat/module/enterprisesearch/stats/data_test.go deleted file mode 100644 index a183fe6d1621..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/stats/data_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -//go:build !integration - -package stats - -import ( - "testing" - - mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" - "github.com/elastic/elastic-agent-libs/logp" - - _ "github.com/elastic/beats/v7/x-pack/metricbeat/module/enterprisesearch" -) - -func TestEventMapping(t *testing.T) { - logp.TestingSetup() - mbtest.TestDataFiles(t, "enterprisesearch", "stats") -} diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/stats.go b/x-pack/metricbeat/module/enterprisesearch/stats/stats.go deleted file mode 100644 index 234a65d0d116..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/stats/stats.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stats - -import ( - "fmt" - - "github.com/elastic/beats/v7/libbeat/common/cfgwarn" - "github.com/elastic/beats/v7/metricbeat/helper" - "github.com/elastic/beats/v7/metricbeat/mb" - "github.com/elastic/beats/v7/metricbeat/mb/parse" -) - -const ( - // defaultScheme is the default scheme to use when it is not specified in - // the host config. - defaultScheme = "http" - - // defaultPath is the default path to the Enterprise Search Stats API - defaultPath = "/api/ent/v1/internal/stats" -) - -var ( - hostParser = parse.URLHostParserBuilder{ - DefaultScheme: defaultScheme, - DefaultPath: defaultPath, - }.Build() -) - -func init() { - mb.Registry.MustAddMetricSet("enterprisesearch", "stats", New, - mb.WithHostParser(hostParser), - mb.DefaultMetricSet(), - ) -} - -type MetricSet struct { - mb.BaseMetricSet - http *helper.HTTP - XPackEnabled bool -} - -func New(base mb.BaseMetricSet) (mb.MetricSet, error) { - cfgwarn.Beta("The Enterprise Search stats metricset is currently in beta.") - - http, err := helper.NewHTTP(base) - if err != nil { - return nil, err - } - - config := struct { - XPackEnabled bool `config:"xpack.enabled"` - }{ - XPackEnabled: false, - } - if err := base.Module().UnpackConfig(&config); err != nil { - return nil, err - } - return &MetricSet{ - base, - http, - config.XPackEnabled, - }, nil -} - -// Makes a GET request to Enterprise Search Stats API (see defaultPath) -// and generates a monitoring event based on the fetched metrics. -// Returns nil or an error object. -func (m *MetricSet) Fetch(report mb.ReporterV2) error { - content, err := m.http.FetchContent() - if err != nil { - return fmt.Errorf("error in fetch: %w", err) - } - - err = eventMapping(report, content, m.XPackEnabled) - if err != nil { - return fmt.Errorf("error converting event: %w", err) - } - - return nil -} diff --git a/x-pack/metricbeat/module/enterprisesearch/stats/stats_integration_test.go b/x-pack/metricbeat/module/enterprisesearch/stats/stats_integration_test.go deleted file mode 100644 index b3af4b1a5ab0..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/stats/stats_integration_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -//go:build integration - -package stats - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/beats/v7/libbeat/tests/compose" - mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" -) - -func TestFetch(t *testing.T) { - service := compose.EnsureUpWithTimeout(t, 300, "enterprise_search") - - config := getConfig("stats", service.Host()) - f := mbtest.NewReportingMetricSetV2Error(t, config) - events, errs := mbtest.ReportingFetchV2Error(f) - if len(errs) > 0 { - t.Fatalf("Expected 0 errors, had %d. %v\n", len(errs), errs) - } - assert.NotEmpty(t, events) - event := events[0].MetricSetFields - t.Logf("%s/%s event: %+v", f.Module().Name(), f.Name(), event) -} - -func TestData(t *testing.T) { - service := compose.EnsureUpWithTimeout(t, 300, "enterprise_search") - - config := getConfig("stats", service.Host()) - - f := mbtest.NewReportingMetricSetV2Error(t, config) - err := mbtest.WriteEventsReporterV2Error(f, t, "") - if err != nil { - t.Fatal("write", err) - } -} - -// GetConfig returns config for Enterprise Search module -func getConfig(metricset string, host string) map[string]interface{} { - return map[string]interface{}{ - "module": "enterprisesearch", - "metricsets": []string{metricset}, - "hosts": []string{host}, - "username": "elastic", - "password": "changeme", - } -} diff --git a/x-pack/metricbeat/module/enterprisesearch/test_enterprisesearch.py b/x-pack/metricbeat/module/enterprisesearch/test_enterprisesearch.py deleted file mode 100644 index cba273bc7eec..000000000000 --- a/x-pack/metricbeat/module/enterprisesearch/test_enterprisesearch.py +++ /dev/null @@ -1,81 +0,0 @@ -"""Integration tests for the Enterprise Search Metricbeat module""" - -import os -import unittest -from xpack_metricbeat import XPackTest, metricbeat - - -class Test(XPackTest): - COMPOSE_SERVICES = ['enterprise_search'] - COMPOSE_TIMEOUT = 600 - - # ------------------------------------------------------------------------- - @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, 'integration test') - def test_health(self): - """Tests the Health API and the associated metricset""" - - # Setup the environment - self.setup_environment(metricset="health") - - # Get a single event for testing - evt = self.get_event() - - self.assertIn("enterprisesearch", evt) - self.assertIn("health", evt["enterprisesearch"]) - - health = evt["enterprisesearch"]["health"] - self.assertIn("jvm", health) - - # ------------------------------------------------------------------------- - @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, 'integration test') - def test_stats(self): - """Tests the Stats API and the associated metricset""" - - # Setup the environment - self.setup_environment(metricset="stats") - - # Get a single event for testing - evt = self.get_event() - - self.assertIn("enterprisesearch", evt) - self.assertIn("stats", evt["enterprisesearch"]) - - stats = evt["enterprisesearch"]["stats"] - self.assertIn("http", stats) - - # ------------------------------------------------------------------------- - def setup_environment(self, metricset): - """Sets up the testing environment and starts all components needed""" - - self.render_config_template(modules=[{ - "name": "enterprisesearch", - "metricsets": [metricset], - "hosts": [self.compose_host(service="enterprise_search")], - "username": self.get_username(), - "password": self.get_password(), - "period": "5s" - }]) - - proc = self.start_beat(home=self.beat_path) - self.wait_until(lambda: self.output_lines() > 0) - proc.check_kill_and_wait() - 
self.assert_no_logged_warnings()
-
-    def get_event(self):
-        """Gets a single event and checks that all fields are documented.
-        Returns the event hash."""
-
-        output = self.read_output_json()
-        self.assertEqual(len(output), 1)
-        self.assert_fields_are_documented(output[0])
-        return output[0]
-
-    @staticmethod
-    def get_username():
-        """Returns the user name to be used for Enterprise Search"""
-        return os.getenv('ENT_SEARCH_USER', 'elastic')
-
-    @staticmethod
-    def get_password():
-        """Returns the password to be used for Enterprise Search"""
-        return os.getenv('ENT_SEARCH_PASSWORD', 'changeme')
diff --git a/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled b/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled
deleted file mode 100644
index 0af7916573a0..000000000000
--- a/x-pack/metricbeat/modules.d/enterprisesearch-xpack.yml.disabled
+++ /dev/null
@@ -1,11 +0,0 @@
-# Module: enterprisesearch
-# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-enterprisesearch.html
-
-- module: enterprisesearch
-  xpack.enabled: true
-  metricsets: ["health", "stats"]
-  enabled: true
-  period: 10s
-  hosts: ["http://localhost:3002"]
-  #username: "user"
-  #password: "secret"
diff --git a/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled b/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled
deleted file mode 100644
index 122e56b627b1..000000000000
--- a/x-pack/metricbeat/modules.d/enterprisesearch.yml.disabled
+++ /dev/null
@@ -1,10 +0,0 @@
-# Module: enterprisesearch
-# Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-enterprisesearch.html
-
-- module: enterprisesearch
-  metricsets: ["health", "stats"]
-  enabled: true
-  period: 10s
-  hosts: ["http://localhost:3002"]
-  #username: "user"
-  #password: "secret"

From a964b89e5b6080615c20bd4db37deabf86db0ecc Mon Sep 17 00:00:00 2001
From: Maurizio Branca
Date: Sat, 1 Mar 2025 19:37:58 +0100
Subject: [PATCH 41/41] [azure-eventhub] Update sdk/messaging/azeventhubs to v1.3.0 (#42794)

Upgrade the `sdk/messaging/azeventhubs` package to the latest stable
version, v1.3.0. This package is the core library of the modern Event Hub
SDK we use in the `azure-eventhub` input v2. Moving from v1.2.1 to v1.3.0
brings several bug fixes and one new feature: support for TCP proxies
with AMQP (an illustrative sketch of wiring this up follows the diff at
the end of this patch).
---
 NOTICE.txt | 234 ++++++++++++++++++++++++++++++++++++++++++++---------
 go.mod     |  12 +--
 go.sum     |  42 ++++++----
 3 files changed, 229 insertions(+), 59 deletions(-)

diff --git a/NOTICE.txt b/NOTICE.txt
index afc3813e3c76..069f419fce6e 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -1742,11 +1742,11 @@ SOFTWARE.
-------------------------------------------------------------------------------- Dependency : github.com/Azure/azure-sdk-for-go/sdk/azcore -Version: v1.13.0 +Version: v1.16.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/azcore@v1.13.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/azcore@v1.16.0/LICENSE.txt: MIT License @@ -1773,11 +1773,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/Azure/azure-sdk-for-go/sdk/azidentity -Version: v1.7.0 +Version: v1.8.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/azidentity@v1.7.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/azidentity@v1.8.0/LICENSE.txt: MIT License @@ -1804,11 +1804,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs -Version: v1.2.1 +Version: v1.3.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/messaging/azeventhubs@v1.2.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/messaging/azeventhubs@v1.3.0/LICENSE.txt: Copyright (c) Microsoft Corporation. @@ -1985,11 +1985,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -Version: v1.4.0 +Version: v1.5.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/storage/azblob@v1.4.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/storage/azblob@v1.5.0/LICENSE.txt: MIT License @@ -32573,6 +32573,37 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-pipeline-g OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE +-------------------------------------------------------------------------------- +Dependency : github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache +Version: v0.3.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/LICENSE.txt: + +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE + + -------------------------------------------------------------------------------- Dependency : github.com/Azure/azure-sdk-for-go/sdk/internal Version: v1.10.0 @@ -32606,11 +32637,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub -Version: v1.2.0 +Version: v1.3.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub@v1.2.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub@v1.3.0/LICENSE.txt: MIT License @@ -32787,11 +32818,11 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/azure-sdk-for-go -------------------------------------------------------------------------------- Dependency : github.com/Azure/go-amqp -Version: v1.0.5 +Version: v1.3.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/go-amqp@v1.0.5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/!azure/go-amqp@v1.3.0/LICENSE: MIT License @@ -34487,13 +34518,44 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/AzureAD/microsoft-authentication-extensions-for-go/cache +Version: v0.1.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-authentication-extensions-for-go/cache@v0.1.1/LICENSE: + + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + + -------------------------------------------------------------------------------- Dependency : github.com/AzureAD/microsoft-authentication-library-for-go -Version: v1.2.2 +Version: v1.3.2 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-authentication-library-for-go@v1.2.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/!azure!a!d/microsoft-authentication-library-for-go@v1.3.2/LICENSE: MIT License @@ -40690,6 +40752,29 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/coder/websocket +Version: v1.8.12 +Licence type (autodetected): ISC +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/coder/websocket@v1.8.12/LICENSE.txt: + +Copyright (c) 2023 Anmol Sethi + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/containerd/log Version: v0.1.0 @@ -41405,6 +41490,37 @@ THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/dgryski/go-rendezvous +Version: v0.0.0-20200823014737-9f7001d12a5f +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dgryski/go-rendezvous@v0.0.0-20200823014737-9f7001d12a5f/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2017-2020 Damian Gryski + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/dimchansky/utfbom Version: v1.1.0 @@ -52254,6 +52370,38 @@ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/keybase/go-keychain +Version: v0.0.0-20231219164618-57a3676c3af6 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/keybase/go-keychain@v0.0.0-20231219164618-57a3676c3af6/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2015 Keybase + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + -------------------------------------------------------------------------------- Dependency : github.com/klauspost/asmfmt Version: v1.3.2 @@ -56864,6 +57012,41 @@ Contents of probable licence file $GOMODCACHE/github.com/prometheus/client_golan limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/redis/go-redis/v9 +Version: v9.6.1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/redis/go-redis/v9@v9.6.1/LICENSE: + +Copyright (c) 2013 The github.com/redis/go-redis Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/rogpeppe/go-internal Version: v1.13.1 @@ -74862,29 +75045,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : nhooyr.io/websocket -Version: v1.8.11 -Licence type (autodetected): ISC --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/nhooyr.io/websocket@v1.8.11/LICENSE.txt: - -Copyright (c) 2023 Anmol Sethi - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- - -------------------------------------------------------------------------------- Dependency : sigs.k8s.io/json Version: v0.0.0-20221116044647-bc3834ca7abd diff --git a/go.mod b/go.mod index 30f5522581b2..4f39957943c4 100644 --- a/go.mod +++ b/go.mod @@ -153,15 +153,15 @@ require ( cloud.google.com/go v0.116.0 cloud.google.com/go/compute v1.29.0 cloud.google.com/go/redis v1.17.2 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 - github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/consumption/armconsumption v1.1.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/costmanagement/armcostmanagement v1.1.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.8.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/go-autorest/autorest/adal v0.9.24 github.com/aerospike/aerospike-client-go/v7 v7.7.1 @@ -246,14 +246,14 @@ require ( github.com/Azure/azure-amqp-common-go/v4 v4.2.0 // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/go-amqp v1.0.5 // indirect + github.com/Azure/go-amqp v1.3.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect diff --git a/go.sum b/go.sum index 5d1702535981..3cd1c865a183 100644 --- a/go.sum +++ b/go.sum @@ -54,20 +54,22 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= 
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
-github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1 h1:0f6XnzroY1yCQQwxGf/n/2xlaBF02Qhof2as99dGNsY=
-github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.1/go.mod h1:vMGz6NOUGJ9h5ONl2kkyaqq5E0g7s4CHNSrXN5fl8UY=
+github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.3.0 h1:skbmKp8umb8jMxl4A4CwvYyfCblujU00XUB/ytUjEac=
+github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.3.0/go.mod h1:nynTZqX7jGM6FQy6Y+7uFT7Y+LhaAeO3q3d48VZzH5E=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0 h1:0nGmzwBv5ougvzfGPCO2ljFRHvun57KpNrVCMrlk0ns=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.8.0/go.mod h1:gYq8wyDgv6JLhGbAU6gg8amCPgQWRE+aCvrV2gyzdfs=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/costmanagement/armcostmanagement v1.1.1 h1:ehSLdbLah6kk6HTVc6e/lrbmbz7MMbpNxkOd3OYlhB0=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/costmanagement/armcostmanagement v1.1.1/go.mod h1:Am1cUioOk0HdZIsjpXJkQ4RIeQbwYsW6LkNIc5z/5XY=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0 h1:+dggnR89/BIIlRlQ6d19dkhhdd/mQUiQbXhyHUFiB4w=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.2.0/go.mod h1:tI9M2Q/ueFi287QRkdrhb9LHm6ZnXgkVYLRC3FhYkPw=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.3.0 h1:4hGvxD72TluuFIXVr8f4XkKZfqAa7Pj61t0jmQ7+kes=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.3.0/go.mod h1:TSH7DcFItwAufy0Lz+Ft2cyopExCpxbOxI5SkH4dRNo=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o=
@@ -82,12 +84,12 @@ github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1 h1:MyVTgWR
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.0.1/go.mod h1:GpPjLhVR9dnUoJMyHWSPy71xY9/lcmpzIPZXmF0FCVY=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80=
 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0 h1:Be6KInmFEKV81c0pOAEbRYehLMwmmGI1exuFj248AMk=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.0/go.mod h1:WCPBHsOXfBVnivScjs2ypRfimjEW0qPVLGgJkZlrIOA=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
 github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
 github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
-github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU=
-github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE=
+github.com/Azure/go-amqp v1.3.0 h1://1rikYhoIQNXJFXyoO/Rlb4+4EkHYfJceNtLlys2/4=
+github.com/Azure/go-amqp v1.3.0/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@@ -117,8 +119,10 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
@@ -267,6 +271,8 @@ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8E
 github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
+github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
 github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
 github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@@ -294,6 +300,8 @@ github.com/dgraph-io/ristretto/v2 v2.0.0 h1:l0yiSOtlJvc0otkqyMaDNysg8E9/F/TYZwMb
 github.com/dgraph-io/ristretto/v2 v2.0.0/go.mod h1:FVFokF2dRqXyPyeMnK1YDy8Fc6aTe0IKgbcd03CYeEk=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/digitalocean/go-libvirt v0.0.0-20240709142323-d8406205c752 h1:NI7XEcHzWVvBfVjSVK6Qk4wmrUfoyQxCNpBjrHelZFk=
 github.com/digitalocean/go-libvirt v0.0.0-20240709142323-d8406205c752/go.mod h1:/Ok8PA2qi/ve0Py38+oL+VxoYmlowigYRyLEODRYdgc=
 github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
@@ -670,6 +678,8 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
+github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
@@ -853,6 +863,8 @@ github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+Yb
 github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
+github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
 github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
@@ -1408,8 +1420,6 @@ kernel.org/pub/linux/libs/security/libcap/psx v1.2.57 h1:NOFATXSf5z/cMR3HIwQ3Xrd
 kernel.org/pub/linux/libs/security/libcap/psx v1.2.57/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24=
 mvdan.cc/garble v0.12.1 h1:GyKeyqr4FKhWz12ZD9kKT9VnDqFILVYxgmAE8RKd3x8=
 mvdan.cc/garble v0.12.1/go.mod h1:rJ4GvtUEuVCRAYQkpd1iG6bolz9NEnkk0iu6gdTwWqA=
-nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0=
-nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=