All Posts

Hi, I have a very simple Splunk search query that runs fine in the Splunk search portal/UI, and I am calling the same query through the search API (the same query, just URL-encoded) - what is the issue? I get 164 total events in the Splunk portal, but when I run the same query translated into an encoded URL through a Python script, I get only 157 records/rows. Since this search is only for yesterday, I am using earliest=-1d@d latest=-0d@d:

index=App001_logs sourcetype="App001_logs_st" earliest=-1d@d latest=-0d@d organization IN ("InternalApps","ExternalApps") AppclientId="ABC123" status_code=200 environment="UAT" | table _time, AppclientId, organization, environment, proxyBasePath, api_name

The exact same query is translated into an encoded URL (https:// plus the whole search query), and when I run the Python script on my desktop (my time zone is CST) I get only 157 records/rows. I think something is going on between UTC and CST. This is what I see in the Splunk portal: 164 events (5/30/25 12:00:00.000 AM to 5/31/25 12:00:00.000 AM). Any guidance please?
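One way to test the UTC/CST theory is to take relative time modifiers out of the equation entirely: compute yesterday's midnight-to-midnight window in the timezone the UI session uses and pass explicit epoch seconds as earliest_time/latest_time on the REST call. Relative modifiers like -1d@d are typically snapped to midnight using the timezone preference of the Splunk user account running the search, which can differ between a UI session and an API call. A minimal sketch, assuming the standard /services/search/jobs/export endpoint; the hostname, credentials, and timezone key are placeholders:

# Sketch: pin the search window to explicit epoch seconds so the REST call
# and the UI query the identical absolute range, regardless of timezone.
# splunk.example.com, the credentials, and the timezone key are placeholders.
from datetime import datetime, timedelta
from zoneinfo import ZoneInfo

import requests

TZ = ZoneInfo("America/Chicago")  # the timezone the UI session showed (CST/CDT)
midnight_today = datetime.now(TZ).replace(hour=0, minute=0, second=0, microsecond=0)
earliest = int((midnight_today - timedelta(days=1)).timestamp())  # yesterday 12:00 AM
latest = int(midnight_today.timestamp())                          # today 12:00 AM

resp = requests.post(
    "https://splunk.example.com:8089/services/search/jobs/export",
    auth=("api_user", "api_password"),
    data={
        "search": (
            'search index=App001_logs sourcetype="App001_logs_st" '
            'organization IN ("InternalApps","ExternalApps") '
            'AppclientId="ABC123" status_code=200 environment="UAT" '
            "| table _time, AppclientId, organization, environment, proxyBasePath, api_name"
        ),
        "earliest_time": earliest,  # epoch seconds are timezone-unambiguous
        "latest_time": latest,
        "output_mode": "json",
    },
    verify=False,  # lab convenience only; use proper certificates in production
)
print(resp.text[:1000])

If the epoch-bounded run also returns 164, the discrepancy was the @d snapping, and aligning the API user's timezone preference with the UI session should fix the original encoded URL.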
{ "visualizations": { "viz_gsqlcpsd": { "type": "splunk.line", "dataSources": { "primary": "ds_xcdWhjuu" }, "title": "${sel... See more...
{ "visualizations": { "viz_gsqlcpsd": { "type": "splunk.line", "dataSources": { "primary": "ds_xcdWhjuu" }, "title": "${selected_server:-All Servers} - CPU Usage %" } }, "inputs": { "input_IAwTOhNf": { "options": { "items": [], "token": "selected_server", "defaultValue": "" }, "title": "Server Name", "type": "input.multiselect", "dataSources": { "primary": "ds_dIoNDOrf" }, "showProgressBar": true, "showLastUpdated": true, "context": {} }, "input_mj9iUMvw": { "options": { "defaultValue": "-15m,now", "token": "tr_hMOOrvcD" }, "title": "Time Range Input Title", "type": "input.timerange" } }, "layout": { "type": "grid", "globalInputs": [ "input_VtWuBSik", "input_mj9iUMvw" ], "options": { "backgroundColor": "transparent" }, "structure": [ { "item": "viz_gsqlcpsd", "type": "repeating", "repeatFor": { "input": "input_VtWuBSik" }, "position": { "x": 0, "y": 0, "w": 1200, "h": 400 } } ] }, "dataSources": { "ds_xcdWhjuu": { "type": "ds.search", "options": { "queryParameters": { "earliest": "-24h@h", "latest": "now" }, "query": "index=cto_epe_observability sourcetype=otel_host_metrics measurement=otel_system_cpu_time \r\n| search url IN($selected_server$) OR url=\"default_server\"\r\n| eval state_filter=if(match(state, \"^(idle|interrupt|nice|softirq|steal|system|user|wait)$\"), 1, 0)\r\n| where state_filter = 1\r\n| sort 0 _time url cpu state\r\n| streamstats current=f last(counter) as prev by url cpu state\r\n| eval delta = counter - prev\r\n| where delta >= 0\r\n| bin _time span=1m\r\n| eventstats sum(delta) as total by _time, url, cpu\r\n| eval percent = round((delta / total) * 100, 2)\r\n| eval url_state = url . \"_\" . state \r\n| timechart span=1m avg(percent) by url_state\r\n| foreach * [eval <<FIELD>> = round('<<FIELD>>', 2)]" }, "name": "CPU_Util_Search_1" } }, "ds_dIoNDOrf": { "type": "ds.search", "options": { "query": "index=server | dedup server|table server", "queryParameters": { "earliest": "$global_time.earliest$", "latest": "$global_time.latest$" } }, "name": "Server_Search_1" }, "title": "Test_Multi Line chart" } @kiran_panchavat Thanks for the quick response. Your understanding is right. I believe your code is static , but I want dynamic according to the query results in multi select. Here's my full code
@Sudhagar Are you looking for something like this? (See attached image.) I created it using some dummy data with static values:

{
  "title": "Static CPU Usage Charts per Host",
  "visualizations": {
    "viz_host123": {
      "dataSources": { "primary": "ds_host123" },
      "options": { "legendPlacement": "right", "xAxisTitle": "Time", "yAxisTitle": "CPU Usage (%)" },
      "title": "host123 - CPU Usage %",
      "type": "splunk.line"
    },
    "viz_host456": {
      "dataSources": { "primary": "ds_host456" },
      "options": { "legendPlacement": "right", "xAxisTitle": "Time", "yAxisTitle": "CPU Usage (%)" },
      "title": "host456 - CPU Usage %",
      "type": "splunk.line"
    },
    "viz_host789": {
      "dataSources": { "primary": "ds_host789" },
      "options": { "legendPlacement": "right", "xAxisTitle": "Time", "yAxisTitle": "CPU Usage (%)" },
      "title": "host789 - CPU Usage %",
      "type": "splunk.line"
    }
  },
  "dataSources": {
    "ds_host123": {
      "options": {
        "query": "| makeresults count=10\n| streamstats count as row\n| eval _time = relative_time(now(), \"-\" . (10 - row) . \"m\")\n| eval host=\"host123\"\n| eval state_list=split(\"user,system,idle\", \",\")\n| mvexpand state_list\n| eval state=state_list\n| eval percent=case(state==\"user\",20+random()%10,state==\"system\",10+random()%5,state==\"idle\",70+random()%10)\n| eval host_state=host.\"_\".state\n| timechart span=1m avg(percent) by host_state",
        "queryParameters": { "earliest": "-30m", "latest": "now" }
      },
      "type": "ds.search"
    },
    "ds_host456": {
      "options": {
        "query": "| makeresults count=10\n| streamstats count as row\n| eval _time = relative_time(now(), \"-\" . (10 - row) . \"m\")\n| eval host=\"host456\"\n| eval state_list=split(\"user,system,idle\", \",\")\n| mvexpand state_list\n| eval state=state_list\n| eval percent=case(state==\"user\",20+random()%10,state==\"system\",10+random()%5,state==\"idle\",70+random()%10)\n| eval host_state=host.\"_\".state\n| timechart span=1m avg(percent) by host_state",
        "queryParameters": { "earliest": "-30m", "latest": "now" }
      },
      "type": "ds.search"
    },
    "ds_host789": {
      "options": {
        "query": "| makeresults count=10\n| streamstats count as row\n| eval _time = relative_time(now(), \"-\" . (10 - row) . \"m\")\n| eval host=\"host789\"\n| eval state_list=split(\"user,system,idle\", \",\")\n| mvexpand state_list\n| eval state=state_list\n| eval percent=case(state==\"user\",20+random()%10,state==\"system\",10+random()%5,state==\"idle\",70+random()%10)\n| eval host_state=host.\"_\".state\n| timechart span=1m avg(percent) by host_state",
        "queryParameters": { "earliest": "-30m", "latest": "now" }
      },
      "type": "ds.search"
    }
  },
  "layout": {
    "layoutDefinitions": {
      "layout_1": {
        "options": { "backgroundColor": "transparent" },
        "structure": [
          { "item": "viz_host123", "position": { "h": 400, "w": 1200, "x": 0, "y": 0 }, "type": "block" },
          { "item": "viz_host456", "position": { "h": 400, "w": 1200, "x": 0, "y": 400 }, "type": "block" },
          { "item": "viz_host789", "position": { "h": 400, "w": 1200, "x": 0, "y": 800 }, "type": "block" }
        ],
        "type": "grid"
      }
    },
    "tabs": {
      "items": [ { "label": "New tab", "layoutId": "layout_1" } ]
    }
  }
}
I am trying to repeat a line chart for a multiple-host selection. Each line chart should display the CPU usage for each selected host separately. Here is my full source code in Dashboard Studio:

{
  "visualizations": {
    "viz_gsqlcpsd": {
      "type": "splunk.line",
      "dataSources": { "primary": "ds_xcdWhjuu" },
      "title": "${selected_server:-All Servers} - CPU Usage %"
    }
  },
  "inputs": {
    "input_VtWuBSik": {
      "options": {
        "items": [
          { "label": "All", "value": "*" },
          { "label": "host123", "value": "host123" },
          { "label": "host1234", "value": "host1234" }
        ],
        "defaultValue": [ "*" ],
        "token": "selected_server"
      },
      "title": "server",
      "type": "input.multiselect"
    },
    "input_mj9iUMvw": {
      "options": { "defaultValue": "-15m,now", "token": "tr_hMOOrvcD" },
      "title": "Time Range Input Title",
      "type": "input.timerange"
    }
  },
  "layout": {
    "type": "grid",
    "globalInputs": [ "input_VtWuBSik", "input_mj9iUMvw" ],
    "options": { "backgroundColor": "transparent" },
    "structure": [
      {
        "item": "viz_gsqlcpsd",
        "type": "repeating",
        "repeatFor": { "input": "input_VtWuBSik" },
        "position": { "x": 0, "y": 0, "w": 1200, "h": 400 }
      }
    ]
  },
  "dataSources": {
    "ds_xcdWhjuu": {
      "type": "ds.search",
      "options": {
        "queryParameters": { "earliest": "-24h@h", "latest": "now" },
        "query": "index=host_metrics measurement=cpu_time \r\n| search url IN($selected_server$) OR url=\"default_server\"\r\n| eval state_filter=if(match(state, \"^(idle|interrupt|nice|softirq|steal|system|user|wait)$\"), 1, 0)\r\n| where state_filter = 1\r\n| sort 0 _time url cpu state\r\n| streamstats current=f last(counter) as prev by url cpu state\r\n| eval delta = counter - prev\r\n| where delta >= 0\r\n| bin _time span=1m\r\n| eventstats sum(delta) as total by _time, url, cpu\r\n| eval percent = round((delta / total) * 100, 2)\r\n| eval url_state = url . \"_\" . state \r\n| timechart span=1m avg(percent) by url_state\r\n| foreach * [eval <<FIELD>> = round('<<FIELD>>', 2)]"
      },
      "name": "CPU_Util_Search_1"
    }
  },
  "title": "Test_Multi Line chart"
}
Added a note to the original post that the indexers have no I/O issues and plenty of idle CPU. This post is for the scenario where the replication queue is full, causing the pipeline queues to fill as well, while plenty of resources (CPU/I/O) are still available.
One question though - won't the parallelIngestionPipelines setting starve the searches of CPU cores?
@hrawat Further insights on the suggestion shared by @gcusello:

It is recommended that indexers be provisioned with 12 to 48 CPU cores, each running at 2 GHz or higher, to ensure optimal performance. The disk subsystem should support at least 800 IOPS, ideally using SSDs for hot and warm buckets, to handle the indexing workload efficiently: https://docs.splunk.com/Documentation/Splunk/latest/Capacity/Referencehardware

For environments still using traditional hard drives, prioritize models with higher rotational speeds and lower average latency and seek times to maximize IOPS. For further insights, refer to this guide on Analyzing I/O Performance in Linux. Note that insufficient disk I/O is one of the most common performance bottlenecks in Splunk deployments, so it is crucial to thoroughly review disk subsystem requirements during hardware planning. If the indexer's CPU resources exceed those of the standard reference architecture, it may be beneficial to tune parallelization settings to further enhance performance for specific workloads.
Wait a second. Something doesn't add up here. Even ignoring the syntax of that 200MB cold volume limit, if you set hot/warm to 100GB and cold to 200GB, you'll get at most 300GB of space. In ideal conditions that's 30 days at 10GB/day (in reality you need some buffer for acceleration summaries, and pushing a filesystem to 100% usage is not a healthy practice anyway), yet the one index whose config you've shown has a 90-day retention policy. OK, you wrote that you have multiple indexes with different retention requirements, but remember to take them all into account.
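For reference, a rough sketch of how that sizing is typically spelled out in indexes.conf; the volume and index names here are hypothetical, and the values simply restate the figures above:

# Hypothetical names; values restate the sizing discussed above.
[volume:hotwarm]
path = /opt/splunk/var/hotwarm
maxVolumeDataSizeMB = 102400      # 100 GB, shared by every homePath on this volume

[volume:cold]
path = /opt/splunk/var/cold
maxVolumeDataSizeMB = 204800      # 200 GB, shared by every coldPath on this volume

[example_index]
homePath = volume:hotwarm/example_index/db
coldPath = volume:cold/example_index/colddb
thawedPath = $SPLUNK_DB/example_index/thaweddb
frozenTimePeriodInSecs = 7776000  # 90 days - only reachable if the volumes don't fill first

Whichever limit is hit first wins: if all indexes together ingest roughly 10GB/day, the 300GB ceiling is reached around day 30, and buckets start rolling to frozen long before the 90-day clock expires.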
Hi @hrawat,
two little questions:
- how many CPUs do you have on your indexers?
- what's the throughput of the storage on your indexers? In other words, do you have iowait and delayed-search issues?
The problem is probably related to insufficient processing capacity, in which case the easiest solution is adding some CPUs. If instead the problem is the second one, the only solution is moving to storage with sufficient IOPS: Splunk requires at least 800 IOPS.
Ciao.
Giuseppe
I enabled additional logging on the production setup and updated the passwords.conf and customfile.conf files - first on the search head captain (sh01), and then on another member (sh03). In both cases, logs were generated for the passwords.conf updates; however, there were no logs related to the customfile.conf file. The first set of logs corresponds to the update on the captain (sh01), and the second set corresponds to the update on the member (sh03). Sensitive fields have been redacted or anonymized.

Update on the captain (sh01):

05-30-2025 10:10:10.185 +0000 DEBUG ConfReplication [1692624 TcpChannelThread] - addCommit: to_repo=https://sh01.acme.com:8089, op_id=1252dcef9d0f33386e7feab562eba92d424515ea, applied_at=1748599810, asset_id=c922db4bf111d426f1e8eb78181cb8f43b185f52, asset_uri=/nobody/custom-app/passwords/credential:custom-app_realm:password:, optype=WRITE_STANZA, payload={ password = REDACTED [ { }, removable: yes ]\n }, extra_payload=

05-30-2025 10:10:13.591 +0000 DEBUG ConfReplication [2010047 ConfReplicationThread] - pullFrom_Locked: status=handling, from_repo=https://sh01.acme.com:8089, to_repo=https://sh03.acme.com:8089, op_id=1252dcef9d0f33386e7feab562eba92d424515ea, applied_at=1748599810, asset_id=c922db4bf111d426f1e8eb78181cb8f43b185f52, asset_uri=/nobody/custom-app/passwords/credential:custom-app_realm:password:, optype=WRITE_STANZA, payload={ password = REDACTED [ { }, removable: yes ]\n }, extra_payload=

05-30-2025 10:10:13.591 +0000 DEBUG ConfReplication [2010047 ConfReplicationThread] - pullFrom_Locked: status=applied, reason="", from_repo=https://sh01.acme.com:8089, to_repo=https://sh03.acme.com:8089, op_id=1252dcef9d0f33386e7feab562eba92d424515ea, applied_at=1748599813, asset_id=c922db4bf111d426f1e8eb78181cb8f43b185f52, asset_uri=/nobody/custom-app/passwords/credential:custom-app_realm:password:, optype=WRITE_STANZA, payload={ password = REDACTED [ { }, removable: yes ]\n }, extra_payload=

Update on the member (sh03):

05-30-2025 10:10:10.497 +0000 DEBUG ConfReplication [3612371 TcpChannelThread] - addCommit: to_repo=https://sh03.acme.com:8089, op_id=481af55d46acfb6f4da973c3aac4af9e8ab2e0e6, applied_at=1748599810, asset_id=c922db4bf111d426f1e8eb78181cb8f43b185f52, asset_uri=/nobody/custom-app/passwords/credential:custom-app_realm:password:, optype=WRITE_STANZA, payload={ password = REDACTED [ { }, removable: yes ]\n }, extra_payload=

05-30-2025 10:10:10.501 +0000 DEBUG ConfReplication [2010047 ConfReplicationThread] - ConfOpStorage: toPush ptr=0x7ff55ebfcd50, pos=0, repo=https://sh03.acme.com:8089, op_id=481af55d46acfb6f4da973c3aac4af9e8ab2e0e6, applied_at=1748599810, asset_id=c922db4bf111d426f1e8eb78181cb8f43b185f52, asset_uri=/nobody/custom-app/passwords/credential:custom-app_realm:password:, optype=WRITE_STANZA, payload={ password = REDACTED [ { }, removable: yes ]\n }, extra_payload=

05-30-2025 10:10:10.507 +0000 DEBUG ConfReplication [1993289 TcpChannelThread] - acceptPush_Locked: status=handling, from_repo=https://sh03.acme.com:8089, to_repo=https://sh01.acme.com:8089, op_id=481af55d46acfb6f4da973c3aac4af9e8ab2e0e6, applied_at=1748599810, asset_id=c922db4bf111d426f1e8eb78181cb8f43b185f52, asset_uri=/nobody/custom-app/passwords/credential:custom-app_realm:password:, optype=WRITE_STANZA, payload={ password = REDACTED [ { }, removable: yes ]\n }, extra_payload=

05-30-2025 10:10:10.511 +0000 DEBUG ConfReplication [1993289 TcpChannelThread] - acceptPush_Locked: status=applied, reason="", from_repo=https://sh03.acme.com:8089, to_repo=https://sh01.acme.com:8089, op_id=481af55d46acfb6f4da973c3aac4af9e8ab2e0e6, applied_at=1748599810, asset_id=c922db4bf111d426f1e8eb78181cb8f43b185f52, asset_uri=/nobody/custom-app/passwords/credential:custom-app_realm:password:, optype=WRITE_STANZA, payload={ password = REDACTED [ { }, removable: yes ]\n }, extra_payload=
Sorry, sent this twice and was not able to delete. 
Hi @danielbb
Tenable also publishes an app to go with the TA-Tenable add-on (TenableAppForSplunk). The recommended deployment is to install both the TA and the app on your search head(s).
For more info, check out the app on Splunkbase or the Tenable online docs at https://docs.tenable.com/integrations/Splunk/Content/Welcome.htm
Did this answer help you? If so, please consider:
- Adding karma to show it was useful
- Marking it as the solution if it resolved your issue
- Commenting if you need any clarification
Your feedback encourages the volunteers in this community to continue contributing.
Here are the configs for on-prem customers who are willing to apply them and avoid additional hardware cost. On 9.4.0 and above, most of the indexing configs are automated, which is why they were dropped from the 9.4.0 suggested list.

Note: this assumes the replication queue is full on most of the indexers and, as a result, the indexing pipeline is also full, while the indexers still have plenty of idle CPU and I/O is not an issue.

On-prem Splunk version 9.4.0 and above

indexes.conf:
[default]
maxMemMB = 100

server.conf:
[general]
autoAdjustQueue = true   (can be applied on any Splunk instance: UF/HF/SH/IDX)

Splunk version 9.1 to 9.3.x

indexes.conf:
[default]
maxMemMB = 100
maxConcurrentOptimizes = 2
maxRunningProcessGroups = 32
processTrackerServiceInterval = 0

server.conf:
[general]
parallelIngestionPipelines = 4

[queue=indexQueue]
maxSize = 500MB

[queue=parsingQueue]
maxSize = 500MB

[queue=httpInputQ]
maxSize = 500MB

What each setting does:

maxMemMB: minimizes the creation of tsidx files as much as possible, at the cost of higher memory usage by the mothership (main splunkd).

maxConcurrentOptimizes: on the indexing side this is internally 1 no matter what the setting says. On the replication-target side, launching more splunk-optimize processes means pausing the receiver until each process is launched, so reduce it to keep the receiver doing indexing work rather than launching splunk-optimize processes. With 9.4.0, both the source (index processor) and the target (replication-in thread) internally auto-adjust it to 1.

maxRunningProcessGroups: allows more splunk-optimize processes to run concurrently. With 9.4.0, it's automatic.

processTrackerServiceInterval: runs splunk-optimize processes as soon as possible. With 9.4.0, you don't have to change it.

parallelIngestionPipelines: provides more receivers on the target side. With 9.4.0, you can enable auto-scaling of pipelines.

maxSize: keeps a huge batch ingestion by an HEC client from blocking the queues and returning 503s. With 9.4.0 and autoAdjustQueue=true, it's no longer a fixed-size queue.
Hey @danielbb,
Did you already check out the developer-supported Tenable App for Splunk? It should work with your sourcetypes: https://splunkbase.splunk.com/app/4061
Here are the docs for it: https://docs.tenable.com/integrations/Splunk/Content/Splunk2/TenableAppforSplunk.htm
And there's also a full integration guide PDF that might be helpful: https://docs.tenable.com/integrations/Splunk/Content/PDF/Tenable_and_Splunk_Integration_Guide.pdf
This might give you dashboards and visualizations for your Tenable.io data.
Cheers! If this helps, please upvote.
Hey @mohsplunking,
A couple of things on your setup:
First, just to clarify - the UFs actually pull from the DS, not push to it. The deployment server is more like a config store that the forwarders check in with and grab their apps/configs from.
And yeah, you're totally right about needing the Windows TA on your heavy forwarder. You might see data without it, but you definitely want it installed on whatever is doing the parsing - which is your HF in this case. Otherwise you'll miss out on proper field extractions and parsing. Here's the install doc: https://docs.splunk.com/Documentation/WindowsAddOn/8.1.2/User/Install
So yes - the Windows TA goes on the HF (since that's where parsing happens), and your output app handles forwarding everything along to the indexers.
Cheers! If this helps, please upvote.
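In case a concrete sketch helps, here is roughly what the two output apps might look like; the hostnames, ports, and group names below are placeholders, not your actual config:

# outputs.conf in the output app the DS pushes to the UF:
[tcpout]
defaultGroup = heavy_forwarders

[tcpout:heavy_forwarders]
server = hf01.example.com:9997

# outputs.conf in the output app under etc/apps on the HF
# (the Windows TA sits alongside it there, so parsing happens on the HF):
[tcpout]
defaultGroup = indexers

[tcpout:indexers]
server = idx01.example.com:9997,idx02.example.com:9997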
I copied the effective distsearch.conf from production (using btool) to my lab setup under $SPLUNK_HOME/etc/system/local. After restarting Splunk, I verified it again with btool to confirm it matched the production configuration. Replication is still working fine in the lab setup, so it seems there's nothing wrong with distsearch.conf.
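For anyone following along, this is the kind of btool invocation described above (a sketch; run it on both instances and diff the output to compare effective settings):

$SPLUNK_HOME/bin/splunk btool distsearch list --debug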
Hello Splunkers, I have a question about Splunk architecture, and I would greatly appreciate input from architects. The scenario is: UF on log source > heavy forwarder > indexer.

Basically, a universal forwarder gets installed on a log source with a configuration to connect to the deployment server. Once it connects to the DS, the DS pushes the output app and the corresponding technology add-on (Windows/Linux) to the universal forwarder. The output app on the log source (UF) forwards to the heavy forwarder over standard port 9997. On the heavy forwarder, an output app under etc/apps forwards to the indexers.

So the questions are: do I also need a Windows TA/Linux TA app on the heavy forwarder? Is it necessary? If I don't install a TA, my understanding is that the heavy forwarder should still forward everything it receives over port 9997 (without a TA/inputs.conf) to the next Splunk instance - is that correct?

Sorry, I know it's a long read, but I hope to receive some responses. Thank you,

regards,
Moh
Thank you for the responses. I copy/pasted some of the SOAR info below, and as for the questions:
- I did define the output variable in the custom code block config.
- I am not using {0} in the sample block because it kept giving an error. I was using {1} because that was grabbing the IP through a utility, and that was working for me.
- The variable from the custom code block (extracted_ip_1) worked fine within the code block but was not set outside of it.

code_3:customer_function:extraced_ip_1

def code_3(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, loop_state_json=None, **kwargs):
    phantom.debug("code_3() called")

    regex_extract_ipv4_3__result = phantom.collect2(container=container, datapath=["regex_extract_ipv4_3:custom_function_result.data.extracted_ipv4","regex_extract_ipv4_3:custom_function_result.data.input_value"])
    container_artifact_data = phantom.collect2(container=container, datapath=["artifact:*.cef.cs1","artifact:*.cef.cs1Label"])

    regex_extract_ipv4_3_data_extracted_ipv4 = [item[0] for item in regex_extract_ipv4_3__result]
    regex_extract_ipv4_3_data_input_value = [item[1] for item in regex_extract_ipv4_3__result]
    container_artifact_cef_item_0 = [item[0] for item in container_artifact_data]
    container_artifact_cef_item_1 = [item[1] for item in container_artifact_data]

    input_parameter_0 = ""
    code_3__extracted_ip_1 = None

    ################################################################################
    ## Custom Code Start
    ################################################################################

    # Write your custom code here...
    extracted_ip_1 = regex_extract_ipv4_3_data_extracted_ipv4[0]

    ################################################################################
    ## Custom Code End
    ################################################################################

    phantom.save_run_data(key="code_3:extracted_ip_1", value=json.dumps(code_3__extracted_ip_1))

    run_query_4(container=container)

    return
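A likely explanation, judging only from the code shown above: the custom code assigns the IP to the local name extracted_ip_1, but save_run_data() serializes code_3__extracted_ip_1, which is still None when the function exits - so downstream blocks see nothing. A hedged sketch of the fix, changing only the custom-code section:

    ################################################################################
    ## Custom Code Start
    ################################################################################

    # Assign to the *output* variable that save_run_data() serializes below,
    # not just a block-local name.
    code_3__extracted_ip_1 = regex_extract_ipv4_3_data_extracted_ipv4[0]

    ################################################################################
    ## Custom Code End
    ################################################################################

With that change, json.dumps(code_3__extracted_ip_1) persists the actual IP instead of null. (The datapath label code_3:customer_function:extraced_ip_1 quoted above also looks misspelled relative to custom_function/extracted_ip_1; worth double-checking if anything downstream references it.)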