All Topics

Find Answers
Ask questions. Get answers. Find technical product solutions from passionate members of the Splunk community.

All Topics

Hello, everyone! First of all, sorry for my bad English. I have a problem joining two results. The raw data is a reg file, like this:     Windows Registry Editor Version 5.00 [HKEY_LOCAL_MACHINE\SYS... See more...
Hello,everyone! At first, sorry for my bad English. I have a problem to join two result. The raw data is a reg file, like this:     Windows Registry Editor Version 5.00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services] [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\XboxNetApiSvc] "DisplayName"="@%systemroot%\\system32\\XboxNetApiSvc.dll,-100" "ErrorControl"=dword:00000001 "ImagePath"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,\ 74,00,25,00,5c,00,73,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,73,\ 00,76,00,63,00,68,00,6f,00,73,00,74,00,2e,00,65,00,78,00,65,00,20,00,2d,00,\ 6b,00,20,00,6e,00,65,00,74,00,73,00,76,00,63,00,73,00,00,00 "Start"=dword:00000003 "Type"=dword:00000020 "Description"="@%systemroot%\\system32\\XboxNetApiSvc.dll,-101" "DependOnService"=hex(7):42,00,46,00,45,00,00,00,6d,00,70,00,73,00,73,00,76,00,\ 63,00,00,00,00,00 "ObjectName"="LocalSystem" "ServiceSidType"=dword:00000001 "RequiredPrivileges"=hex(7):53,00,65,00,54,00,63,00,62,00,50,00,72,00,69,00,76,\ 00,69,00,6c,00,65,00,67,00,65,00,00,00,53,00,65,00,49,00,6d,00,70,00,65,00,\ 72,00,73,00,6f,00,6e,00,61,00,74,00,65,00,50,00,72,00,69,00,76,00,69,00,6c,\ 00,65,00,67,00,65,00,00,00,00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\XboxNetApiSvc\Parameters] "ServiceDll"="%SystemRoot%\system32\XboxNetApiSvc.dll" "ServiceDllUnloadOnStop"=dword:00000001 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\xboxgip] "ImagePath"=hex(2):5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,\ 74,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,64,00,72,\ 00,69,00,76,00,65,00,72,00,73,00,5c,00,78,00,62,00,6f,00,78,00,67,00,69,00,\ 70,00,2e,00,73,00,79,00,73,00,00,00 "Type"=dword:00000001 "Start"=dword:00000003 "ErrorControl"=dword:00000001 "Group"="NDIS" "Tag"=dword:00000001 "DisplayName"="@xboxgip.inf,%XBOXGIP_Desc%;Xbox Game Input Protocol Driver" "Description"="@xboxgip.inf,%XBOXGIP_Desc%;Xbox Game Input Protocol Driver" 
"Owners"=hex(7):78,00,62,00,6f,00,78,00,67,00,69,00,70,00,2e,00,69,00,6e,00,66,\ 00,00,00,00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\xboxgip\Linkage] "Export"=hex(7):5c,00,44,00,65,00,76,00,69,00,63,00,65,00,5c,00,78,00,62,00,6f,\ 00,78,00,67,00,69,00,70,00,00,00,00,00 "Bind"=hex(7):00,00 "Route"=hex(7):00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\xboxgip\Parameters] [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\XblGameSave] "DisplayName"="@%systemroot%\\system32\\XblGameSave.dll,-100" "ErrorControl"=dword:00000001 "ImagePath"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,\ 74,00,25,00,5c,00,73,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,73,\ 00,76,00,63,00,68,00,6f,00,73,00,74,00,2e,00,65,00,78,00,65,00,20,00,2d,00,\ 6b,00,20,00,6e,00,65,00,74,00,73,00,76,00,63,00,73,00,00,00 "Start"=dword:00000003 "Type"=dword:00000020 "Description"="@%systemroot%\\system32\\XblGameSave.dll,-101" "DependOnService"=hex(7):55,00,73,00,65,00,72,00,4d,00,61,00,6e,00,61,00,67,00,\ 65,00,72,00,00,00,58,00,62,00,6c,00,41,00,75,00,74,00,68,00,4d,00,61,00,6e,\ 00,61,00,67,00,65,00,72,00,00,00,00,00 "ObjectName"="LocalSystem" "FailureActions"=hex:80,51,01,00,00,00,00,00,00,00,00,00,04,00,00,00,14,00,00,\ 00,01,00,00,00,10,27,00,00,01,00,00,00,10,27,00,00,01,00,00,00,10,27,00,00,\ 00,00,00,00,00,00,00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\XblGameSave\Parameters] "ServiceDll"="%SystemRoot%\System32\XblGameSave.dll" "ServiceDllUnloadOnStop"=dword:00000001 "ServiceIdleTimeout"=dword:0000003c [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Wof] "SupportedFeatures"=dword:00000003 "DisplayName"="Windows Overlay File System Filter Driver" "ErrorControl"=dword:00000001 "Group"="FSFilter Compression" "Start"=dword:00000000 "Type"=dword:00000002 "DependOnService"=hex(7):46,00,6c,00,74,00,4d,00,67,00,72,00,00,00,00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Wof\Instances] "DefaultInstance"="Wof 
Instance" [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Wof\Instances\Wof Instance] "Altitude"="40700" "Flags"=dword:00000000 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\Wof\Parameters] [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\workerdd] [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\workerdd\Device0] "InstalledDisplayDrivers"=hex(7):57,00,4f,00,52,00,4b,00,45,00,52,00,44,00,44,\ 00,00,00,00,00 "VgaCompatible"=dword:00000000 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\workfolderssvc] "DisplayName"="@%systemroot%\\system32\\workfolderssvc.dll,-102" "ErrorControl"=dword:00000001 "Group"="LocalService" "ImagePath"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,\ 74,00,25,00,5c,00,53,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,73,\ 00,76,00,63,00,68,00,6f,00,73,00,74,00,2e,00,65,00,78,00,65,00,20,00,2d,00,\ 6b,00,20,00,4c,00,6f,00,63,00,61,00,6c,00,53,00,65,00,72,00,76,00,69,00,63,\ 00,65,00,00,00 "Start"=dword:00000003 "Type"=dword:00000020 "Description"="@%systemroot%\\system32\\workfolderssvc.dll,-101" "DependOnService"=hex(7):52,00,70,00,63,00,53,00,73,00,00,00,77,00,73,00,65,00,\ 61,00,72,00,63,00,68,00,00,00,00,00 "ObjectName"="NT AUTHORITY\\LocalService" "ServiceSidType"=dword:00000001 "RequiredPrivileges"=hex(7):53,00,65,00,49,00,6d,00,70,00,65,00,72,00,73,00,6f,\ 00,6e,00,61,00,74,00,65,00,50,00,72,00,69,00,76,00,69,00,6c,00,65,00,67,00,\ 65,00,00,00,00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\wpcfltr] "DisplayName"="Family Safety Filter Driver" "ErrorControl"=dword:00000001 "Group"="NDIS" "ImagePath"=hex(2):73,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,44,00,\ 52,00,49,00,56,00,45,00,52,00,53,00,5c,00,77,00,70,00,63,00,66,00,6c,00,74,\ 00,72,00,2e,00,73,00,79,00,73,00,00,00 "Start"=dword:00000003 "Type"=dword:00000001 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\wpcfltr\Security] 
"Security"=hex:01,00,14,80,8c,00,00,00,98,00,00,00,14,00,00,00,30,00,00,00,02,\ 00,1c,00,01,00,00,00,02,80,14,00,ff,01,0f,00,01,01,00,00,00,00,00,01,00,00,\ 00,00,02,00,5c,00,04,00,00,00,00,00,14,00,fd,01,02,00,01,01,00,00,00,00,00,\ 05,12,00,00,00,00,00,18,00,ff,01,0f,00,01,02,00,00,00,00,00,05,20,00,00,00,\ 20,02,00,00,00,00,14,00,9d,01,02,00,01,01,00,00,00,00,00,05,04,00,00,00,00,\ 00,14,00,8d,01,02,00,01,01,00,00,00,00,00,05,06,00,00,00,01,01,00,00,00,00,\ 00,05,12,00,00,00,01,01,00,00,00,00,00,05,12,00,00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WPDBusEnum] "Start"=dword:00000003 "DisplayName"="@%SystemRoot%\\system32\\wpdbusenum.dll,-100" "ErrorControl"=dword:00000001 "ImagePath"=hex(2):25,00,53,00,79,00,73,00,74,00,65,00,6d,00,52,00,6f,00,6f,00,\ 74,00,25,00,5c,00,73,00,79,00,73,00,74,00,65,00,6d,00,33,00,32,00,5c,00,73,\ 00,76,00,63,00,68,00,6f,00,73,00,74,00,2e,00,65,00,78,00,65,00,20,00,2d,00,\ 6b,00,20,00,4c,00,6f,00,63,00,61,00,6c,00,53,00,79,00,73,00,74,00,65,00,6d,\ 00,4e,00,65,00,74,00,77,00,6f,00,72,00,6b,00,52,00,65,00,73,00,74,00,72,00,\ 69,00,63,00,74,00,65,00,64,00,00,00 "Type"=dword:00000020 "Description"="@%SystemRoot%\\system32\\wpdbusenum.dll,-101" "DependOnService"=hex(7):52,00,70,00,63,00,53,00,73,00,00,00,00,00 "ObjectName"="LocalSystem" "ServiceSidType"=dword:00000001 "RequiredPrivileges"=hex(7):53,00,65,00,41,00,75,00,64,00,69,00,74,00,50,00,72,\ 00,69,00,76,00,69,00,6c,00,65,00,67,00,65,00,00,00,53,00,65,00,43,00,68,00,\ 61,00,6e,00,67,00,65,00,4e,00,6f,00,74,00,69,00,66,00,79,00,50,00,72,00,69,\ 00,76,00,69,00,6c,00,65,00,67,00,65,00,00,00,53,00,65,00,43,00,72,00,65,00,\ 61,00,74,00,65,00,47,00,6c,00,6f,00,62,00,61,00,6c,00,50,00,72,00,69,00,76,\ 00,69,00,6c,00,65,00,67,00,65,00,00,00,53,00,65,00,43,00,72,00,65,00,61,00,\ 74,00,65,00,50,00,65,00,72,00,6d,00,61,00,6e,00,65,00,6e,00,74,00,50,00,72,\ 00,69,00,76,00,69,00,6c,00,65,00,67,00,65,00,00,00,53,00,65,00,49,00,6d,00,\ 
70,00,65,00,72,00,73,00,6f,00,6e,00,61,00,74,00,65,00,50,00,72,00,69,00,76,\ 00,69,00,6c,00,65,00,67,00,65,00,00,00,00,00 "FailureActions"=hex:80,51,01,00,00,00,00,00,00,00,00,00,03,00,00,00,14,00,00,\ 00,01,00,00,00,c0,d4,01,00,01,00,00,00,e0,93,04,00,00,00,00,00,00,00,00,00 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WPDBusEnum\BthActiveConnect] "ACInterval"=dword:00000078 "DCInterval"=dword:000000f0 [HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WPDBusEnum\Parameters] "ServiceDllUnloadOnStop"=dword:00000001     You can save it to .reg file and import to splunk. The first search result is : The second search result is : And my problem is how to join this two search when SrvName=SrvName2,the final result should be like below: How to solve this problem with splunk? Thank you,my friends!!  
Hi all, I have a token "Duration", and the values which will be passed to the drilldown are duration<15, 15<duration<=25 and duration>25. How can I pass the value 15-25 as a token?
I am trying to write a query to calculate the amount of bytes  received and sent per day from one of our firewalls at our site to a firewall at another site. This is to create a series of daily metri... See more...
I am trying to write a query to calculate the amount of bytes  received and sent per day from one of our firewalls at our site to a firewall at another site. This is to create a series of daily metrics for management. I've come up with a query that succeeds most of the time. Current query:   index=syslogindex device=firewall vpn=site1-to-site2 | bin span=1d _time | stats range(rcvdbyte) as rcvdbyte range(sentbyte) as sentbyte by _time   However, this query fails on days when the vpn tunnel is reset.  The rcvdbyte and sentbyte fields that come from the firewall are summed values from the moment the VPN tunnel is started. When the tunnel is reset, it creates a new tunnelid and resets the rcvdbyte, sentbyte, and duration counts to zero. And the current query I am using calculates a massive spike for those days since the range of the rcvdbyte field is now zero minus whatever the previous summed amount of the rcvdbyte field was. There are a few ways I can think of changing the query to account for when the tunnel is reset. One of my ideas is to track tunnelid over time while still calculating daily rcvdbyte and sentbyte ranges. Another is to somehow track when rcvdbyte or sentbyte or even duration get reset to zero and do a different calculation for that day. Another solution is to just disregard the days when it is reset. However, I haven't been able to implement any of the solutions I have thought of. Does anyone have any different ideas or know how I can implement one of my ideas? An example event:   date=2021-06-01 time=23:50:43 device=firewall serialid=1234567891 loggingid=123456789 type=event subtype=vpn loggingdesc="tunnel statistics" loggingmsg="tunnel statistics" action=tunnel-stats remoteip=192.168.1.2 localip=192.168.2.2 remoteport=60000 localport=60000 vpn="site1-to-site2" tunnelid=1234567891 tunneltype="vpn" duration=10170 sentbyte=120 rcvdbyte=360    
Getting numerous such errors on the Indexer Clustering: Service Activity page   Failed to trigger replication (err='Cannot replicate remote storage enabled warm bucket, bid=index_name~680~3587E22... See more...
Getting numerous such errors on the Indexer Clustering: Service Activity page   Failed to trigger replication (err='Cannot replicate remote storage enabled warm bucket, bid=index_name~680~3587E22A-71BF-4194-XXX-C1115EECE until it's uploaded')    Is this normal ? if not, please suggest how to fix this. Thanks.
Hello everyone, I have a simple question. From some of the training I took, I was told that "Volume used today" resets at midnight. Is this true or false?
Hello, I was wondering if it is possible to use Splunk to query IIS logs for a monthly application hit count for multiple web applications on the same domain? The report I need to submit would look... See more...
Hello, I was wondering if it is possible to use Splunk to query IIS logs for a monthly application hit count for multiple web applications on the same domain? The report I need to submit would look something like: http://domain/webapp1/   -  ## total monthly hits http://domain/webapp2/   -  ## total monthly hits .... I just need the overall total monthly hit count and not the total unique IP address hit count. Any help would be much appreciated.  Thank you!
I have searched around and am trying to pin down options for sending Universal Forwarder logs to Splunk Cloud. Diagrams, links, and experiences deeply appreciated.
Hello!  I need help creating a custom triggered alert condition so that when I run the search below it will send me an alert when there is a new version created compared to the versions that were liste... See more...
Hello!  I need help created a custom triggered alert condition where when I run the search below it will send me an alert when there is a new version created compared to the versions that were listed yesterday. So the trigger alert would run once each day and if I had 1.1.1 and 1.1.2 the day before but yesterday I had it give me results with 1.1.1, 1.1.2, 1.1.3 then to send me an alert when that new version is detected. How would I go about setting up that custom alert?   | inputlookup program_version.csv | where date>=relative_time(now(), "-30d@d") | eval _time=date | timechart max(count) by version
To preface my question, I've gone over docs and multiple other questions trying to find a definitive solution, but am still running into a wall. I read through the props.conf documentation, the timez... See more...
To preface my question, I've gone over docs and multiple other questions trying to find a definitive solution, but am still running into a wall. I read through the props.conf documentation, the timezone documentation, and multiple other posts. The answer may be in front of me, but if so I'm missing it and I apologize in advance. My issue: I have a bunch of devices generating syslog events that are being sent straight to Splunk with no in-between. Cisco switches and routers, Palo Alto firewalls, NTP servers, environmental sensors, and RHEL hosts. All using index:syslog and sourcetype:syslog. While I recognize this is far from ideal, it is the environment I was handed when made the Splunk admin, and I'm trying to work through it. For the most part this works; with enough field-value pair tags, field extractions, and detailed search filters I'm getting the info I need from the hosts. The problem is that a few (12) of our hosts are using GMT as their timezone, while everything else is using the local time (CST) - this is something that cannot be changed, they must use GMT time. Also, the timezone is not identified within the text of the event. It's just a timestamp. Because of this, we're getting events from those hosts that, to Splunk, are occurring six hours in the future, findable only by using (earliest=+1h latest=+7h) in our searches. This isn't viable when trying to look at events from multiple hosts in conjunction. My fix was to try and add a timezone designation within props.conf, using a regex to identify the hosts affected in a single stanza. I put the regex together and verified it works by running a search using it, which pulled only the hosts I wanted. So, in Splunk/etc/system/local/props.conf I added the stanza: [host::(doma0wkst*|domsrv(10|11)|192.168.10(12|14|16|18))] TZ = UTC To identify the effected hosts (all hosts that started with "doma0wkst", domsrc10 & 11, and 192.168.10.12, .14, .16, .18) and tell Splunk they were reporting UTC time. 
My understanding was that Splunk would take this and automatically convert the event times to local so that they would align with all the other events we receive. But, this is not working. After adding that and restarting the Splunk service, I'm still getting events from the future. My second thought was to add multiple stanzas, one per host; if that is the best solution, that is what I will do. But I figured I would ask in here to see if there were a better solution first.
Hey all, Newbie here learning Splunk. I'm starting to get into dashboards and want to create either a pie chart or just a simple count of how many times a certain string occurs in a log file. | sta... See more...
Hey all, Newbie here learning Splunk. I'm starting to get into dashboards and want to create either a pie chart or just a simple count of how many times a certain string occurs in a log file. | stats count("no phase found for entry") count("no work order found") This returns two columns but they both have 0 in them. But if I just search for each string individually or with an OR statement, it returns all entries (which is around 118 combined). I've been reading through the Splunk Documentation on stats but can't seem to find an answer on how to combine two counts of anything. Any help is appreciated!
I've been trying to resolve this since October and not getting traction.  Turning to the community for help: I have seemingly contradictory information within the same log line makes me question- do... See more...
I've been trying to resolve this since October and not getting traction.  Turning to the community for help: I have seemingly contradictory information within the same log line makes me question- do we have an issue or not?   On the one hand, i think i do because the history command shows the search is cancelled... and I trust this information.  However, there are artifacts in the logs that make me question if the search is fully running (which appears to be true since "fully_completed_search=TRUE"... so I am now confused if we have a problem or not.) Why do searches show fully_completed_search=TRUE and has_error_warn=FALSE when the info field (and history command) show "cancelled" and have a tag of "error"   BOTTOM LINE QUESTION: Are my searches are running correctly and returning all results or not?    Sample _audit log search activity that I found - not sure if this gives any usable insight Audit:[timestamp=10-01-2021 16:31:40.338, user=redacted_user, action=search, info=canceled, search_id='1633105804.108286', has_error_warn=false, fully_completed_search=true, total_run_time=18.13, event_count=0, result_count=0, available_count=0, scan_count=133645, drop_count=0, exec_time=1633105804, api_et=1633104900.000000000, api_lt=1633105800.000000000, api_index_et=N/A, api_index_lt=N/A, search_et=1633104900.000000000, search_lt=1633105800.000000000, is_realtime=0, savedsearch_name="", search_startup_time="1270", is_prjob=false, acceleration_id="98DCBC55-D36C-4671-93CD-1A950D796EC4_search_redacted_user_311d202b50b71a64", app="search", provenance="N/A", mode="historical_batch", workload_pool=standard_perf, is_proxied=false, searched_buckets=53, eliminated_buckets=0, considered_events=133645, total_slices=331408, decompressed_slices=11305, duration.command.search.index=120, invocations.command.search.index.bucketcache.hit=53, duration.command.search.index.bucketcache.hit=0, invocations.command.search.index.bucketcache.miss=0, 
duration.command.search.index.bucketcache.miss=0, invocations.command.search.index.bucketcache.error=0, duration.command.search.rawdata=2533, invocations.command.search.rawdata.bucketcache.hit=0, duration.command.search.rawdata.bucketcache.hit=0, invocations.command.search.rawdata.bucketcache.miss=0, duration.command.search.rawdata.bucketcache.miss=0, invocations.command.search.rawdata.bucketcache.error=0, roles='redacted', search='search index=oswinsec (EventID=7036 OR EventID=50 OR EventID=56 OR EventID=1000 OR EventID=1001) | eval my_ts2 = _time*1000 | eval indextime=_indextime |table my_ts2,EventID | rename EventID as EventCode']
We are utilizing a deployment server to push out UF agent config to our Citrix VM, however, not all devices are reporting into Deployment server---they are showing in Splunk cloud as all devices send... See more...
We are utilizing a deployment server to push out UF agent config to our Citrix VM, however, not all devices are reporting into Deployment server---they are showing in Splunk cloud as all devices sending data   In working with support they suggested to rename  /opt/splunkforwarder/etc/instance.cfg to backup_instances.cfg (or something similar) This seems to work for the device to register, however, this will be overwritten when a new master image push out is done Has anyone encountered this before and what steps have you used to monitor devices on DS Thanks, Jeff My engineering team uses the following script for Splunk config Seal Script # generalize splunk Stop-Process -InputObject $p -Force Start-Sleep -Seconds 3 if (get-service -Name SplunkForwarder | Where status -eq "stopped")     {         write-host "Splunk Service Stopped..."     } $Host.PrivateData.VerboseForegroundColor = 'Yellow' start-process -nonewwindow -filepath "C:\Program Files\SplunkUniversalForwarder\bin\splunk.exe" -argumentlist 'clone-prep-clear-config' -wait -verbose write-host "Splunk Machine ID removed..."
Hi, I am trying to figure out a way in which I can display the creation time of a notable event, the time it was assigned to someone, and then the time the status was set to Closed. I would then like ... See more...
Hi, I am trying to figure out a way in which i can display the creation time of notable event, the time it was assigned to someone, and then the time the status was set to Closed. I would then like to list the time difference between all 3 - it is for SLA purposes in our SOC. Note: When notables are created in my environment, the default status is "New" Seen some examples that produce the mean/average closure time for notables etc, but I am looking for a search that will show it for every notable created (say within the last 24 hours for example) Any help would be much appreciated!  
Hi Splunkers, I have an issue merging two identity lookup files on ES. In particular, my first lookup file has rows like the below:   identity priority email vagn low vag@gmail.com   Th... See more...
Hi Splunkers, I have an issue merging two identity lookup files on ES. In particular, my first lookup file has rows like the below:   identity priority email vagn low vag@gmail.com   The second lookup file looks like the below:   identity priority email vagn critical vag@gmail.com   I would expect that when I run the "| inputlookup append=T identity_lookup_expanded | entitymerge identity " command I would have a result like the below, yet this doesn't happen.   identity priority email vagn critical vag@gmail.com low   Any ideas? I have enabled the multivalue field for the "priority" field already so it can hold more than one value but didn't help.   Regards, Evang  
Hi All, We have a request from an end user for monitoring CSV files which are placed in the file share folder, and there is no Splunk agent running in the file share machine. Example :  Server01  is ... See more...
Hi All, We have request from end user for monitoring a CSV files which are placed in the file share folder and there is no splunk agent running in the file share machine. Example :  Server01  is the actual application which is generating a report and Server02 is the file share machine where the reports are stored and shared with the user.  \\fileshare\power\Powerfile\TO\IAM\Export Files\OSBD - Terminated Users List.csv  --  Location of the file to be monitored in splunk.   Above mentioned path has required permission to access the file from the share drive In Server01 we have splunk UF agent running and inputs.conf configured for monitoring the log files present in the server.   Question:  Can we use the same app which is present in the server01 to monitor the file present in the server02 as it has the required permission to access the file from that server. Stanza in inputs.conf:  [monitor://\fileshare\power\Powerfile\TO\IAM\Export Files\OSBD - Terminated Users List.csv] sourcetype = powerfile:power:osbd_terminateduser index = indexname disabled = 0 ignoreOlderThan = 14d kindly guide me how to get this share folder to be monitored in splunk.
I have a raw where each event looks like this (simplified for this exampel): {"time": "2022-01-20 16:40:02.325216", "name": "name1", "deployment": "found", "secret": "correct"} If "deployment": "... See more...
I have a raw where each event looks like this (simplified for this exampel): {"time": "2022-01-20 16:40:02.325216", "name": "name1", "deployment": "found", "secret": "correct"} If "deployment": "not_found", I would like to have a table like: time name deployment 2022-01-20 16:40:02.325216 name1 not_found If "secret": "incorrect", I would like to have a table like: time name secret 2022-01-20 16:40:02.325216 name1 incorrect   Currently, my search looks like this:   index=index host=host source=source ("not_found" OR "incorrect") | table time name deployment secret   But this means that both fields (deployment and secret) will be shown no matter what their value is. @Ayn Is there a way to have a table which varies its fields depending on a certain condition? Thanks in advance! 
Hello everyone, I have read the documentation about exporting Splunk ES content as an app: https://docs.splunk.com/Documentation/ES/7.0.0/Admin/Export  but the objects available I have to export a... See more...
Hello everyone, I have read the documentation about exporting Splunk ES content as an app: https://docs.splunk.com/Documentation/ES/7.0.0/Admin/Export  but the objects available I have to export are more than 250 that the dropdown allows me to select. I would like to move ES app to another server with it's settings, custom menu, altered dashboards, datamodels etc included. Is there a way to export it? Thank you in advance. Chris
What app and add-on can best work with logs from Imprivata? Can the Cisco Networks Add-on for Splunk Enterprise work? Does anyone have experience with this? [syslog/imprivata/*] host=imprivata sourcetyp... See more...
What app and add-on can best work with logs from imprivata.? Can Cisco Networks Add-on for Splunk Enterprise work? Has anyone with experience on this? [syslog/imprivata/*] host=imprivata sourcetype=imprivata index=imprivata disabled = false # ignoreOlderThan = 30 Read below "I need some help making sure we are getting logs from the Cisco AP and we need indexes created HF and SH. Also an parsing app for the Cisco AP logs. "
Hi all, I'm wondering how to use the icons and styles in this page: http://127.0.0.1:8000/en-US/static/docs/style/style-guide.html For example, where to find the code for using the accordion table... See more...
Hi all, I'm wondering how to use the icons and styles in this page: http://127.0.0.1:8000/en-US/static/docs/style/style-guide.html For example, where to find the code for using the accordion table? I don't want to use js or css, only what in this page of Splunk . Regards,  
Hi there, i'm a new splunk user and try to use the new Dashboard Studio variant of dashboards like the last example described here: https://docs.splunk.com/Documentation/Splunk/8.2.4/DashStudio/inp... See more...
Hi there, i'm a new splunk user and try to use the new Dashboard Studio variant of dashboards like the last example described here: https://docs.splunk.com/Documentation/Splunk/8.2.4/DashStudio/inputs#Example:_Search-based_cascading_inputs My Problem is the values for the dynamic multiselect input have whitespaces in it and as soon as i use the "IN" operator in the search query this retruns no entries. If i manually change the search query and put all the values in quotes it is working as expected. Is there any way to do this in the definition of the input? I can also append a        eval appDisplayName = \"\\\"\".appDisplayName.\"\\\"\"       to the ds.search query but this also adds the quotes to the display portion.   My complete json looks like this:       { "visualizations": { "viz_hSyaQ4tf": { "type": "splunk.table", "options": {}, "dataSources": { "primary": "ds_saMdKSzT" } } }, "dataSources": { "ds_saMdKSzT": { "type": "ds.search", "options": { "query": "sourcetype=\"azure:aad:signin\" userPrincipalName=$userPrincipalName$ AND appDisplayName IN ($appDisplayName$) | table createdDateTime userPrincipalName userId appDisplayName appId resourceDisplayName resourceId conditionalAccessStatus status.errorCode", "queryParameters": { "latest": "$global_time.latest$", "earliest": "$global_time.earliest$" } }, "name": "SignIns" }, "ds_XdUxasDT": { "type": "ds.search", "options": { "query": "sourcetype=\"azure:aad:signin\" | stats count by userPrincipalName", "queryParameters": { "latest": "$global_time.latest$", "earliest": "$global_time.earliest$" } }, "name": "userPrincipalName-stats" }, "ds_GQslD2fp": { "type": "ds.search", "options": { "query": "sourcetype=\"azure:aad:signin\" userPrincipalName=$userPrincipalName$ | stats count by appDisplayName", "queryParameters": { "latest": "$global_time.latest$", "earliest": "$global_time.earliest$" } }, "name": "appDisplayName-stats" } }, "defaults": { "dataSources": { "ds.search": { "options": { "queryParameters": { 
"latest": "$global_time.latest$", "earliest": "$global_time.earliest$" } } } } }, "inputs": { "input_global_trp": { "type": "input.timerange", "options": { "token": "global_time", "defaultValue": "-24h@h,now" }, "title": "Global Time Range" }, "input_hcQWlw8q": { "title": "Select App", "type": "input.multiselect", "options": { "items": ">frame(label, value) | prepend(formattedStatics) | objects()", "token": "appDisplayName" }, "dataSources": { "primary": "ds_GQslD2fp" }, "context": { "formattedConfig": { "number": { "prefix": "" } }, "formattedStatics": ">statics | formatByType(formattedConfig)", "statics": [ [ "All" ], [ "*" ] ], "label": ">primary | seriesByName(\"appDisplayName\") | renameSeries(\"label\") | formatByType(formattedConfig)", "value": ">primary | seriesByName(\"appDisplayName\") | renameSeries(\"value\") | formatByType(formattedConfig)" } }, "input_E26xAMU9": { "options": { "defaultValue": "user@domain.com", "token": "userPrincipalName" }, "title": "Select User", "type": "input.text" } }, "layout": { "type": "grid", "options": {}, "structure": [ { "item": "viz_hSyaQ4tf", "type": "block", "position": { "x": 0, "y": 0, "w": 1200, "h": 400 } } ], "globalInputs": [ "input_global_trp", "input_E26xAMU9", "input_hcQWlw8q" ] }, "description": "", "title": "Azure AD SignIns" }         This produces the not working query like this:       sourcetype="azure:aad:signin" userPrincipalName=bauera@herrenknecht.com AND appDisplayName IN (Microsoft Office 365 Portal,Windows Sign In,Office365 Shell WCSS-Client) | table createdDateTime userPrincipalName userId appDisplayName appId resourceDisplayName resourceId conditionalAccessStatus status.errorCode        I want it to be like this:       sourcetype="azure:aad:signin" userPrincipalName=bauera@herrenknecht.com AND appDisplayName IN ("Microsoft Office 365 Portal","Windows Sign In","Office365 Shell WCSS-Client") | table createdDateTime userPrincipalName userId appDisplayName appId resourceDisplayName resourceId 
conditionalAccessStatus status.errorCode         Thanks for your help.   Greetings Andreas