All Posts

It just suited my work sequence...
EXAMPLE DATA:   { "sourcetype": "testoracle_sourcetype", "data": { "cdb_tbs_check": [ { "check_error": "", "check_name": "cdb_tbs_check", "check_status": "OK", "current_use_mb": "1355", "percent_used": "2", "tablespace_name": "SYSTEM", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "cdb_tbs_check", "check_status": "OK", "current_use_mb": "23596", "percent_used": "36", "tablespace_name": "SYSAUX", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "cdb_tbs_check", "check_status": "OK", "current_use_mb": "29", "percent_used": "0", "tablespace_name": "UNDOTBS1", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "cdb_tbs_check", "check_status": "OK", "current_use_mb": "4", "percent_used": "0", "tablespace_name": "USERS", "total_physical_all_mb": "65536" } ], "fra_check": [ { "check_error": "", "check_name": "fra_check", "check_status": "OK", "flash_in_gb": "40", "flash_reclaimable_gb": "0", "flash_used_in_gb": "1.5", "percent_of_space_used": "3.74" } ], "global_parameters": { "check_error": "", "check_name": "General_parameters", "check_status": "OK", "database_major_version": "19", "database_minor_version": "0", "database_name": "C2N48617", "database_version": "19.0.0.0.0", "host_name": "flosclnrhv03.pharma.aventis.com", "instance_name": "C2N48617", "script_version": "1.0" }, "pdb_tbs_check": [ { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "76", "pdb_name": "O1S48633", "percent_used": "0", "tablespace_name": "UNDOTBS1", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "5", "pdb_name": "O1S48633", "percent_used": "0", "tablespace_name": "TOOLS", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "21", "pdb_name": "O1NN2467", "percent_used": "0", "tablespace_name": "UNDOTBS1", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "627", "pdb_name": "O1NN2467", "percent_used": "1", "tablespace_name": "SYSAUX", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "784", "pdb_name": "O1S48633", "percent_used": "1", "tablespace_name": "SYSTEM", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "1547", "pdb_name": "O1NN8944", "percent_used": "2", "tablespace_name": "SYSAUX", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "1149", "pdb_name": "O1S48633", "percent_used": "2", "tablespace_name": "USERS", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "58", "pdb_name": "O1NN8944", "percent_used": "0", "tablespace_name": "UNDOTBS1", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "7804", "pdb_name": "O1S48633", "percent_used": "12", "tablespace_name": "SYSAUX", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "1176", "pdb_name": "O1NN8944", "percent_used": "4", "tablespace_name": "USERS", "total_physical_all_mb": "32767" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", 
"current_use_mb": "378", "pdb_name": "O1NN8944", "percent_used": "1", "tablespace_name": "INDX", "total_physical_all_mb": "32767" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "705", "pdb_name": "O1NN8944", "percent_used": "1", "tablespace_name": "SYSTEM", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "623", "pdb_name": "O1NN2467", "percent_used": "1", "tablespace_name": "SYSTEM", "total_physical_all_mb": "65536" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "3", "pdb_name": "O1S48633", "percent_used": "0", "tablespace_name": "AUDIT_TBS", "total_physical_all_mb": "8192" }, { "check_error": "", "check_name": "pdb_tbs_check", "check_status": "OK", "current_use_mb": "128", "pdb_name": "O1S48633", "percent_used": "0", "tablespace_name": "USRINDEX", "total_physical_all_mb": "65536" } ], "processes": { "check_error": "", "check_name": "processes", "check_status": "OK", "process_current_value": "294", "process_limit": "1000", "process_percent": "29.4" }, "queue_mem_check": [ { "check_error": "", "check_name": "queue_mem_check", "check_status": "OK", "queue_name": "AQ$_Q_PIWORKTASK_TAB_E", "queue_owner": "LIVE2459_VAL", "queue_sharable_mem": "4072" }, { "check_error": "", "check_name": "queue_mem_check", "check_status": "OK", "queue_name": "AQ$_Q_TASKREPORTWORKTASK_TAB_E", "queue_owner": "LIVE2459_VAL", "queue_sharable_mem": "4072" }, { "check_error": "", "check_name": "queue_mem_check", "check_status": "OK", "queue_name": "AQ$_Q_LABELWORKTASK_TAB_E", "queue_owner": "LIVE2459_VAL", "queue_sharable_mem": "4072" }, { "check_error": "", "check_name": "queue_mem_check", "check_status": "OK", "queue_name": "AQ$_Q_PIPROCESS_TAB_E", "queue_owner": "LIVE2459_VAL", "queue_sharable_mem": "4072" }, { "check_error": "", "check_name": "queue_mem_check", "check_status": "OK", "queue_name": "AQ$_ALERT_QT_E", "queue_owner": "SYS", "queue_sharable_mem": "4032" }, { "check_error": "", "check_name": "queue_mem_check", "check_status": "OK", "queue_name": "ALERT_QUE", "queue_owner": "SYS", "queue_sharable_mem": "0" } ], "script_version": "1.0", "sessions": { "check_error": "", "check_name": "sessions", "check_status": "OK", "sessions_current_value": "293", "sessions_limit": "1536", "sessions_percent": "19.08" } } }
I am trying to upload a JSON file using the UI in Splunk Cloud and applying the following settings for parsing, but the data is coming in as a single event:

[custom_json_sourcetype]
INDEXED_EXTRACTIONS = json
SHOULD_LINEMERGE = false
KV_MODE = json
LINE_BREAKER = },\s*{

Please advise the correct settings to apply under sourcetypes in the web UI when uploading. Here is the data:

{
    "sourcetype": "testoracle_sourcetype",
    "data": {
        "cdb_tbs_check": [
            {
                "check_error": "",
                "check_name": "cdb_tbs_check",
                "check_status": "OK",
                "current_use_mb": "1355",
                "percent_used": "2",
                "tablespace_name": "SYSTEM",
                "total_physical_all_mb": "65536"
            },
            {
                "check_error": "",
                "check_name": "cdb_tbs_check",
                "check_status": "OK",
                "current_use_mb": "23596",
                "percent_used": "36",
                "tablespace_name": "SYSAUX",
                "total_physical_all_mb": "65536"
            },
            {
                "check_error": "",
                "check_name": "cdb_tbs_check",
                "check_status": "OK",
                "current_use_mb": "29",
                "percent_used": "0",
                "tablespace_name": "UNDOTBS1",
                "total_physical_all_mb": "65536"
            },
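One detail worth noting, as general props.conf behaviour rather than a verified fix for this exact file: LINE_BREAKER only splits events on a capturing group, so },\s*{ without parentheses will not break anything, and any pattern that splits inside the JSON leaves fragments that INDEXED_EXTRACTIONS = json can no longer parse. A minimal sketch that keeps each uploaded file as one valid JSON event and relies on search-time extraction instead:

[custom_json_sourcetype]
# keep the whole JSON document as a single, valid event
INDEXED_EXTRACTIONS = json
SHOULD_LINEMERGE = false
# avoid double field extraction when INDEXED_EXTRACTIONS is in use
KV_MODE = none
# the pretty-printed document is long, so do not truncate it
TRUNCATE = 0

The individual cdb_tbs_check / pdb_tbs_check objects can then be pulled apart at search time, for example with | spath path=data.cdb_tbs_check{} followed by mvexpand, rather than trying to split them at index time.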
Stumbled across this old query as I need the exact same functionality for inverse_transform() after SS pre-processing, as my data vary in scale/level. Is there any plan of adding this any time soonish? Thanks, MCW
My apologies for bringing that old topic up again, but it's the only one about the error message and I stumbled across it while investigating the same issue (different app version, but not the latest, so it might be fixed). In summary, I could trace the problem back to local.meta, which listed a user as object owner whose Splunk account had been removed. The solution was to either re-assign the object to a valid user, or remove the owner entry (= assigns it to nobody).
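For anyone tracing the same error, the offending entry in metadata/local.meta looks roughly like the sketch below; the stanza name and user are made up for illustration, the point is the owner line referencing a deleted account:

# $SPLUNK_HOME/etc/apps/<app>/metadata/local.meta (illustrative entry)
[savedsearches/My%20Broken%20Search]
owner = removed_user
access = read : [ * ], write : [ admin ]

Changing owner to a valid user, or deleting the owner line entirely so the object falls back to nobody, matches the fix described above.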
Okay, everything should be working then... You can check which search peers returned the event data using the following search:

index=* | stats values(splunk_server) by index

So long as your search factor is met, the values of splunk_server should be the local peer names depending on the SH that you run that from. You can also check the search logs in the Job Inspector. What is your overall goal here? As I say, search affinity is not a security control and is only designed to make searches more efficient. All data in site1 is replicated to site2 and vice-versa anyway according to your config.
@payl_chdhry this one works, thanks! Any idea what REST URL to get all the GUIDs? I saw a majority of the GUIDs but am still missing a couple of their hostnames. These are the searches I tried:

| rest /services/server/info

| rest splunk_server_group=* /services/cluster/master/peers

| rest splunk_server=* /services/search/distributed/peers
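If it helps, one way to line GUIDs up with hostnames is to query server/info across every reachable peer; this is a sketch and assumes all the instances you care about are visible to the search head as distributed search peers:

| rest splunk_server=* /services/server/info
| table splunk_server serverName guid version

Anything that is not configured as a search peer of the search head you run this from will still be missing, which may explain the couple of hosts without GUIDs.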
Thank you! I've been trying to find logs from process startup but I'm not sure where these might be located. What do you mean by 'the other side of the connection'?
Hi, please check the above two screenshots. I want to join these queries in such a way that I get AppID along with the columns in the first search query. The requirement is that AppID should come against the order ID from the first screenshot. Please suggest.
Yes, I'm currently working on Splunk. I want to pull the data from Event Viewer, save it to a CSV file and then add the data to Splunk. Is this the right way? I want the data to be understandable like botsv.
I'm trying to integrate with Azure DB.
Connection type - MS-SQL Server using jTDS driver
Port - 1433
OK, is the latency expressed in seconds? Imagine the latency is 180: does that mean I have to put -3m@m in earliest and now() in latest?
You can calculate the latency like this

| eval latency=_indextime - _time

However, this is for the events already in the event pipeline. You could use this to find a maximum latency over a period and apply this statically to your earliest value in your next search. However, this is still only a static value and there is no guarantee that you won't have missed some events with higher latencies. You could periodically rerun the latency calculator to see if you are missing any events and adjust your search accordingly.
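As a rough sketch of the "maximum latency over a period" idea (the index name and time range are placeholders):

index=your_index earliest=-24h
| eval latency=_indextime - _time
| stats max(latency) AS max_latency_sec avg(latency) AS avg_latency_sec p95(latency) AS p95_latency_sec

If max_latency_sec comes back as, say, 180 seconds, widening the alert's earliest by roughly that amount (e.g. an extra 3 minutes) gives it a buffer, with the caveat above that a single slower event can still slip past a static value.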
Assuming you have already extracted the data field, and that the string in data is valid JSON (which your example is not), you could try this

| spath input=data
| where 'response.action.type'="UserCreated" OR 'response.action.type'="TxCreated"
| eval id = coalesce('response.resources{}.id', 'response.actors.id')
At a high level:

1. I would first look at the integration manual they state in the app - Omnis Data Streamer 6.3.5 Splunk Integration Guide - and look for data onboarding or something along those lines.

2. The app you have is mainly for displaying data, and I think you would need to look at the TA - NETSCOUT Omnis Data Streamer App Add-on | Splunkbase (this is what helps get the data parsed and into Splunk).

Start by working out your exact Netscout device and the options it provides in terms of data (JSON/syslog/log files etc.), look at the manual, work out what they suggest and follow that plan, test it and ingest it. Then use the app to help display the results.

Splunk has many options for getting data in (UF/syslog/HEC) and supports many different data formats, such as JSON, but first you must do some homework and work out the details.
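For illustration only (the input name, index and sourcetype below are placeholders, not something the NETSCOUT documentation prescribes), an HTTP Event Collector input for JSON pushed straight from a device looks like this in inputs.conf:

# inputs.conf on the instance that terminates HEC traffic
# "netscout_hec" is an arbitrary name; the token value is written when the input is created in Splunk Web
[http://netscout_hec]
disabled = 0
index = netscout
sourcetype = _json

A Universal Forwarder monitoring a JSON log file written by the exporter is the other common route; which one applies depends on what the Omnis Data Streamer guide says the device can emit.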
I just want to calculate the latency between _indextime and _time in order to define a time range in my alert which takes this latency into account.
What does that even mean? _indextime is not calculated; it is the time when the event was indexed. It is like asking what the average hour of the day is. Earliest and latest relate to _time, not _indextime. Usually, _indextime is after _time, as it takes time for the event to be logged, transmitted, parsed and indexed. Having said that, _time usually comes from the data in the event, and that timestamp could even be in the future as far as the event is concerned. Please explain what your goal is in more detail.
Hi, could anybody give me a search to calculate the _indextime average for my events? Once it's done, what do I have to do in the cron parameters of my alert to take this metric into account? Thanks.
Hello everyone, I'm new to Splunk and still have a lot to learn. I want to ask a question: how do I forward data in JSON format from Netscout to Splunk? Should I use a Universal Forwarder or maybe an app on Splunkbase? Thanks for your attention. #Netscout #JSON
The best way of getting data from the company's systems is generally whatever is easiest to get them out. Splunk can ingest data in many ways, but there are many standard ways of looking at data. What systems do you have, and what logs are available? Do you currently use Splunk?