Open afaisman opened 7 months ago
npx create-react-app yaml-display-app cd yaml-display-app
import React, { useEffect, useState } from 'react'; import './App.css';
function App() { const [yamlData, setYamlData] = useState([]);
useEffect(() => { // Fetch YAML data from the Flask backend fetch('/get-yaml-data') .then(response => response.json()) .then(data => setYamlData(data)); }, []);
return (
YAML String |
---|
{item.yaml_string} |
); }
export default App;
your_project_folder/
│
├── backend/ # Flask Backend
│ ├── app.py # Flask application file
│ ├── venv/ # Virtual environment (optional, for Python dependencies)
│ ├── requirements.txt # Python dependencies (generated using pip freeze)
│
├── frontend/ # React Frontend
│ ├── src/ # React source code
│ │ ├── App.js # Main React component
│ │ ├── index.js # Entry point for React app
│ │ ├── ...
│ ├── public/ # Public assets (e.g., index.html)
│ ├── node_modules/ # Node.js dependencies (created when you run npm install)
│ ├── package.json # Node.js package configuration
│ ├── package-lock.json # Lock file for Node.js dependencies
│
├── .gitignore # Git ignore file (list of files/folders to be ignored by Git)
│
├── README.md # Project documentation
Uncaught runtime errors: × ERROR Failed to fetch TypeError: Failed to fetch at http://localhost:3000/static/js/bundle.js:32:5 at commitHookEffectListMount (http://localhost:3000/static/js/bundle.js:25954:30) at commitPassiveMountOnFiber (http://localhost:3000/static/js/bundle.js:27447:17) at commitPassiveMountEffects_complete (http://localhost:3000/static/js/bundle.js:27419:13) at commitPassiveMountEffects_begin (http://localhost:3000/static/js/bundle.js:27409:11) at commitPassiveMountEffects (http://localhost:3000/static/js/bundle.js:27399:7) at flushPassiveEffectsImpl (http://localhost:3000/static/js/bundle.js:29282:7) at flushPassiveEffects (http://localhost:3000/static/js/bundle.js:29235:18) at http://localhost:3000/static/js/bundle.js:29050:13 at workLoop (http://localhost:3000/static/js/bundle.js:36142:38)
import React, { useEffect, useState } from 'react'; import RefreshButton from './RefreshButton'; // Import the RefreshButton component import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'; import { vscDarkPlus } from 'react-syntax-highlighter/dist/esm/styles/prism'; // Choose a code highlighting theme import { Controlled as CodeMirror } from 'react-codemirror2'; // Import the CodeMirror editor import 'codemirror/lib/codemirror.css'; // Import CodeMirror styles import 'codemirror/mode/yaml/yaml'; // Import YAML mode for CodeMirror import 'codemirror/mode/javascript/javascript'; // Import JSON mode for CodeMirror import './App.css'; import jsYaml from 'js-yaml'; // Import the js-yaml library
// NOTE(review): this component was pasted through a renderer that stripped
// JSX tags (see the return block below) and collapsed statements onto single
// lines; code is kept byte-identical here, with review notes added.
function App() { const [markupData, setMarkupData] = useState([]); const [editingIndex, setEditingIndex] = useState(null); // Track the index of the cell being edited const [editedMarkup, setEditedMarkup] = useState(""); // Track edited markup content const [autoRefresh, setAutoRefresh] = useState(false); // Track auto-refresh checkbox state const [isMarkupValid, setIsMarkupValid] = useState(true); // Track markup validation
const fetchData = () => { // Fetch markup data from the Flask backend (update the URL as needed) fetch('http://localhost:5000/get-markup-data') .then(response => response.json()) .then(data => setMarkupData(data)) .catch(error => console.error('Error fetching data:', error)); };
// Initial data load on mount.
useEffect(() => { fetchData(); }, []);
// Auto-refresh effect: polls the backend while the checkbox is ticked.
useEffect(() => { let intervalId;
if (autoRefresh) {
// Set an interval to fetch data every 9 seconds
intervalId = setInterval(fetchData, 9000);
} else {
// Clear the interval if auto-refresh is turned off
clearInterval(intervalId);
}
return () => {
// Cleanup: clear the interval when the component unmounts
clearInterval(intervalId);
};
}, [autoRefresh]);
const handleEditClick = (index) => { setEditingIndex(index); // Set the index of the cell being edited setEditedMarkup(markupData[index].markup_string); // Set the edited markup content };
// Validates the edited markup (JSON first, then YAML) and POSTs it back.
const handleSaveClick = (index) => { // Get the edited markup content from the state const updatedMarkup = editedMarkup;
try {
let parsedMarkup;
// Attempt parsing the markup as JSON
try {
parsedMarkup = JSON.parse(updatedMarkup);
} catch (jsonError) {
// If JSON parsing fails, try parsing as YAML
try {
// NOTE(review): jsYaml.safeLoad was removed in js-yaml v4 — use jsYaml.load
// (safe by default in v4) or pin js-yaml@3; confirm the installed version.
parsedMarkup = jsYaml.safeLoad(updatedMarkup);
} catch (yamlError) {
// If both JSON and YAML parsing fail, mark markup as invalid
throw new Error('Invalid JSON and YAML');
}
}
// Markup is valid, proceed to save
setIsMarkupValid(true);
// Implement code to save the edited markup content to your backend (e.g., Flask)
const updateUrl = 'http://localhost:5000/update-markup-data'; // Update the URL as needed
const key = markupData[index].key
// Create a JSON object with the edited markup data
const requestBody = {
index: index, // You may want to send the index to identify which markup to update
key: key,
markup_string: updatedMarkup,
};
// Send a POST request to your backend to update the markup data
fetch(updateUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(requestBody),
})
.then(response => response.json())
.then(data => {
// Handle the response from the backend (if needed)
console.log('Markup data updated:', data);
// Update the markupData state with the new markup content
// NOTE(review): [...markupData] is only a shallow copy — the next line mutates
// the item object shared with the previous state; prefer mapping to new objects.
const updatedMarkupData = [...markupData];
updatedMarkupData[index].markup_string = updatedMarkup;
setMarkupData(updatedMarkupData);
// Reset editing index to stop editing
setEditingIndex(null);
})
.catch(error => console.error('Error updating markup data:', error));
} catch (error) {
// Markup is not valid, disable the "Save" button
setIsMarkupValid(false);
}
};
const handleCancelClick = () => { setEditingIndex(null); // Reset editing index to stop editing setEditedMarkup(""); // Clear any edited markup content setIsMarkupValid(true); // Reset markup validation to true when canceling };
// NOTE(review): the JSX below lost its tags when pasted (only cell text and
// markdown-style table separators remain); recover the original <table> markup
// from version control — this file cannot compile as-is.
return (
Index | Url | Markup | Actions |
---|---|---|---|
{index + 1} |
{item.key}
|
{editingIndex === index ? ( // Render CodeMirror editor when editing
|
{editingIndex === index ? ( // Render Save and Cancel buttons when editing <> > ) : ( )} |
); }
export default App;
import csv
import os
import time


def get_pods_count():
    """Return {deployment_name: replica_count} for every Deployment in the
    target namespace.

    NOTE(review): ``dep.status.replicas`` can be None for a deployment scaled
    to zero or not yet reconciled — confirm downstream consumers tolerate it.
    """
    # Imported lazily so the CSV helper below stays usable (and testable)
    # without the kubernetes client installed.
    from kubernetes import client, config
    config.load_kube_config()  # Load config from .kube/config
    apps = client.AppsV1Api()
    namespace = "default"  # Change as needed
    deployments = apps.list_namespaced_deployment(namespace)
    pod_counts = {}
    for dep in deployments.items:
        pod_counts[dep.metadata.name] = dep.status.replicas
    return pod_counts


def write_to_csv(pod_counts):
    """Append one timestamped row per deployment to pod_counts.csv.

    BUG FIX: writes a header row when the file is first created. Without it,
    pandas read the first data row as column names and the plotting script
    failed with ``KeyError: 'Deployment'`` (the fixed variant later in this
    issue does exactly this).
    """
    file_exists = os.path.isfile('pod_counts.csv')
    with open('pod_counts.csv', mode='a', newline='') as file:
        writer = csv.writer(file)
        if not file_exists:
            writer.writerow(['Time', 'Deployment', 'Count'])  # CSV headers
        for deployment, count in pod_counts.items():
            writer.writerow([time.strftime("%Y-%m-%d %H:%M:%S"), deployment, count])


# BUG FIX: guard the polling loop so importing this module does not block
# forever; the loop now only runs when the file is executed as a script.
if __name__ == "__main__":
    while True:
        write_to_csv(get_pods_count())
        time.sleep(60)  # Wait for 1 minute
import matplotlib.pyplot as plt
import pandas as pd


def plot_data(csv_file):
    """Plot pod count over time, one line per deployment, from ``csv_file``.

    Expects columns 'Time', 'Deployment' and 'Count' (as written by the
    collector script above).
    """
    frame = pd.read_csv(csv_file)
    for name in frame['Deployment'].unique():
        subset = frame[frame['Deployment'] == name]
        plt.plot(subset['Time'], subset['Count'], label=name)
    plt.xlabel('Time')
    plt.ylabel('Pod Count')
    plt.title('Pod Counts Over Time')
    plt.legend()
    plt.show()


plot_data('pod_counts.csv')
KeyError: Deployment
import csv
import os
import time


def get_pods_count():
    """Return {deployment_name: replica_count} for every Deployment in the
    target namespace.

    NOTE(review): ``dep.status.replicas`` can be None for a deployment scaled
    to zero or not yet reconciled — confirm downstream consumers tolerate it.
    """
    # Imported lazily so the CSV helper below stays usable (and testable)
    # without the kubernetes client installed.
    from kubernetes import client, config
    config.load_kube_config()  # Load config from .kube/config
    apps = client.AppsV1Api()
    namespace = "default"  # Change as needed
    deployments = apps.list_namespaced_deployment(namespace)
    pod_counts = {}
    for dep in deployments.items:
        pod_counts[dep.metadata.name] = dep.status.replicas
    return pod_counts


def write_to_csv(pod_counts):
    """Append one timestamped row per deployment to pod_counts.csv, writing
    the header only when the file is first created."""
    file_exists = os.path.isfile('pod_counts.csv')
    with open('pod_counts.csv', mode='a', newline='') as file:
        writer = csv.writer(file)
        if not file_exists:
            writer.writerow(['Time', 'Deployment', 'Count'])  # Add CSV Headers
        for deployment, count in pod_counts.items():
            writer.writerow([time.strftime("%Y-%m-%d %H:%M:%S"), deployment, count])


# BUG FIX: the bare module-level `while True` ran (and blocked forever) on
# import; guard it behind the standard entry-point check.
if __name__ == "__main__":
    while True:
        write_to_csv(get_pods_count())
        time.sleep(60)  # Wait for 1 minute
When EKS (Amazon Elastic Kubernetes Service) is specified as an assumable service, it means that an IAM role is configured to allow other entities, such as IAM users or roles, to assume that role specifically for interacting with the EKS service.
In IAM (Identity and Access Management) in AWS, roles can have policies attached that define what actions are allowed when that role is assumed. When EKS is specified as an assumable service in the assume role policy, it means that the role can be assumed by other entities to perform actions related to Amazon EKS, such as creating or managing clusters, nodes, or other resources within an EKS cluster.
This configuration is often used to grant specific permissions to certain users or roles for managing EKS resources without granting them broader access to other AWS services. It allows for granular access control and security within AWS environments.
kubectl get sa,deployments,pods -n <namespace>
from kubernetes import client, config
def list_deployments(namespace='default'):
    """Print the name of every Deployment in ``namespace`` and return the
    full listing object from the Kubernetes API."""
    config.load_kube_config()
    # Client for the apps/v1 API group (Deployments live there).
    apps_api = client.AppsV1Api()
    print(f"Listing deployments in namespace {namespace}:")
    result = apps_api.list_namespaced_deployment(namespace)
    for dep in result.items:
        print(f"- {dep.metadata.name}")
    return result
def list_pods_per_deployment(namespace='default'):
    """Print how many pods each Deployment in ``namespace`` currently owns.

    Pods are attributed to a Deployment through their owning ReplicaSet:
    a ReplicaSet is named "<deployment>-<pod-template-hash>".
    """
    config.load_kube_config()
    core_api = client.CoreV1Api()
    counts = {}
    for pod in core_api.list_namespaced_pod(namespace).items:
        owners = pod.metadata.owner_references
        # Skip pods without owners or owned by something other than a ReplicaSet.
        if not owners or owners[0].kind != "ReplicaSet":
            continue
        # Strip the trailing pod-template-hash to recover the deployment name.
        dep_name = owners[0].name.rsplit('-', 1)[0]
        counts[dep_name] = counts.get(dep_name, 0) + 1
    for deployment, count in counts.items():
        print(f"Deployment {deployment} has {count} pod(s)")
def scale_deployment(namespace='default', deployment_name='', replicas=1):
    """Set the desired replica count of ``deployment_name`` via the scale
    subresource (a strategic-merge patch on spec.replicas)."""
    config.load_kube_config()
    patch = {'spec': {'replicas': replicas}}
    client.AppsV1Api().patch_namespaced_deployment_scale(
        name=deployment_name, namespace=namespace, body=patch)
    print(f"Deployment {deployment_name} scaled to {replicas} replicas")
def main():
    """List deployments and pods in the namespace, then scale the first
    deployment up by one replica as a demonstration."""
    namespace = 'default'  # Change this to your namespace if different
    deployments = list_deployments(namespace)
    list_pods_per_deployment(namespace)
    # Scale the first deployment by one more pod if there is at least one deployment
    if deployments.items:
        first = deployments.items[0]
        # BUG FIX: spec.replicas may be None when unset (the API-server default
        # is 1), which made `None + 1` raise TypeError.
        current_replicas = first.spec.replicas or 1
        scale_deployment(namespace, first.metadata.name, current_replicas + 1)
    else:
        print("No deployments found to scale.")
if name == "main": main()
import boto3
def read_parameter(name): """Reads a parameter from AWS Parameter Store.""" ssm = boto3.client('ssm') try: parameter = ssm.get_parameter(Name=name, WithDecryption=True) return parameter['Parameter']['Value'] except ssm.exceptions.ParameterNotFound: print(f"Parameter {name} not found.") return None
def write_parameter(name, value): """Writes a new value to a parameter in AWS Parameter Store.""" ssm = boto3.client('ssm') try: ssm.put_parameter(Name=name, Value=value, Type='String', Overwrite=True) print(f"Parameter {name} updated successfully.") except Exception as e: print(f"Failed to update parameter {name}: {e}")
def main():
    """Demonstrate a read -> write -> re-read cycle against Parameter Store."""
    parameter_name = 'your_parameter_name'  # Specify your parameter name here
    new_value = 'your_new_value'  # Specify the new value here

    # Read the current value of the parameter
    current_value = read_parameter(parameter_name)
    print(f"Current value: {current_value}")

    # Update the parameter with a new value
    write_parameter(parameter_name, new_value)

    # Verify the update
    updated_value = read_parameter(parameter_name)
    print(f"Updated value: {updated_value}")
if name == "main": main()
kubectl get pods -n p11-realtime-1 -o wide
kubectl describe pod <pod-name>
kubectl describe clusterrole
kubectl exec -n p11-realtime-1
kubectl exec -n p11-realtime-1
troubleshoot the sts:AssumeRoleWithWebIdentity error and access issues with AWS Parameter Store using a boto3 and Python-based application running in the p11-realtime-1 namespace on EKS with the eks-test service account and app-esp-syn-eks-p11-1-role role, follow these steps:
Step 1: Verify the IAM Role and Policies
Go to the AWS Management Console.
Navigate to the IAM service.
In the IAM dashboard, go to 'Roles'.
Search for and select the app-esp-syn-eks-p11-1-role.
Review the permissions policies attached to this role to ensure it has the sts:AssumeRoleWithWebIdentity permission.
Check the trust relationships (Trust policies) for the role to ensure that it allows the eks-test service account in your EKS cluster to assume this role.
Step 2: Examine the Service Account in EKS
Run the following command to describe the eks-test service account:
sh
Copy code
kubectl describe sa eks-test -n p11-realtime-1
Check the annotations of the service account to confirm that it includes the ARN of the app-esp-syn-eks-p11-1-role.
Step 3: Check the OIDC Provider for EKS
In the AWS Management Console, go to the EKS service.
Select the cluster you are using.
In the cluster details page, check the OpenID Connect provider URL.
Go back to IAM, then to 'Identity Providers', and make sure that the OIDC provider for your EKS cluster is listed and correctly configured.
Step 4: Confirm EKS Pod Configuration
Check the pod configuration to ensure it is using the eks-test service account:
sh
Copy code
kubectl get pods -n p11-realtime-1 -o wide
kubectl describe pod
was on a Zoom call with HR. They mentioned updating the data on Tuesdays around 5 PM (EST, I believe, since the person I spoke with is in the EST zone). Therefore, my data should be updated this Tuesday. Additionally, I asked a few questions about how the system works. I was informed that it accounts for the previous 12 weeks from today (assuming today is Tuesday) and calculates percentages from there. It also considers weekends if one works on those days. This is why people can have consecutive work-from-office or work-from-home days, provided their manager has no objections
/`
import csv
import io
from datetime import datetime
class DeploymentHistory:
    """Rolling history of deployment pod counts, serialized as CSV and bounded
    to fit inside an AWS Parameter Store value (default 4 KB standard tier)."""

    def __init__(self, max_size_bytes=4096):
        self.records = []                     # list of {'time': str, 'number_of_pods': int}
        self.max_size_bytes = max_size_bytes  # ceiling for the serialized CSV payload

    def add_record(self, time, number_of_pods):
        """Append one observation (timestamp string, pod count)."""
        self.records.append({'time': time, 'number_of_pods': number_of_pods})

    def normalize_records(self):
        """Collapse runs of consecutive records whose pod count did not change,
        keeping only the first record of each run."""
        if not self.records:
            return
        normalized_records = [self.records[0]]
        for record in self.records[1:]:
            if record['number_of_pods'] != normalized_records[-1]['number_of_pods']:
                normalized_records.append(record)
        self.records = normalized_records

    def _serialize_to_csv(self):
        """Render the records as a CSV string with a header row."""
        output = io.StringIO()
        writer = csv.DictWriter(output, fieldnames=['time', 'number_of_pods'])
        writer.writeheader()
        for record in self.records:
            writer.writerow(record)
        return output.getvalue()

    def _deserialize_from_csv(self, csv_data):
        """Rebuild ``self.records`` from a CSV string.

        BUG FIX: DictReader yields every field as str, so round-tripped
        records no longer compared equal to freshly-added int counts (and
        normalize_records stopped collapsing duplicates). Coerce back to int.
        """
        reader = csv.DictReader(io.StringIO(csv_data))
        self.records = [
            {'time': row['time'], 'number_of_pods': int(row['number_of_pods'])}
            for row in reader
        ]

    def _is_oversized(self, serialized_data):
        # Compare encoded bytes, not characters — Parameter Store limits bytes.
        return len(serialized_data.encode('utf-8')) > self.max_size_bytes

    def enforce_size_limit(self):
        """Drop oldest records until the serialized CSV fits max_size_bytes."""
        serialized_data = self._serialize_to_csv()
        while self._is_oversized(serialized_data) and self.records:
            self.records.pop(0)  # Remove the oldest record
            serialized_data = self._serialize_to_csv()

    def save_to_aws_parameter_store(self, parameter_name):
        """Trim to the size limit, then persist the serialized history.

        BUG FIX: the original serialized *before* enforce_size_limit(), so the
        (placeholder) save would have written the stale, oversized payload.
        """
        self.enforce_size_limit()
        serialized_data = self._serialize_to_csv()
        # Placeholder for boto3 code to save `serialized_data` to AWS Parameter Store
        print("Data saved to AWS Parameter Store under the name:", parameter_name)

    def load_from_aws_parameter_store(self, parameter_name):
        """Load and deserialize history from Parameter Store (placeholder)."""
        # Placeholder for boto3 code to load data from AWS Parameter Store
        # Example (pseudocode):
        # data = boto3.client('ssm').get_parameter(Name=parameter_name)['Parameter']['Value']
        # For demonstration, assuming `data` is a CSV string loaded from AWS Parameter Store:
        data = """time,number_of_pods
2024-02-09T12:00:00,5
2024-02-09T13:00:00,10"""
        self._deserialize_from_csv(data)
        print("Data loaded from AWS Parameter Store under the name:", parameter_name)
# Example usage
history = DeploymentHistory()
for pods in (5, 10, 10):  # the trailing duplicate 10 is collapsed by normalize_records()
    history.add_record(datetime.now().isoformat(), pods)
history.normalize_records()
history.save_to_aws_parameter_store('DeploymentHistory')
history.load_from_aws_parameter_store('DeploymentHistory')
$ kubectl cp I:\git\aws-transcribe-eks-housekeeper\snippets\hk\mini_scale.py asr2-879b777bb-bmvls:/tmp -n p11-asr-2 error: one of src or dest must be a local file specification
kubectl cp I:/git/aws-transcribe-eks-housekeeper/snippets/hk/mini_scale.py asr2-879b777bb-bmvls:/tmp -n p11-asr-2
kubectl cp I:\git\aws-transcribe-eks-housekeeper\snippets\hk\mini_scale.py asr2-879b777bb-bmvls:/tmp -n p11-asr-2
kubectl cp I:\git\aws-transcribe-eks-housekeeper\snippets\hk\mini_scale.py asr2-879b777bb-bmvls:/tmp -n p11-asr-2
/kubectl cp I:\git\aws-transcribe-eks-housekeeper\snippets\hk\mini_scale.py asr2-879b777bb-bmvls:/tmp -n p11-asr-2
curl -o /dev/null -s -w "%{http_code}\n" http://kubectl-proxy-service:8001/api/
import sys

from kubernetes import client, config

# Prefer the in-cluster service-account config; fall back to the local
# kubeconfig when running outside a pod (e.g. on a developer machine).
try:
    config.load_incluster_config()
except config.config_exception.ConfigException:
    config.load_kube_config()

apps_v1 = client.AppsV1Api()
def scale_deployment_no_flask(namespace, deployment_name, replicas):
    """Scale ``deployment_name`` in ``namespace`` to ``replicas`` pods by
    replacing the Deployment object. Errors are reported via print; the
    function never raises."""
    # BUG FIX: the printed label said "deplayment_name".
    print(f'scale_deployment_no_flask namespace={namespace} '
          f'deployment_name={deployment_name}, replicas={replicas}')
    # BUG FIX: `not all([...])` treated replicas == 0 as missing, making it
    # impossible to scale to zero; check each parameter explicitly instead.
    if not namespace or not deployment_name or replicas is None:
        print("error: Missing parameters")
        return
    try:
        replicas = int(replicas)
    except ValueError:
        print("error: Invalid number of replicas")
        return
    try:
        deployment = apps_v1.read_namespaced_deployment(deployment_name, namespace)
        # BUG FIX: typo "Deployemtn" in the status message.
        print(f'Deployment {deployment_name} currently has {deployment.spec.replicas} replicas')
        if deployment.spec.replicas != replicas:
            deployment.spec.replicas = replicas
            apps_v1.replace_namespaced_deployment(name=deployment_name, namespace=namespace,
                                                  body=deployment)
            print("Deployment scaled successfully")
        else:
            print("No need to scale")
    except client.exceptions.ApiException as e:
        print("Kubernetes API error: " + str(e))
    except Exception as e:
        print("error: " + str(e))
# BUG FIX: markdown stripped the dunder underscores from the entry-point
# guard, and the usage message/exit were truncated in the paste.
if __name__ == '__main__':
    if len(sys.argv) != 4:
        print("Usage: python mini_scale.py <namespace> <deployment> <n_pods>")
        sys.exit(1)
    namespace = sys.argv[1]
    deployment = sys.argv[2]
    try:
        n_pods = int(sys.argv[3])
    except ValueError:
        print("n_pods must be an integer")
        sys.exit(1)
    scale_deployment_no_flask(namespace, deployment, n_pods)
`
$ kubectl cp mini_scale.py eks-housekeeper-fd5b6d48d-7r4qk:/tmp -n p11-realtime-2 error: Internal error occurred: error executing command in container: failed to exec in container: failed to start exec "d1ffcb0dce5803a7daa6286886f878b7c40290e78f6c62a5e3805c7876cdaae5": OCI runtime exec failed: exec failed: unable to start container process: exec: "tar": executable file not found in $PATH: unknown
however this: kubectl cp mini_scale.py asr2-879b777bb-bmvls:/tmp -n p11-asr-2 works
import pydevd_pycharm pydevd_pycharm.settrace('0.0.0.0', port=12345, stdoutToServer=True, stderrToServer=True, suspend=False)
KUBERNETES_PORT_443_TCP_PROTO=tcp KUBERNETES_PORT_443_TCP_ADDR=10.100.0.1 KUBERNETES_PORT=tcp://10.100.0.1:443 KUBERNETES_SERVICE_PORT_HTTPS=443 KUBERNETES_PORT_443_TCP_PORT=443 KUBERNETES_PORT_443_TCP=tcp://10.100.0.1:443 KUBERNETES_SERVICE_PORT=443 KUBERNETES_SERVICE_HOST=10.100.0.1
`Given the environment variables you've provided, you can construct curl commands to test access to the Kubernetes API from within a pod. The key details are the Kubernetes API server's address (KUBERNETES_SERVICE_HOST) and port (KUBERNETES_SERVICE_PORT or KUBERNETES_SERVICE_PORT_HTTPS), which you can use to form the base URL for the API server.
Here are a few example curl commands:
sh Copy code curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api
sh Copy code curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1/namespaces
sh
Copy code
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
POD_NAME=<pod-name>
curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \
-H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \
https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1/namespaces/$NAMESPACE/pods/$POD_NAME
Replace
sh
Copy code
NAMESPACE=
curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \ -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT/api/v1/namespaces/$NAMESPACE/events?watch=true Important Notes:
These commands use the service account token found at /var/run/secrets/kubernetes.io/serviceaccount/token for authentication and the CA certificate at /var/run/secrets/kubernetes.io/serviceaccount/ca.crt to verify the Kubernetes API's SSL certificate.
Replace placeholder values like
`$ kubectl exec
dev shell : 2.3.20 : default profile R743104@CHCD1146C28N1 I:\ds $ kubectl exec eks-housekeeper-7f7d5fdf4c-qwcdn -- curl -sSk -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://kubernetes.default.svc/api/v1/namespaces <!DOCTYPE html>
Sorry, something went wrong!
Make sure you have the right address of the website or try again later.
SELECT * FROM "your_table_name" WHERE transaction_id = 'your_transaction_id';
SELECT * FROM "your_table_name" WHERE time >= timestamp 'start_time' AND time <= timestamp 'end_time';
Last time we discussed schedules, let's have a quick recap. Every namespace hosts one or more housekeeper pods. These pods utilize the Kubernetes API to retrieve the list of deployments and occasionally scale them based on schedules defined in AWS Parameter Store. The housekeeper does not assume the existence of these schedules; they may exist for some deployments and not for others.
Currently, the location of the schedules is determined by a path in the Parameter Store containing the namespace and deployment. However, Christopher, you might be correct; it could be better to simply define the path as an environment variable. We could have one parameter for all environments, but the variables should still reside in separate parameters.
These parameters can be updated, removed, or added, and the housekeeper should act accordingly without requiring a redeployment or restart. Schedules contain a list of operations, such as commands to scale, but they can perform any timed operations, including alerts or metric checks.
Schedules can be defined in JSON or YAML formats, and they can reference one another.
{"meta_region":"us-east-1","app_id":"cloudwatchlogs","meta_env":"DEV","@timestamp":"2024-02-15T14:47:47.181Z","loggroup":"/aws/eks/rttr-p11-eks1/containers","source":"gtidatabus","meta_lob":"CDAO","meta_kafka_timestamp":"1708008468069","meta_account_name":"xsphere","meta_cloud_timestamp":"1708008467181","meta_type":"aws:eks","meta_account_no":"701076004268","@version":"1","seal_id":"104120","meta_flow_group":"gold-platform-logging-5","message":{"message":{"cluster_name":"rttr-p11-eks1","kubernetes":{"master_url":"https://10.100.0.1:443/api","namespace_id":"aa9201b9-2c06-41ec-8e20-e4c9277fa584","labels":{"app_kubernetes_io/name":"s2t-aws-transcribe-eks-housekeeper","app_kubernetes_io/managed-by":"spinnaker","app":"eks-housekeeper","pod-template-hash":"769f58cc84"},"pod_ip":"100.72.124.218","container_image":"sha256:61d5d369144ec96fdbbe8c99e5cda93d8177a376d29123bab89aa5edc213a2ca","pod_id":"0572e5f4-b845-4a9c-8bb2-dd6d4cbbcc6b","container_name":"eks-housekeeper","host":"ip-100-72-125-55.ec2.internal","pod_name":"eks-housekeeper-769f58cc84-spzhf","container_image_id":"containerregistry-na.jpmchase.net/container-sandbox/speech2text/aws-transcribe-eks-housekeeper/feature@sha256:c6db2c3a9378962075036f61357e567e56e0b1a3e09f598ecc9deb25e9facfaa","namespace_labels":{"core_eks_jpmc/rbac-processed":"true","app_kubernetes_io/managed-by":"spinnaker","pathfinder_gencert/webhook":"ignore","app_kubernetes_io/name":"s2t-aws-transcribe-eks-housekeeper","kubernetes_io/metadata_name":"p11-asr-1","core_eks_jpmc/namespace":"false","metadata_eks_jpmc/tenant-id":"base","pod-security_kubernetes_io/enforce":"restricted"},"namespace_name":"p11-asr-1"},"logtag":"F","log":"2024-02-15 14:47:31 INFO: log_operation.py log_operation #:32 {\"timestamp\": \"2024-02-15T14:47:31.371471\", \"service\": \"HousekeepingService\", \"operation\": \"ImmediateScaleOperation\", \"status\": \"Success\", \"execution_time\": null, \"details\": {\"region\": \"us-east-1\", \"cluster\": \"p11\", \"namespace\": 
\"p11-asr-1\", \"deployment\": \"eks-housekeeper\", \"target_percentage\": null, \"current_num_of_pods\": 1, \"target_num_of_pods\": 1}, \"message\": \"\", \"error\": null}","stream":"stdout","seal_id":"104120","docker":{"container_id":"25f5367da3d8274b2e7f342a8aee1e65aabf25e4529d54a527e00a017bafcaa7"},"time_nanoseconds":"2024-02-15T14:47:31.371646947"},"id":"38089861270961553578758892637141402621078828926551851057"},"s_sourcetype":"aws:eks_containers","logstream":"eks-housekeeper-769f58cc84-spzhf_p11-asr-1_eks-housekeeper-25f5367da3d8274b2e7f342a8aee1e65aabf25e4529d54a527e00a017bafcaa7"}
index=your_index_here sourcetype="aws:eks_containers" loggroup="/aws/eks/rttr-p11-eks1/containers" | spath input=message.log | table _time, message.operation, message.details.deployment
2024-02-15 14:49:59 INFO: log_operation.py log_operation #:32 {"timestamp": "2024-02-15T14:49:59.704192", "service": "HousekeepingService", "operation": "WatermarkOperation", "status": "Success", "execution_time": "Now", "details": {"region": "us-east-1", "cluster": "p11", "namespace": "p11-realtime-1", "deployment": "transcribe-engine", "target_percentage": null, "current_num_of_pods": 24, "target_num_of_pods": 24}, "message": "", "error": null}
index=your_index_name "INFO: log_operation.py log_operation"
| rex field=_raw "^(?
from flask import Flask, jsonify
import yaml

# BUG FIX: markdown stripped the dunder underscores — `Flask(name)` raised
# NameError; the conventional constructor argument is __name__.
app = Flask(__name__)


# Define a route to return YAML data
@app.route('/get-yaml-data')
def get_yaml_data():
    """Return the demo YAML strings as a JSON array for the React frontend."""
    yaml_data = [
        {'yaml_string': 'yaml_string_1: value_1\nkey_2: value_2'},
        {'yaml_string': 'yaml_string_2: value_3\nkey_4: value_4'},
        # Add more YAML strings as needed
    ]
    # BUG FIX: the pasted route never returned a response; jsonify sets the
    # application/json content type the frontend's response.json() expects.
    return jsonify(yaml_data)


if __name__ == '__main__':
    app.run(debug=True)