afaisman / Test


Test #1

Open afaisman opened 7 months ago

afaisman commented 7 months ago

import yaml
import tkinter as tk

def apply_yaml_syntax_highlighting(text_widget, yaml_content):
    try:
        parsed_yaml = yaml.safe_load(yaml_content)

        # Example: Apply different tags for keys and values
        for key, value in parsed_yaml.items():
            start = f"{key}:"
            end = f"{value}\n"
            start_index = text_widget.search(start, "1.0", tk.END)
            end_index = text_widget.search(end, start_index, tk.END)

            text_widget.tag_add("yaml_key", start_index, f"{start_index} lineend")
            text_widget.tag_add("yaml_value", end_index, f"{end_index} lineend")

        text_widget.tag_config("yaml_key", foreground="blue")
        text_widget.tag_config("yaml_value", foreground="green")

    except yaml.YAMLError as exc:
        print("Error in YAML parsing:", exc)
afaisman commented 7 months ago

root = tk.Tk()
text = tk.Text(root)
text.pack()

# Example YAML content
yaml_content = """
key1: value1
key2: value2
"""

text.insert(tk.END, yaml_content)
apply_yaml_syntax_highlighting(text, yaml_content)

root.mainloop()

afaisman commented 7 months ago

import tkinter as tk
from tkinter import scrolledtext

# Initialize the Tkinter root window
root = tk.Tk()
root.title("Tkinter Table with YAML Content")

# Define the size of the table
rows = 10
columns = 3

# Sample YAML content
yaml_sample = """
key1: value1
key2: value2
key3: value3
"""

# Create and place the text widgets in a grid
for row in range(rows):
    for col in range(columns):
        text = scrolledtext.ScrolledText(root, wrap=tk.WORD, height=5, width=20)
        text.grid(row=row, column=col, padx=5, pady=5, sticky="nsew")
        text.insert(tk.END, yaml_sample)  # Insert the sample YAML content

# Run the Tkinter main loop

root.mainloop()

afaisman commented 7 months ago

import tkinter as tk
from tkinter import scrolledtext
import yaml

def apply_yaml_syntax_highlighting(text_widget):
    yaml_content = text_widget.get("1.0", tk.END)
    try:
        parsed_yaml = yaml.safe_load(yaml_content)

        # Reset previous tags
        text_widget.tag_remove("yaml_key", "1.0", tk.END)
        text_widget.tag_remove("yaml_value", "1.0", tk.END)

        # Apply tags for keys and values (simple implementation)
        for key, value in parsed_yaml.items():
            start = f"{key}:"
            end = f"{value}\n"
            start_index = text_widget.search(start, "1.0", tk.END)
            end_index = text_widget.search(end, start_index, tk.END)

            text_widget.tag_add("yaml_key", start_index, f"{start_index} lineend")
            text_widget.tag_add("yaml_value", f"{start_index} +1c", end_index)

        text_widget.tag_config("yaml_key", foreground="blue")
        text_widget.tag_config("yaml_value", foreground="green")

    except yaml.YAMLError as exc:
        print("Error in YAML parsing:", exc)

# Initialize the Tkinter root window
root = tk.Tk()
root.title("Tkinter Table with YAML Syntax Highlighting")

# Define the size of the table
rows = 10
columns = 3

# Sample YAML content
yaml_sample = """
key1: value1
key2: value2
key3: value3
"""

# Create and place the text widgets in a grid
for row in range(rows):
    for col in range(columns):
        text = scrolledtext.ScrolledText(root, wrap=tk.WORD, height=5, width=20)
        text.grid(row=row, column=col, padx=5, pady=5, sticky="nsew")
        text.insert(tk.END, yaml_sample)  # Insert the sample YAML content
        apply_yaml_syntax_highlighting(text)  # Apply syntax highlighting

# Make rows and columns resizable
root.grid_rowconfigure(tuple(range(rows)), weight=1)
root.grid_columnconfigure(tuple(range(columns)), weight=1)

# Run the Tkinter main loop

root.mainloop()

afaisman commented 7 months ago

import tkinter as tk
from tkinter import scrolledtext
import yaml

def apply_yaml_syntax_highlighting(text_widget):
    yaml_content = text_widget.get("1.0", tk.END)
    try:
        parsed_yaml = yaml.safe_load(yaml_content)

        # Reset previous tags
        text_widget.tag_remove("yaml_key", "1.0", tk.END)
        text_widget.tag_remove("yaml_value", "1.0", tk.END)

        # Apply tags for keys and values (simple implementation)
        for key, value in parsed_yaml.items():
            start = f"{key}:"
            end = f"{value}\n"
            start_index = text_widget.search(start, "1.0", tk.END)
            end_index = text_widget.search(end, start_index, tk.END)

            text_widget.tag_add("yaml_key", start_index, f"{start_index} lineend")
            text_widget.tag_add("yaml_value", f"{start_index} +1c", end_index)

        text_widget.tag_config("yaml_key", foreground="blue")
        text_widget.tag_config("yaml_value", foreground="green")

    except yaml.YAMLError as exc:
        print("Error in YAML parsing:", exc)

# Initialize the Tkinter root window
root = tk.Tk()
root.title("Scrollable Tkinter Table with YAML Syntax Highlighting")

# Create a canvas and a scrollbar
canvas = tk.Canvas(root)
scrollbar = tk.Scrollbar(root, command=canvas.yview)
canvas.configure(yscrollcommand=scrollbar.set)

# Pack the canvas and scrollbar
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
canvas.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)

# Create a frame inside the canvas
frame = tk.Frame(canvas)

# Add the frame to the canvas's window
canvas.create_window((0, 0), window=frame, anchor='nw')

def on_frame_configure(event=None):
    # Update the scroll region to encompass the frame
    canvas.configure(scrollregion=canvas.bbox("all"))

frame.bind("<Configure>", on_frame_configure)

# Define the size of the table
rows = 100
columns = 3

# Sample YAML content
yaml_sample = """
key1: value1
key2: value2
key3: value3
"""

# Create and place the text widgets in a grid inside the frame
for row in range(rows):
    for col in range(columns):
        text = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=5, width=20)
        text.grid(row=row, column=col, padx=5, pady=5, sticky="nsew")
        text.insert(tk.END, yaml_sample)  # Insert the sample YAML content
        apply_yaml_syntax_highlighting(text)  # Apply syntax highlighting

# Run the Tkinter main loop

root.mainloop()

afaisman commented 1 month ago
# accumulates all operations sorted by date in self._operation_data_dict map.
def _preprocess_channel_data(self, verbose_: bool):
    ret_code = True
    try:
        ret_code_helper = RetCodeHelper(entity_name_="ChannelData", phrase_default_="Upsert to DB ")
        db = self._channel_data_db_mngr
        operation_data_dict = {}
        for channel_data_key in self._channel_data_dict.keys():
            channel_data = self._channel_data_dict.get(channel_data_key)

            for operation_key in channel_data._operations_dict.keys():
                operation_data = channel_data._operations_dict.get(operation_key)
                date = operation_data.get("date")
                operation_data_dict[(channel_data, date)] = operation_data

        sorted_operations = sorted(operation_data_dict.items(), key=lambda item: item[0][1])

        for index, ((channel_data, date), operation_data) in enumerate(sorted_operations):
            self._operation_data_dict[(channel_data, date)] = (index, operation_data)

        ret_code_helper.print(verbose_=verbose_)
    except Exception as e:
        ChannelDataMngr.LOGGER.error("Exception e:{}".format(e))
        ret_code = False
    return ret_code

def upsert_data_to_db(self, verbose_: bool) -> bool:
    try:
        ret_code_helper = RetCodeHelper(entity_name_="ChannelData", phrase_default_="Upsert to DB ")
        db = self._channel_data_db_mngr
        ret_code = False

        total_identifiers_count = 0
        for index, ((channel_data, date), (enum_index, operation_data)) in enumerate(self._operation_data_dict.items()):
            data = operation_data.get("data")
            total_identifiers_count = total_identifiers_count + len(data)
            print(f"Enumerated Index: {index}, Enum Index: {enum_index}, Channel Data: {channel_data.get_channel_name()}, Date: {date}, Operation Data Type: {operation_data.get('type')}")

        print(f'total_identifiers_count={total_identifiers_count}')

        ChannelData.LOGGER.info("{}".format("*" * 40))
        count = 0
        count_error = 0
        for index, ((channel_data, date), (enum_index, operation_data)) in enumerate(self._operation_data_dict.items()):
            date = operation_data.get("date")
            type = operation_data.get("type")
            note = operation_data.get("note")
            data = operation_data.get("data")
            identifier_type_id = db.get_identifier_type_id_from_name(identifier_type_name_=type, verbose_=verbose_)
            description = "date:{} note:{}".format(str(date), note)
            for datum in data:
                ret_code_local = db.upsert_table_identifiers(identifier_name_=datum, identifier_type_id_=identifier_type_id, env_id_=channel_data._env_id, client_id_=channel_data._client_id, channel_config_id_=1, description_=description, region_id_=channel_data._region_id, verbose_=verbose_)
                helper_key = "channel_name:{:<40s} date:{:<10s} type:{:<10s} ret_code:{:<6s} datum:{}".format(channel_data._channel_name, str(date), type, str(ret_code_local), datum)
                ret_code_helper.addOrUpdateWithErrMsg(key_=helper_key, attr_=RetCodeHelper.RET_CODE_ATTR, value_=ret_code_local, error_msg_=None)
                count += 1
                if ret_code_local is False:
                    count_error += 1
            #ChannelData.LOGGER.info("channel_name:{:<40s} operation:{:<6s} date:{:<10s} type:{:<15s} count:{} count_error:{} note:{} data:{}".format(channel_data._channel_name, operation_key, str(date), type, count, count_error, note, data))
            ChannelData.LOGGER.info("channel_name:{:<40s} date:{:<10s} type:{:<15s} count:{} count_error:{} note:{} data:{}".format(channel_data._channel_name, str(date), type, count, count_error, note, data))

            ChannelData.LOGGER.info("{}".format("-" * 40))
            ret_code = True
        return ret_code
    except Exception as e:
        ChannelDataMngr.LOGGER.error("Exception e:{}".format(e))
        ret_code = False
    return ret_code
afaisman commented 1 month ago

Add comments, especially on the order of the sort: it runs from the earliest to the latest. Make the sorting more readable; avoid list comprehensions and use named variables here: sorted_operations = sorted(operation_data_dict.items(), key=lambda item: item[0][1])

Add more comments, especially around the sort by datetime, and make sure it is sorted by datetime, not alphabetically.

Use meaningful variable names instead of item[0] and item[0][1].

Say, for example: 'the variable some_good_name is a datetime.'
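
A minimal sketch of the requested refactor (a hedged illustration; the sample data and names like operation_date are hypothetical, not from the codebase):

import datetime

# Illustrative data: keys are (channel_data, date) tuples, as in _preprocess_channel_data.
operation_data_dict = {
    ("channel_a", datetime.date(2023, 12, 2)): {"type": "add"},
    ("channel_b", datetime.date(2023, 11, 30)): {"type": "remove"},
}

def sort_key(dict_item):
    # dict_item is a ((channel_data, date), operation_data) pair.
    # operation_date is a datetime.date, so the comparison is chronological
    # (earliest to latest), not alphabetical.
    (channel_data, operation_date), operation_data_value = dict_item
    return operation_date

sorted_operations = sorted(operation_data_dict.items(), key=sort_key)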

afaisman commented 1 month ago

def _preprocess_channel_data(self, verbose_: bool):
    """
    Processes channel data to accumulate all operations sorted by date from earliest to latest.
    Stores processed data with enumeration in self._operation_data_dict.
    """
    ret_code = True
    try:
        # Helper for handling return codes and error messages
        ret_code_helper = RetCodeHelper(entity_name_="ChannelData", phrase_default_="Upsert to DB ")
        db = self._channel_data_db_mngr
        operation_data_dict = {}
        operations_count = 0

        # Accumulate operations from channel data
        for channel_data_key in self._channel_data_dict.keys():
            channel_data = self._channel_data_dict.get(channel_data_key)

            for operation_key in channel_data._operations_dict.keys():
                operation_data = channel_data._operations_dict.get(operation_key)
                date = operation_data.get("date")
                operation_data_dict[(channel_data, date, operations_count)] = operation_data
                operations_count = operations_count + 1
afaisman commented 1 month ago
def _preprocess_channel_data(self, verbose_: bool):
    """
    Processes channel data to accumulate all operations sorted by date from earliest to latest.
    Stores processed data with enumeration in self._operation_data_dict.
    """
    ret_code = True
    try:
        # Helper for handling return codes and error messages
        ret_code_helper = RetCodeHelper(entity_name_="ChannelData", phrase_default_="Upsert to DB ")
        db = self._channel_data_db_mngr
        operation_data_dict = {}
        operations_count = 0

        # Accumulate operations from channel data
        for channel_data_key in self._channel_data_dict.keys():
            channel_data = self._channel_data_dict.get(channel_data_key)

            for operation_key in channel_data._operations_dict.keys():
                operation_data = channel_data._operations_dict.get(operation_key)
                date = operation_data.get("date")
                operation_data_dict[(channel_data, date, operations_count)] = operation_data
                operations_count = operations_count + 1

        def extract_date(key):
            """
            Extracts the date and operations_count from the key for sorting.
            The key is expected to be a tuple (channel_data, date, operations_count).
            """
            _, date, operations_count = key
            return (date, operations_count)
        # Sort operations by date and operations_count using the extract_date function
        sorted_operations = sorted(operation_data_dict.items(), key=lambda x: extract_date(x[0]))

        # Store sorted and enumerated operations in the class dictionary
        for index, ((channel_data, date, _), operation_data) in enumerate(sorted_operations):
            self._operation_data_dict[(channel_data, date, index)] = operation_data

        ret_code_helper.print(verbose_=verbose_)
    except Exception as e:
        ChannelDataMngr.LOGGER.error("Exception e:{}".format(e))
        ret_code = False
    return ret_code
afaisman commented 1 month ago
def upsert_data_to_db(self, verbose_: bool) -> bool:
    try:
        ret_code_helper = RetCodeHelper(entity_name_="ChannelData", phrase_default_="Upsert to DB ")
        db = self._channel_data_db_mngr
        ret_code = False

        total_identifiers_count = 0
        for index, ((channel_data, date, operations_count), operation_data) in enumerate(self._operation_data_dict.items()):
            data = operation_data.get("data")
            total_identifiers_count += len(data)
            print(f"Enumerated Index: {index}, Channel Data: {channel_data.get_channel_name()}, Date: {date}, Operations Count: {operations_count}, Operation Data Type: {operation_data.get('type')}")

        print(f'Total Identifiers Count: {total_identifiers_count}')

        ChannelData.LOGGER.info("{}".format("*" * 40))
        count = 0
        count_error = 0
        for index, ((channel_data, date, operations_count), operation_data) in enumerate(self._operation_data_dict.items()):
            date = operation_data.get("date")
            type = operation_data.get("type")
            note = operation_data.get("note")
            data = operation_data.get("data")
            identifier_type_id = db.get_identifier_type_id_from_name(identifier_type_name_=type, verbose_=verbose_)
            description = "date:{} note:{}".format(str(date), note)
            for datum in data:
                ret_code_local = db.upsert_table_identifiers(identifier_name_=datum, identifier_type_id_=identifier_type_id, env_id_=channel_data._env_id, client_id_=channel_data._client_id, channel_config_id_=1, description_=description, region_id_=channel_data._region_id, verbose_=verbose_)
                helper_key = "channel_name:{:<40s} date:{:<10s} type:{:<10s} ret_code:{:<6s} datum:{}".format(channel_data._channel_name, str(date), type, str(ret_code_local), datum)
                ret_code_helper.addOrUpdateWithErrMsg(key_=helper_key, attr_=RetCodeHelper.RET_CODE_ATTR, value_=ret_code_local, error_msg_=None)
                count += 1
                if ret_code_local is False:
                    count_error += 1
            #ChannelData.LOGGER.info("channel_name:{:<40s} operation:{:<6s} date:{:<10s} type:{:<15s} count:{} count_error:{} note:{} data:{}".format(channel_data._channel_name, operation_key, str(date), type, count, count_error, note, data))
            ChannelData.LOGGER.info("channel_name:{:<40s} date:{:<10s} type:{:<15s} count:{} count_error:{} note:{} data:{}".format(channel_data._channel_name, str(date), type, count, count_error, note, data))

            ChannelData.LOGGER.info("{}".format("-" * 40))
            ret_code = True
        return ret_code
    except Exception as e:
        ChannelDataMngr.LOGGER.error("Exception e:{}".format(e))
        ret_code = False
    return ret_code
afaisman commented 1 month ago
# -------------------------------------------
# https://pythonexamples.org/python-sqlite3-select-from-table/
# return = True if successful insert else return False
# -------------------------------------------
def insert_table_identifiers(self, identifier_name_: str, identifier_type_id_: int, env_id_: int, region_id_: int, client_id_: int, channel_config_id_: int, description_: str) -> bool:
    ret_code = False
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        ChannelDataDBMngr.LOGGER.debug("Opened database successfully")

        cursor.execute(
            "INSERT INTO identifiers(identifierName,identifierTypeId,envId,regionId,clientId,channelConfigId,description) values(?,?,?,?,?,?,?);",
            (
                identifier_name_,
                identifier_type_id_,
                env_id_,
                region_id_,
                client_id_,
                channel_config_id_,
                description_,
            ),
        )
        ChannelDataDBMngr.LOGGER.debug("Operation done successfully")
        conn.commit()
        conn.close()

        ret_code = True
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error("Exception e:{}".format(e))
    return ret_code
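
A self-contained check of the parameterized INSERT above against an in-memory SQLite DB. The identifiers schema here is a minimal assumption reconstructed from the column list in the statement, not the real DDL:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE identifiers("
    "identifierName TEXT PRIMARY KEY, identifierTypeId INTEGER, envId INTEGER,"
    "regionId INTEGER, clientId INTEGER, channelConfigId INTEGER, description TEXT);"
)
conn.execute(
    "INSERT INTO identifiers(identifierName,identifierTypeId,envId,regionId,clientId,channelConfigId,description) values(?,?,?,?,?,?,?);",
    ("+12155550100", 1, 1, 1, 1, 1, "date:2023-12-02 note:test"),
)
print(conn.execute("SELECT * FROM identifiers").fetchall())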
afaisman commented 1 month ago

def upsert_table_identifiers(self, identifier_name_: str, identifier_type_id_: int, env_id_: int, region_id_: int, client_id_: int, channel_config_id_: int, description_: str, verbose_: bool = False) -> bool:
    ret_code = False
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        ChannelDataDBMngr.LOGGER.debug("Opened database successfully")

        cursor.execute(
            "INSERT INTO identifiers(identifierName,identifierTypeId,envId,regionId,clientId,channelConfigId,description) values(?,?,?,?,?,?,?) on conflict(identifierName) do update set description=? where identifierTypeId=? and envId=? and regionId=? and clientId=? and channelConfigId=? returning *;",
            (
                identifier_name_,
                identifier_type_id_,
                env_id_,
                region_id_,
                client_id_,
                channel_config_id_,
                description_,
                description_,
                identifier_type_id_,
                env_id_,
                region_id_,
                client_id_,
                channel_config_id_,
            ),
        )

        # if a record is found to update when the 'on conflict' clause is triggered, then there will be
        # a returned result; we just print out the result and set the ret_code=true in all cases a result is
        # returned
        # if *no record* is found for the update, it means that the upsert was trying to update a record but with
        # new, different values for one or more of the columns specified in the where clause, e.g.
        # envId=? and regionId=? and clientId=? and channelConfigId=?
        # in this case, we want to return 'false', i.e. the client is trying to create a new record but this is in
        # violation of the uniqueness constraint - so return false
        myresult = cursor.fetchall()
        for x in myresult:
            if verbose_ is True:
                ChannelDataDBMngr.LOGGER.info(x)
            ret_code = True

        if verbose_ is True:
            ChannelDataDBMngr.LOGGER.info("Operation done successfully")
        conn.commit()
        conn.close()

        ret_code = True
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error("Exception e:{}".format(e))
    return ret_code
afaisman commented 1 month ago
def upsert_table_identifiers(self, identifier_name_: str, identifier_type_id_: int, env_id_: int, region_id_: int, client_id_: int, channel_config_id_: int, description_: str, verbose_: bool = False) -> bool:
    ret_code = False
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        ChannelDataDBMngr.LOGGER.debug("Opened database successfully")

        cursor.execute(
            "INSERT INTO identifiers(identifierName,identifierTypeId,envId,regionId,clientId,channelConfigId,description) values(?,?,?,?,?,?,?) on conflict(identifierName) do update set description=?, clientId=?, channelConfigId=? where identifierTypeId=? and envId=? and regionId=? and clientId=? and channelConfigId=? returning *;",
            (
                identifier_name_,
                identifier_type_id_,
                env_id_,
                region_id_,
                client_id_,
                channel_config_id_,
                description_,
                description_,
                identifier_type_id_,
                env_id_,
                region_id_,
                client_id_,
                channel_config_id_,
            ),
        )

        # if a record is found to update when the 'on conflict' clause is triggered, then there will be
        # a returned result; we just print out the result and set the ret_code=true in all cases a result is
        # returned
        # if *no record* is found for the update, it means that the upsert was trying to update a record but with
        # new, different values for one or more of the columns specified in the where clause, e.g.
        # envId=? and regionId=? and clientId=? and channelConfigId=?
        # in this case, we want to return 'false', i.e. the client is trying to create a new record but this is in
        # violation of the uniqueness constraint - so return false
        myresult = cursor.fetchall()
        for x in myresult:
            if verbose_ is True:
                ChannelDataDBMngr.LOGGER.info(x)
            ret_code = True

        if verbose_ is True:
            ChannelDataDBMngr.LOGGER.info("Operation done successfully")
        conn.commit()
        conn.close()

        ret_code = True
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error("Exception e:{}".format(e))
    return ret_code
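
A standalone demonstration of the ON CONFLICT ... RETURNING behavior the comment above describes, using the same minimal identifiers schema as before (an assumption, not the real DDL); RETURNING requires SQLite 3.35+:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE identifiers("
    "identifierName TEXT PRIMARY KEY, identifierTypeId INTEGER, envId INTEGER,"
    "regionId INTEGER, clientId INTEGER, channelConfigId INTEGER, description TEXT);"
)
upsert = ("INSERT INTO identifiers(identifierName,identifierTypeId,envId,regionId,clientId,channelConfigId,description) "
          "values(?,?,?,?,?,?,?) on conflict(identifierName) do update set description=? "
          "where identifierTypeId=? and envId=? and regionId=? and clientId=? and channelConfigId=? returning *;")

# First call inserts; second call hits the conflict and updates the description.
for desc in ("first", "second"):
    rows = conn.execute(upsert, ("+12155550100", 1, 1, 1, 1, 1, desc, desc, 1, 1, 1, 1, 1)).fetchall()
    print(rows)  # non-empty: the insert or the matching update returned a row

# A conflicting insert whose WHERE clause does not match the existing row returns
# no rows, which the method above treats as a failed upsert.
rows = conn.execute(upsert, ("+12155550100", 2, 9, 9, 9, 9, "third", "third", 2, 9, 9, 9, 9)).fetchall()
print(rows)  # []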
afaisman commented 1 month ago
def _preprocess_channel_data(self, verbose_: bool):
    """
    Processes channel data to accumulate all operations sorted by date from earliest to latest.
    Stores processed data with enumeration in self._operation_data_dict.
    """
    ret_code = True
    try:
        # Helper for handling return codes and error messages
        ret_code_helper = RetCodeHelper(entity_name_="ChannelData", phrase_default_="Upsert to DB ")
        operation_data_dict = {}
        operations_count = 0

        # Accumulate operations from channel data
        for channel_data_key in self._channel_data_dict.keys():
            channel_data = self._channel_data_dict.get(channel_data_key)

            for operation_key in channel_data._operations_dict.keys():
                operation_data = channel_data._operations_dict.get(operation_key)
                date = operation_data.get("date")
                operation_data_dict[(channel_data, date, operations_count)] = operation_data
                operations_count += 1

        # Key structure: (ChannelData, datetime.date, int), e.g.
        # (<ChannelData object>, datetime.date(2023, 12, 2), 0)
        def extract_date(key):
            """
            Extracts the date and operations_count from the key for sorting.
            The key is expected to be a tuple (channel_data, date, operations_count).
            """
            _, date, operations_count = key
            return (date, operations_count)

        # Sort operations by date and operations_count using the extract_date function
        # x = (ChannelData, datetime, i)
        sorted_operations = sorted(operation_data_dict.items(), key=lambda x: extract_date(x[0]))
        # at this point
        # sorted_operations == [(ChannelData, datetime, num1), (ChannelData, datetime, num2) .. ]
        # sorted by date

        # Store sorted and enumerated operations in the class dictionary
        for index, ((channel_data, date, _), operation_data) in enumerate(sorted_operations):
            self._operation_data_dict[(channel_data, date, index)] = operation_data

        ret_code_helper.print(verbose_=verbose_)
    except Exception as e:
        ChannelDataMngr.LOGGER.error("Exception e:{}".format(e))
        ret_code = False
    return ret_code
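
To see why the (date, operations_count) tuple key keeps same-date operations in their original order, here is a tiny standalone check (the values are illustrative):

import datetime

keys = [
    ("chan_b", datetime.date(2023, 12, 2), 1),
    ("chan_a", datetime.date(2023, 11, 30), 0),
    ("chan_c", datetime.date(2023, 12, 2), 2),
]

def extract_date(key):
    _, date, operations_count = key
    return (date, operations_count)

# Sorted chronologically; the two 2023-12-02 keys keep their relative order
# because operations_count breaks the tie.
print(sorted(keys, key=extract_date))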
afaisman commented 1 month ago
def test_operations_are_loaded_from_data_yaml_and_ordered(self):
    # create a channel data db mngr pointing to a DB for use by this test only
    channel_data_db_mngr = ChannelDataDBMngr()
    file_name = "channelDataDBTest_" + str(uuid.uuid4()) + ".db"

    # initialize the db mngr
    channel_data_db_mngr.initialize(channel_data_path_=ChannelDataDBMngr.CHANNEL_DATA_PATH_TEST, channel_data_db_file_name_=file_name)

    # set up tables and load default data into the db manager, i.e. create a fully functional and loaded channel db
    channel_data_db_mngr.set_up_db_and_insert_defaults()

    # create a channel data manager, injecting the channel data db mngr
    channel_data_mngr = ChannelDataMngr()
    # initialize the data manager - this step will load all the data from the data.yml into the mngr
    ret_code = channel_data_mngr.initialize(channel_data_file_env_=Env.DEV, channel_data_db_mngr_=channel_data_db_mngr, path_=ChannelDataMngr.CHANNEL_DATA_PATH_TEST, channel_data_file_name_=ChannelDataMngr.CHANNEL_DATA_FILE_NAME_TEST)
    self.assertTrue(ret_code)

    # Initialize a variable to hold the last seen date
    last_date = None
    is_sorted_ascending = True  # Assume dates are sorted unless proven otherwise

    # Iterate over the operations and check the order of dates directly
    for key in channel_data_mngr._operation_data_dict.keys():
        op = channel_data_mngr._operation_data_dict[key]
        current_date = op.get("date")

        # If last_date is not None and current_date is earlier than last_date, then dates are not sorted
        if last_date is not None and current_date < last_date:
            is_sorted_ascending = False
            break  # No need to check further; we found dates out of order
        # Update last_date to the current date
        last_date = current_date

    TestChannelDataMngr.LOGGER.info("Dates are in ascending order:" + str(is_sorted_ascending))
    self.assertTrue(is_sorted_ascending)

def test_operations_are_loaded_from_data_yaml_and_filtered_by_threshold(self):
    # create a channel data db mngr pointing to a DB for use by this test only
    channel_data_db_mngr = ChannelDataDBMngr()
    file_name = "channelDataDBTest_" + str(uuid.uuid4()) + ".db"

    # initialize the db mngr
    channel_data_db_mngr.initialize(channel_data_path_=ChannelDataDBMngr.CHANNEL_DATA_PATH_TEST, channel_data_db_file_name_=file_name)

    # set up tables and load default data into the db manager, i.e. create a fully functional and loaded channel db
    channel_data_db_mngr.set_up_db_and_insert_defaults()

    # now test that the dates of the operations are older than the threshold
    # Specify the threshold date against which to check the operation dates
    threshold_date = datetime.date(2023, 12, 1)  # Change this date as needed
    channel_data_mngr = ChannelDataMngr()
    # initialize the data manager - this time filter out operations newer than the threshold date
    channel_data_mngr.initialize(channel_data_file_env_=Env.DEV, channel_data_db_mngr_=channel_data_db_mngr, path_=ChannelDataMngr.CHANNEL_DATA_PATH_TEST, channel_data_file_name_=ChannelDataMngr.CHANNEL_DATA_FILE_NAME_TEST, current_date_=threshold_date)

    TestChannelDataMngr.LOGGER.info(f'Number of operations older than {threshold_date.strftime("%Y-%m-%d")} : {len(channel_data_mngr._operation_data_dict)}')
    self.assertTrue(len(channel_data_mngr._operation_data_dict) == 1)
afaisman commented 1 month ago

`index=cloudwatchlogs loggroup="/aws/eks/*/containers" seal_id="104120" meta_account_no=425436473140 meta_env=PRD

        (meta_region="*")  
        (streamingStatus="ENDED") 
        isCaller="*" 
        (telemetryState="END") 
        (transactionRetCode="*")
        (eventType="*") 

        channelConfigName     = * NOT channelConfigName = "CIB-S2T-SyntheticTests-104120" AND NOT channelConfigName = "NA" 
        callId                = "*" 
        kvsResourceName       = "*"
        fromNumber            = "*"
        toNumber              = "*"
        transactionId         = "*"
        S2T_REX_T1_callType   = "*" 
        S2T_REX_T1_callSourceAppType= "CISCO"       
        |
        search telemetryState="BEGIN" OR telemetryState="END" OR  telemetryState="IN-PROGRESS"
        |
        eval zoomHostEmail1 = if( isnull(S2T_REX_T1_ZoomHostEmail),"*",S2T_REX_T1_ZoomHostEmail) |
        search (zoomHostEmail1 = "*") |

        eval appLogTimeEpochUTC = strptime(appLogTimeUTC, "%Y-%m-%d %H:%M:%S.%3Q") |
        eval appLogTimeEpochEST     = strptime(appLogTimeUTC+" UTC","%Y-%m-%d %H:%M:%S.%3Q %Z") |
        eval appLogTimeEpochEST2    = strftime(appLogTimeEpochEST, "%Y-%m-%d %H:%M:%S.%3Q %Z")|
        eval splunkIngestionTimeEST = strftime(_time, "%Y-%m-%d %H:%M:%S.%3Q %Z")|
        eval _time                  = appLogTimeEpochEST |

        eval timeAndAppLogTimeUTCDeltaSec=_time-appLogTimeEpochEST |

        sort appLogTimeEpochEST |

        fields S2T_REX_T1_StartTime appLogTimeUTC appLogTimeEpochEST2 S2T_REX_T1_ApplicationVersion S2T_REX_T1_callType S2T_REX_T1_callSourceAppType elapsedTimeFullCall elapsedTime S2T_REX_T1_ASRSessionTime legCount eventType transactionRetCode _time splunkIngestionTimeEST callId isCaller streamingStatus telemetryState transactionId startFragmentNumber fromNumber toNumber S2T_REX_T1_fromName S2T_REX_T1_toName S2T_REX_T1_PraestoAgentId  S2T_REX_T1_PraestoAgentSid direction msgsSentLeg msgsSent kvsResourceName kvsEndpoint channelConfigName channelConfigVersion S2T_REX_T1_ZoomHostEmail S2T_REX_T1_ZoomMeetingId S2T_REX_T1_ZoomMeetingAgenda S2T_REX_T1_ZoomleadCandMatchMin S2T_REX_T1_ZoomleadCandMatchAvg  S2T_REX_T1_ZoomTotalMatchMin S2T_REX_T1_ZoomTotalMatchAvg S2T_REX_T1_ZoomMatchConfAvg S2T_REX_T1_ZoomTotalAmbTxnsSent S2T_REX_T1_ZoomSngSpkrTscrptPcnt S2T_REX_T1_ZoomToNonTscrbdIntvls S2T_REX_T1_SpkrIntvlsTcrbdPcnt S2T_REX_T1_ZoomTotalTscrptErrors S2T_REX_T1_counterpartyLineType S2T_REX_T1_counterpartyDefBtnLbl  S2T_REX_T1_counterPartyLineDesc S2T_REX_T1_counterpartyInstance S2T_REX_T1_paIdentity timeAndAppLogTimeUTCDeltaSec S2T_REX_T1_KubeCluster S2T_REX_T1_KubeNamespace hostName loggroup logstream retryCount errorMsgs 

        S2T_REX_T1_mkvEventCount S2T_REX_T1_KvsRecAttemptLeg S2T_REX_T1_KvsRecAttempt S2T_REX_T1_mkvEventDiffTagCount S2T_REX_T1_mkvEventNoNextMCount S2T_REX_T1_mkvEventLast 

        | fields - _raw |

        rename S2T_REX_T1_toName as toName S2T_REX_T1_fromName as fromName S2T_REX_T1_PraestoAgentId as PraestoAgentId S2T_REX_T1_PraestoAgentSid as PraestoAgentSid S2T_REX_T1_StartTime as startTime S2T_REX_T1_callType as CallType S2T_REX_T1_callSourceAppType as CallAppSrcType S2T_REX_T1_ZoomMeetingId as ZoomMeetingId  S2T_REX_T1_ZoomHostEmail as ZoomHostEmail S2T_REX_T1_ZoomleadCandMatchMin as ZoomLeadCandidateMatchMin S2T_REX_T1_ZoomleadCandMatchAvg as ZoomleadCandMatchAvg S2T_REX_T1_ZoomMeetingAgenda as ZoomMeetingAgenda S2T_REX_T1_ZoomTotalMatchAvg as ZoomTotalMatchAvg S2T_REX_T1_ZoomTotalMatchMin as ZoomTotalMatchMin S2T_REX_T1_ZoomMatchConfAvg as ZoomMatchConfAvg S2T_REX_T1_ZoomTotalAmbTxnsSent as ZoomTotalAmbiguousTranscriptionSent S2T_REX_T1_ZoomSngSpkrTscrptPcnt as ZoomSingleSpkrTranscriptionPcnt S2T_REX_T1_ZoomToNonTscrbdIntvls as ZoomTotalNonTranscribedIntervals S2T_REX_T1_SpkrIntvlsTcrbdPcnt as ZoomSpeakerIntervalsTranscribedPercent  S2T_REX_T1_ZoomTotalTscrptErrors as ZoomTotalTranscriptErrors S2T_REX_T1_ASRSessionTime as asrSessionTime

         S2T_REX_T1_counterparyName as TVScounterparyName S2T_REX_T1_counterpartyLineType as TVSlineType S2T_REX_T1_counterpartyDefBtnLbl as TVSdefaultButtonLabel S2T_REX_T1_counterpartyInstance as TVScounterpartyInstance S2T_REX_T1_paIdentity as TVSpaIdentity S2T_REX_T1_counterPartyLineDesc as TVScounterPartyLineDescription

          S2T_REX_T1_mkvEventCount as mkvEventCount S2T_REX_T1_KvsRecAttemptLeg as KvsRecAttemptLegCount S2T_REX_T1_KvsRecAttempt as KvsRecAttemptCount S2T_REX_T1_mkvEventDiffTagCount as  mkvEventDiffTagCount S2T_REX_T1_mkvEventNoNextMCount as mkvEventNoNextMCount S2T_REX_T1_mkvEventLast as mkvEventLast

          S2T_REX_T1_ApplicationVersion as applicationVersion S2T_REX_T1_KubeCluster as kubeCluster S2T_REX_T1_KubeNamespace as kubeNamespace`
afaisman commented 1 month ago

`import datetime

import requests
from requests.auth import HTTPBasicAuth
import json
import time

username = 'R743104'
password = '!!'

# Configuration
url = 'https://wh-1a907fee12.svr.us.jpmchase.net:13008'
auth = HTTPBasicAuth(username, password)
verify_ssl = False  # Change to True in production with valid SSL certificates

# Start a search job
search_query = """
search index=cloudwatchlogs loggroup="/aws/lambda/abc" OR "aws/containers" sourcetype="aws:lambda"
| eval caller=coalesce(caller, 'unknown')
| search telemetryState="BEGIN" OR telemetryState="END"
| eval actualStartTime=strptime(_time, "%Y-%m-%dT%H:%M:%S.%3Q")
| sort 0 - _time
| fields + _time, src_ip, user, action
"""
search_endpoint = f"{url}/services/search/jobs"
headers = {
    'Accept': 'application/xml',  # Set header to accept XML response, matching the expected response format
}
# Pass the query in the POST body; without it the jobs endpoint has nothing to run
response = requests.post(search_endpoint, data={'search': search_query}, auth=auth, headers=headers, verify=verify_ssl)

# Write response content to file with a timestamp in the filename
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
filename = f'response{timestamp}.xml'

with open(filename, 'w') as file:
    file.write(response.text)

print(f'Response saved to {filename}')

`

afaisman commented 1 month ago

3223 Hanover St, Floor 02

Palo Alto, CA, 94304-1121, United States

Christopher L Fabri

afaisman commented 1 month ago

index=cloudwatchlogs loggroup="/aws/eks/*/containers" seal_id="104120" meta_account_no=647191266616 meta_env=TEST logstream="transcribe-engine*"

        (meta_region="*")  
        (streamingStatus="STARTED") 
        isCaller="false" 
        (telemetryState="*") 
        (transactionRetCode="*")
        (eventType="END") 

        channelConfigName = * 
        callId          = "*" 
        kvsResourceName = "*"
        fromNumber      = "*"
        toNumber        = "*"
        transactionId   = "*"
        S2T_REX_T1_callType = "CHIME" 
        S2T_REX_T1_callSourceAppType= "*"    
        |
        search telemetryState="BEGIN" OR telemetryState="END" OR  telemetryState="IN-PROGRESS"
        |
        eval zoomHostEmail1 = if( isnull(S2T_REX_T1_ZoomHostEmail),"*",S2T_REX_T1_ZoomHostEmail) |
        search (zoomHostEmail1 = *) |

        eval appLogTimeEpochUTC = strptime(appLogTimeUTC, "%Y-%m-%d %H:%M:%S.%3Q") |
        eval appLogTimeEpochEST     = strptime(appLogTimeUTC+" UTC","%Y-%m-%d %H:%M:%S.%3Q %Z") |
        eval appLogTimeEpochEST2    = strftime(appLogTimeEpochEST, "%Y-%m-%d %H:%M:%S.%3Q %Z")|
        eval splunkIngestionTimeEST = strftime(_time, "%Y-%m-%d %H:%M:%S.%3Q %Z")|
        eval _time                  = appLogTimeEpochEST |

        eval timeAndAppLogTimeUTCDeltaSec=_time-appLogTimeEpochEST |

        sort appLogTimeEpochEST |

        fields S2T_REX_T1_StartTime appLogTimeEpochEST2 elapsedTimeFullCall elapsedTime S2T_REX_T1_ASRSessionTime legCount eventType streamingStatus transactionRetCode telemetryState msgsSent msgsSentLeg  errorMsgs retryCount callId isCaller S2T_REX_T1_callType transactionId  startFragmentNumber fromNumber toNumber S2T_REX_T1_fromName S2T_REX_T1_toName S2T_REX_T1_paIdentity S2T_REX_T1_PraestoAgentId S2T_REX_T1_PraestoAgentSid direction  kvsResourceName kvsEndpoint channelConfigName channelConfigVersion S2T_REX_T1_ZoomHostEmail S2T_REX_T1_ZoomMeetingId S2T_REX_T1_ZoomMeetingAgenda S2T_REX_T1_ZoomleadCandMatchMin S2T_REX_T1_ZoomleadCandMatchAvg  S2T_REX_T1_ZoomTotalMatchMin S2T_REX_T1_ZoomTotalMatchAvg S2T_REX_T1_ZoomMatchConfAvg S2T_REX_T1_ZoomTotalAmbTxnsSent S2T_REX_T1_ZoomSngSpkrTscrptPcnt S2T_REX_T1_ZoomToNonTscrbdIntvls S2T_REX_T1_SpkrIntvlsTcrbdPcnt S2T_REX_T1_ZoomTotalTscrptErrors S2T_REX_T1_counterpartyLineType S2T_REX_T1_counterpartyDefBtnLbl  S2T_REX_T1_counterPartyLineDesc S2T_REX_T1_counterpartyInstance timeAndAppLogTimeUTCDeltaSec S2T_REX_T1_ApplicationVersion S2T_REX_T1_KubeCluster S2T_REX_T1_KubeNamespace hostName loggroup logstream   

        S2T_REX_T1_mkvEventCount S2T_REX_T1_KvsRecAttemptLeg S2T_REX_T1_KvsRecAttempt S2T_REX_T1_mkvEventDiffTagCount S2T_REX_T1_mkvEventNoNextMCount S2T_REX_T1_mkvEventLast 

        | fields - _raw |

        rename S2T_REX_T1_toName as toName S2T_REX_T1_fromName as fromName S2T_REX_T1_ASRSessionTime as asrTime  S2T_REX_T1_PraestoAgentId as PraestoAgentId S2T_REX_T1_PraestoAgentSid as PraestoAgentSid S2T_REX_T1_StartTime as startTime S2T_REX_T1_callType as CallType S2T_REX_T1_callSourceAppType as CallAppSrcType 
            elapsedTimeFullCall as "Elapsed Time Full Call"
            legCount as "Leg Count"
            streamingStatus as "Streaming Status"
             retryCount as "Transcribe Retry Count"
             msgsSent as "Msgs Sent"
             msgsSentLeg as "Msgs Sent Leg"
             S2T_REX_T1_ZoomMeetingId as ZoomMeetingId
             S2T_REX_T1_ZoomHostEmail as ZoomHostEmail
             S2T_REX_T1_ZoomleadCandMatchMin as ZoomLeadCandidateMatchMin S2T_REX_T1_ZoomleadCandMatchAvg as ZoomleadCandMatchAvg S2T_REX_T1_ZoomMeetingAgenda as ZoomMeetingAgenda S2T_REX_T1_ZoomTotalMatchAvg as ZoomTotalMatchAvg S2T_REX_T1_ZoomTotalMatchMin as ZoomTotalMatchMin S2T_REX_T1_ZoomMatchConfAvg as ZoomMatchConfAvg S2T_REX_T1_ZoomTotalAmbTxnsSent as ZoomTotalAmbiguousTranscriptionSent S2T_REX_T1_ZoomSngSpkrTscrptPcnt as ZoomSingleSpkrTranscriptionPcnt S2T_REX_T1_ZoomToNonTscrbdIntvls as ZoomTotalNonTranscribedIntervals S2T_REX_T1_SpkrIntvlsTcrbdPcnt as ZoomSpeakerIntervalsTranscribedPercent  S2T_REX_T1_ZoomTotalTscrptErrors as ZoomTotalTranscriptErrors
            S2T_REX_T1_paIdentity as paIdentity
            S2T_REX_T1_KubeCluster as cluster
            S2T_REX_T1_KubeNamespace as namespace
            S2T_REX_T1_ApplicationVersion as "app version"

         S2T_REX_T1_counterparyName as TVScounterparyName S2T_REX_T1_counterpartyLineType as TVSlineType S2T_REX_T1_counterpartyDefBtnLbl as TVSdefaultButtonLabel S2T_REX_T1_counterpartyInstance as TVScounterpartyInstance  S2T_REX_T1_counterPartyLineDesc as TVScounterPartyLineDescription

          S2T_REX_T1_mkvEventCount as mkvEventCount S2T_REX_T1_KvsRecAttemptLeg as KvsRecAttemptLegCount S2T_REX_T1_KvsRecAttempt as KvsRecAttemptCount S2T_REX_T1_mkvEventDiffTagCount as  mkvEventDiffTagCount S2T_REX_T1_mkvEventNoNextMCount as mkvEventNoNextMCount S2T_REX_T1_mkvEventLast as mkvEventLast
afaisman commented 1 month ago

startTime,appLogTimeEpochEST2,"Elapsed Time Full Call",elapsedTime,asrTime,"Leg Count",eventType,"Streaming Status",transactionRetCode,telemetryState,"Msgs Sent","Msgs Sent Leg",errorMsgs,"Transcribe Retry Count",callId,isCaller,CallType,transactionId,startFragmentNumber,fromNumber,toNumber,fromName,toName,paIdentity,PraestoAgentId,PraestoAgentSid,direction,kvsResourceName,kvsEndpoint,channelConfigName,channelConfigVersion,ZoomHostEmail,ZoomMeetingId,ZoomMeetingAgenda,ZoomLeadCandidateMatchMin,ZoomleadCandMatchAvg,ZoomTotalMatchMin,ZoomTotalMatchAvg,ZoomMatchConfAvg,ZoomTotalAmbiguousTranscriptionSent,ZoomSingleSpkrTranscriptionPcnt,ZoomTotalNonTranscribedIntervals,ZoomSpeakerIntervalsTranscribedPercent,ZoomTotalTranscriptErrors,TVSlineType,TVSdefaultButtonLabel,TVScounterPartyLineDescription,TVScounterpartyInstance,timeAndAppLogTimeUTCDeltaSec,"app version",cluster,namespace,hostName,loggroup,logstream,mkvEventCount,KvsRecAttemptLegCount,KvsRecAttemptCount,mkvEventDiffTagCount,mkvEventNoNextMCount,mkvEventLast,"_time" "2024-08-09T19:58:05.847Z","2024-08-09 19:59:28.730 GMT","00:01:22.853","00:01:22.853",,1,END,STARTED,SUCCEEDED,END,1,1,"{}",,"677148119d05b097e62ef067aebab1a3@169.103.88.131",false,CHIME,"65cffddb-e84c-4021-9f4b-daeaec3d754f",91343852333292569697648946964045520169435558750,919709485206,103445990002,null,null,null,,,Outbound,"ChimeVoiceConnector-e1c87kank2bdcdtugdvkbf-b00804f9-f82b-4ad3-a650-a7ed93d3ba8c/1705417483089",,"AWM-ServiceConnectPraesto-89749","1.0.1",,,,,,,,,,,,,,null,null,null,null,"0.000000",,,,,"/aws/eks/rttr-p13-eks1/containers","transcribe-engine-56dd77784-x9vms_p13-realtime-1_transcribe-engine-8e168db71ef13f6f8ad91264a0d2ef467af014773c95e88746e1ee6cb48d04de",5,3,754,0,5,"MKVEvent(_conditionType=END_BY_NO_NEXT_MKV, _eventDateTime=2024-08-09T19:59:25.092936Z, _eventData=Stop processing KVS stream - MKVReader returns no next MKV record. transactionId:65cffddb-e84c-4021-9f4b-daeaec3d754f Fragment Meta: Optional[FragmentMetadata(fragmentNumberString=91343852333292569999706316549678323030067342858, serverSideTimestampMillis=1723233544641, producerSideTimestampMillis=1723233545775, fragmentNumber=91343852333292569999706316549678323030067342858, success=true, errorId=0, errorCode=null, millisBehindNow=OptionalLong[180], continuationToken=Optional[91343852333292569999706316549678323030067342858])]) mkvEventCount","2024-08-09T19:59:28.730+00:00"

afaisman commented 1 month ago

startTime,appLogTimeEpochEST2,"Elapsed Time Full Call",elapsedTime,asrTime,"Leg Count",eventType,"Streaming Status",transactionRetCode,telemetryState,"Msgs Sent","Msgs Sent Leg",errorMsgs,"Transcribe Retry Count",callId,isCaller,CallType,transactionId,startFragmentNumber,fromNumber,toNumber,fromName,toName,paIdentity,PraestoAgentId,PraestoAgentSid,direction,kvsResourceName,kvsEndpoint,channelConfigName,channelConfigVersion,ZoomHostEmail,ZoomMeetingId,ZoomMeetingAgenda,ZoomLeadCandidateMatchMin,ZoomleadCandMatchAvg,ZoomTotalMatchMin,ZoomTotalMatchAvg,ZoomMatchConfAvg,ZoomTotalAmbiguousTranscriptionSent,ZoomSingleSpkrTranscriptionPcnt,ZoomTotalNonTranscribedIntervals,ZoomSpeakerIntervalsTranscribedPercent,ZoomTotalTranscriptErrors,TVSlineType,TVSdefaultButtonLabel,TVScounterPartyLineDescription,TVScounterpartyInstance,timeAndAppLogTimeUTCDeltaSec,"app version",cluster,namespace,hostName,loggroup,logstream,mkvEventCount,KvsRecAttemptLegCount,KvsRecAttemptCount,mkvEventDiffTagCount,mkvEventNoNextMCount,mkvEventLast,"_time" "2024-08-09T19:58:05.847Z","2024-08-09 19:59:28.730 GMT","00:01:22.853","00:01:22.853",,1,END,STARTED,SUCCEEDED,END,1,1,"{}",,"677148119d05b097e62ef067aebab1a3@169.103.88.131",false,CHIME,"65cffddb-e84c-4021-9f4b-daeaec3d754f",91343852333292569697648946964045520169435558750,919709485206,103445990002,null,null,null,,,Outbound,"ChimeVoiceConnector-e1c87kank2bdcdtugdvkbf-b00804f9-f82b-4ad3-a650-a7ed93d3ba8c/1705417483089",,"AWM-ServiceConnectPraesto-89749","1.0.1",,,,,,,,,,,,,,null,null,null,null,"0.000000",,,,,"/aws/eks/rttr-p13-eks1/containers","transcribe-engine-56dd77784-x9vms_p13-realtime-1_transcribe-engine-8e168db71ef13f6f8ad91264a0d2ef467af014773c95e88746e1ee6cb48d04de",5,3,754,0,5,"MKVEvent(_conditionType=END_BY_NO_NEXT_MKV, _eventDateTime=2024-08-09T19:59:25.092936Z, _eventData=Stop processing KVS stream - MKVReader returns no next MKV record. transactionId:65cffddb-e84c-4021-9f4b-daeaec3d754f Fragment Meta: Optional[FragmentMetadata(fragmentNumberString=91343852333292569999706316549678323030067342858, serverSideTimestampMillis=1723233544641, producerSideTimestampMillis=1723233545775, fragmentNumber=91343852333292569999706316549678323030067342858, success=true, errorId=0, errorCode=null, millisBehindNow=OptionalLong[180], continuationToken=Optional[91343852333292569999706316549678323030067342858])]) mkvEventCount","2024-08-09T19:59:28.730+00:00" "2024-08-09T19:58:59.075Z","2024-08-09 19:59:28.820 GMT","00:00:29.708","00:00:29.708",,1,END,STARTED,SUCCEEDED,END,1,1,"{}",,"33970068_16802317_133074529@169.70.110.205",false,CHIME,"b44e1788-a8ea-4455-a92e-231087951d75",91343852333212940442561954163242923191417256401,"+12156403414","+13127321065","Mathew Kraut",null,null,,,Outbound,"ChimeVoiceConnector-e1c87kank2bdcdtugdvkbf-b2bf6bf0-077b-4e55-8d07-dbe734e20421/1715004994582",,"AWM-AdvisorConnect-90176","1.0.1",,,,,,,,,,,,,,null,null,null,null,"0.000000",,,,,"/aws/eks/rttr-p11-eks1/containers","transcribe-engine-566f4fb65d-jnxqz_p11-realtime-1_transcribe-engine-be55cda53f93aec36747879d1dcdf73da74c7323d65afd67f5191d3133a2504a",5,3,1497,0,5,"MKVEvent(_conditionType=END_BY_NO_NEXT_MKV, _eventDateTime=2024-08-09T19:59:28.149262Z, _eventData=Stop processing KVS stream - MKVReader returns no next MKV record. 
transactionId:b44e1788-a8ea-4455-a92e-231087951d75 Fragment Meta: Optional[FragmentMetadata(fragmentNumberString=91343852333212940492079555734658136530274026704, serverSideTimestampMillis=1723233547793, producerSideTimestampMillis=1723233548009, fragmentNumber=91343852333212940492079555734658136530274026704, success=true, errorId=0, errorCode=null, millisBehindNow=OptionalLong[893], continuationToken=Optional[91343852333212940492079555734658136530274026704])]) mkvEventCount","2024-08-09T19:59:28.820+00:00" "

afaisman commented 1 month ago
startFragmentNumber
loggroup
logstream
mkvEventLast
afaisman commented 1 month ago

Columns in the CSV file: ['startTime', 'appLogTimeEpochEST2', 'Elapsed Time Full Call', 'elapsedTime', 'asrTime', 'Leg Count', 'eventType', 'Streaming Status', 'transactionRetCode', 'telemetryState', 'Msgs Sent', 'Msgs Sent Leg', 'errorMsgs', 'Transcribe Retry Count', 'callId', 'isCaller', 'CallType', 'transactionId', 'startFragmentNumber', 'fromNumber', 'toNumber', 'fromName', 'toName', 'paIdentity', 'PraestoAgentId', 'PraestoAgentSid', 'direction', 'kvsResourceName', 'kvsEndpoint', 'channelConfigName', 'channelConfigVersion', 'ZoomHostEmail', 'ZoomMeetingId', 'ZoomMeetingAgenda', 'ZoomLeadCandidateMatchMin', 'ZoomleadCandMatchAvg', 'ZoomTotalMatchMin', 'ZoomTotalMatchAvg', 'ZoomMatchConfAvg', 'ZoomTotalAmbiguousTranscriptionSent', 'ZoomSingleSpkrTranscriptionPcnt', 'ZoomTotalNonTranscribedIntervals', 'ZoomSpeakerIntervalsTranscribedPercent', 'ZoomTotalTranscriptErrors', 'TVSlineType', 'TVSdefaultButtonLabel', 'TVScounterPartyLineDescription', 'TVScounterpartyInstance', 'timeAndAppLogTimeUTCDeltaSec', 'app version', 'cluster', 'namespace', 'hostName', 'loggroup', 'logstream', 'mkvEventCount', 'KvsRecAttemptLegCount', 'KvsRecAttemptCount', 'mkvEventDiffTagCount', 'mkvEventNoNextMCount', 'mkvEventLast', '_time']

afaisman commented 1 month ago

-- call_log definition

CREATE TABLE call_log (
    startTime TEXT,
    appLogTimeEpochEST2 TEXT,
    elapsedTime TEXT,
    asrTime TEXT,
    LegCount INTEGER,
    eventType TEXT,
    StreamingStatus TEXT,
    transactionRetCode TEXT,
    telemetryState TEXT,
    MsgsSent INTEGER,
    MsgsSentLeg INTEGER,
    errorMsgs TEXT,
    TranscribeRetryCount INTEGER,
    callId TEXT PRIMARY KEY,
    isCaller BOOLEAN,
    CallType TEXT,
    transactionId TEXT,
    fromNumber TEXT,
    toNumber TEXT,
    fromName TEXT,
    toName TEXT,
    paIdentity TEXT,
    PraestoAgentId TEXT,
    PraestoAgentSid TEXT,
    direction TEXT,
    channelConfigName TEXT,
    channelConfigVersion TEXT,
    ZoomHostEmail TEXT,
    ZoomMeetingId TEXT,
    ZoomTotalTranscriptErrors INTEGER,
    timeAndAppLogTimeUTCDeltaSec REAL,
    app_version TEXT,
    cluster TEXT,
    namespace TEXT,
    hostName TEXT,
    _time TEXT
);
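
A minimal sketch of loading the exported Splunk CSV into this table; the file name call_log.csv and the chosen subset of columns are assumptions, not from the thread:

import csv
import sqlite3

CSV_PATH = "call_log.csv"  # hypothetical export file name

conn = sqlite3.connect("call_log.db")
cursor = conn.cursor()

with open(CSV_PATH, newline="") as f:
    reader = csv.DictReader(f)
    for row in reader:
        # Insert only a few of the columns that exist in call_log;
        # the CSV export carries many more columns than the table keeps.
        cursor.execute(
            "INSERT OR REPLACE INTO call_log(callId, startTime, fromNumber, toNumber, transactionRetCode, errorMsgs, _time) "
            "VALUES (?,?,?,?,?,?,?);",
            (row["callId"], row["startTime"], row["fromNumber"], row["toNumber"],
             row["transactionRetCode"], row["errorMsgs"], row["_time"]),
        )

conn.commit()
conn.close()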

afaisman commented 1 month ago

` def put_cloudwatch_metrics_scale(self, deployment_name_: str, transaction_id_: str, current_num_of_pods: int, target_num_of_pods: int, region_: str, namespace_: str, cluster_: str, housekeeper_id_: str):
    try:
        cloudwatch = boto3.client("cloudwatch", region_name=region_)
        retcode = cloudwatch.put_metric_data(Namespace="Housekeeper",
                                   MetricData=[
                                       {"MetricName": "PodCountTarget",
                                        "Dimensions": [{"Name": "Cluster", "Value": cluster_},
                                                       {"Name": "Region", "Value": region_},
                                                       {"Name": "Namespace", "Value": namespace_},
                                                       {"Name": "Deployment", "Value": deployment_name_},
                                                       {"Name": "transaction_id", "Value": transaction_id_},
                                                       {"Name": "housekeeper_id", "Value": housekeeper_id_}
                                                       ],
                                        "Value": target_num_of_pods,
                                        # "Current" is not a valid MetricData field, so it stays commented out
                                        # "Current": current_num_of_pods,
                                        "Unit": "Count"}
                                   ])
        AWSService.LOGGER.info(f"put_cloudwatch_metrics_scale retcode={retcode}")
    except Exception as e:
        AWSService.LOGGER.error(f"Exception: {e}")
        return None

`

afaisman commented 1 month ago

`import logging
import os

import boto3
from botocore.exceptions import ClientError
from s2t.infra.auto.services.aws.parameterStore.parameterStoreClientProxy import ParameterStoreClientProxy

"""
Provides a suite of services to interact with AWS resources, specifically for managing parameter storage
and CloudWatch metrics related to Kubernetes deployments. Facilitates operations such as saving serialized
deployment data to AWS Parameter Store and posting custom metrics to CloudWatch.
"""

class AWSService:
    LOGGER = logging.getLogger(__name__)
    LOGGER.setLevel(logging.INFO)

    AWS_PARAMETER_STORE_PREFIX_DEFAULT = 'please_define_AWS_PARAMETER_STORE_PREFIX_in_Spinnaker'
    AWS_PARAMETER_STORE_PREFIX_DEFAULT = "/application/s2t/esp/houseKeeper"

def __init__(self):
    pass

def get_parameter_store_prefix(self):
    try:
        env_var = os.environ.get("AWS_PARAMETER_STORE_PREFIX")
        if env_var is not None:
            return env_var
        else:
            return AWSService.AWS_PARAMETER_STORE_PREFIX_DEFAULT
    except Exception as e:
        AWSService.LOGGER.error(f"Exception: {e}")
        return None

def save_data_to_parameterstore(self, nsDeployment, region_: str):
    try:
        if nsDeployment is None:
            return

        # str_serialized = self.to_str(skip_operations_=False, skip_data=True)
        parameter_store_key_ = nsDeployment.url
        # self._save_to_ps(parameter_store_key_, str_serialized)

        # !!!!!!!!!!!!!
        #str_serialized_data = nsDeployment.to_str(skip_operations_=True, skip_data_=False)
        str_serialized_data = nsDeployment.data_to_str()
        # self._save_to_ps(parameter_store_key_ + "_data", str_serialized_data)
        self.save_to_ps(parameter_store_key_=parameter_store_key_ + "_data", str_serialized_=str_serialized_data, region_=region_)
    except Exception as e:
        AWSService.LOGGER.error(f"Exception: {e}")
        return None

def put_cloudwatch_metrics_watcher(self, deployment_name_: str, transaction_id_: str, current_num_of_pods: int, target_num_of_pods: int, region_: str, namespace_: str, cluster_: str, housekeeper_id_: str):
    try:
        cloudwatch = boto3.client("cloudwatch", region_name=region_)
        retcode = cloudwatch.put_metric_data(Namespace="Housekeeper",
                                   MetricData=[
                                       {"MetricName": "PodCountExpectedMinusActual",
                                        "Dimensions": [{"Name": "Cluster", "Value": cluster_},
                                                       {"Name": "Region", "Value": region_},
                                                       {"Name": "Namespace", "Value": namespace_},
                                                       {"Name": "Deployment", "Value": deployment_name_},
                                                       {"Name": "transaction_id", "Value": transaction_id_},
                                                       {"Name": "housekeeper_id", "Value": housekeeper_id_}
                                                       ],
                                        "Value": target_num_of_pods - current_num_of_pods,
                                        "Unit": "Count"}
                                   ])  # The difference calculated
        AWSService.LOGGER.info(f"put_cloudwatch_metrics_watcher, retcode={retcode}")
    except Exception as e:
        AWSService.LOGGER.error(f"Exception: {e}")
        return None

def put_cloudwatch_metrics_scale(self, deployment_name_: str, transaction_id_: str, current_num_of_pods: int, target_num_of_pods: int, region_: str, namespace_: str, cluster_: str, housekeeper_id_: str):
    try:
        cloudwatch = boto3.client("cloudwatch", region_name=region_)
        retcode = cloudwatch.put_metric_data(Namespace="Housekeeper",
                                   MetricData=[
                                       {"MetricName": "PodCountTarget",
                                        "Dimensions": [{"Name": "Cluster", "Value": cluster_},
                                                       {"Name": "Region", "Value": region_},
                                                       {"Name": "Namespace", "Value": namespace_},
                                                       {"Name": "Deployment", "Value": deployment_name_},
                                                       {"Name": "transaction_id", "Value": transaction_id_},
                                                       {"Name": "housekeeper_id", "Value": housekeeper_id_}
                                                       ],
                                        "Value": target_num_of_pods,
                                        # "Current": current_num_of_pods,
                                        "Unit": "Count"}
                                   ])
        AWSService.LOGGER.info(f"put_cloudwatch_metrics_scale retcode={retcode}")
    except Exception as e:
        AWSService.LOGGER.error(f"Exception: {e}")
        return None

def save_to_ps(self, parameter_store_key_, str_serialized_, region_: str):
    try:
        ssm_client = boto3.client("ssm", region_name=region_)
        parameter_store_proxy = ParameterStoreClientProxy(client_=ssm_client)
        response = parameter_store_proxy.put_parameter(parameter_store_key_, "", str_serialized_, dry_run_=False)
        AWSService.LOGGER.info(f"Parameter {parameter_store_key_} set.")
        return response
    except ClientError as e:
        AWSService.LOGGER.error(f"Exception: {e}")
        return None

`
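
A hedged usage sketch of the class above; the argument values are illustrative (the cluster and namespace names are borrowed from the log samples earlier in the thread), not a documented invocation:

svc = AWSService()
svc.put_cloudwatch_metrics_scale(
    deployment_name_="transcribe-engine",
    transaction_id_="txn-123",
    current_num_of_pods=3,
    target_num_of_pods=5,
    region_="us-east-1",
    namespace_="p13-realtime-1",
    cluster_="rttr-p13-eks1",
    housekeeper_id_="hk-1",
)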

afaisman commented 1 month ago

import datetime
import sqlite3

from test.staging.snippets.channel_configs_release.channel_tools.application.utils.db import DB

# Example usage:
db_file_path = 'call_log.db'

# Get the sorted call counts

def get_failed_calls(db_file_path):
    """Return a list of tuples for calls where transactionRetCode == 'FAILED', including the errorMsgs."""
    # Connect to the SQLite database
    conn = sqlite3.connect(db_file_path)
    cursor = conn.cursor()

    # Query to get failed calls with error messages
    cursor.execute("""
        SELECT fromNumber, toNumber, errorMsgs
        FROM call_log
        WHERE transactionRetCode = 'FAILED'
    """)
    failed_calls = cursor.fetchall()

    # Normalize the phone numbers and prepare the result tuples
    result = [(normalize_number(from_num), normalize_number(to_num), error_msg) for from_num, to_num, error_msg in
              failed_calls]

    # Close the database connection
    conn.close()

    return result

def normalize_number(number):
    """Normalize phone numbers to a consistent format starting with +1."""
    # Remove the leading '+' if present
    if number.startswith('+'):
        number = number[1:]
    # Remove any leading non-digit characters
    number = ''.join(filter(str.isdigit, number))
    # If the number has 10 digits, assume it's a US number and add '1' as the country code
    if len(number) == 10:
        number = '1' + number
    # Ensure the number starts with '+' (and, for US numbers, '+1');
    # prepending '+1' here would double the '1' for 11-digit numbers
    return '+' + number

def compare_phone_numbers(number1, number2):
    """Compare two phone numbers to determine if they are equivalent."""
    normalized_number1 = normalize_number(number1)
    normalized_number2 = normalize_number(number2)
    return normalized_number1 == normalized_number2
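
A quick sanity check of the normalization and comparison (illustrative values):

# Both forms should normalize to the same canonical number.
assert normalize_number('2156403414') == '+12156403414'
assert normalize_number('+12156403414') == '+12156403414'
assert compare_phone_numbers('2156403414', '+12156403414') is True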

def report_call_counts(db_file_path, start_time=None, end_time=None):
    """Return a list of tuples with the phone number, count of calls where it was toNumber,
    and count where it was fromNumber, sorted by fromNumber counts."""
    # Connect to the SQLite database
    conn = sqlite3.connect(db_file_path)
    cursor = conn.cursor()

    # Base query to retrieve relevant columns from the call_log table
    query = "SELECT fromNumber, toNumber FROM call_log"
    params = []

    # Add time filtering to the query if start_time and end_time are provided
    if start_time and end_time:
        query += " WHERE startTime BETWEEN ? AND ?"
        params.extend([start_time, end_time])
    elif start_time:
        query += " WHERE startTime >= ?"
        params.append(start_time)
    elif end_time:
        query += " WHERE startTime <= ?"
        params.append(end_time)

    # Execute the query
    cursor.execute(query, params)
    rows = cursor.fetchall()

    # Dictionary to store the counts
    number_counts = {}

    # Process each row to count occurrences
    for from_number, to_number in rows:
        # Normalize both numbers
        normalized_from = normalize_number(from_number)
        normalized_to = normalize_number(to_number)

        # Count the fromNumber
        if normalized_from in number_counts:
            number_counts[normalized_from][1] += 1
        else:
            number_counts[normalized_from] = [0, 1]

        # Count the toNumber
        if normalized_to in number_counts:
            number_counts[normalized_to][0] += 1
        else:
            number_counts[normalized_to] = [1, 0]

    # Convert the dictionary to a list of tuples
    result = [(number, counts[0], counts[1]) for number, counts in number_counts.items()]

    # Sort the results by fromNumber counts (descending)
    result.sort(key=lambda x: x[2], reverse=True)

    # Close the database connection
    conn.close()

    return result

def report_last_n_days_call_counts(db_file_path, n_days): """Return call counts for calls started during the last N days."""

Calculate the start_time and end_time

end_time = datetime.datetime.utcnow()
start_time = end_time - datetime.timedelta(days=n_days)

# Convert the start and end times to ISO format strings
start_time_str = start_time.strftime('%Y-%m-%dT%H:%M:%SZ')
end_time_str = end_time.strftime('%Y-%m-%dT%H:%M:%SZ')

# Call report_call_counts with the calculated time range
return report_call_counts(db_file_path, start_time=start_time_str, end_time=end_time_str)

def is_number_in_call_counts(call_counts, number): """Return True if the given number is present in the call counts, False otherwise.""" for record in call_counts: if compare_phone_numbers(record[0], number): return True

return False

call_counts = report_call_counts(db_file_path) for item in call_counts: print(item)

Get the failed calls with error messages

failed_calls = get_failed_calls(db_file_path)

for item in failed_calls:

print(item)

db = DB() identifiers = db.db_list_identifiers_by_type('number') ret = report_call_counts(db_file_path),

ret1 = report_last_n_days_call_counts(db_file_path, 1)

ret2 = report_last_n_days_call_counts(db_file_path, 2)

ret3 = report_last_n_days_call_counts(db_file_path, 3)

count_used = 0 res = [] for i in identifiers: if not is_number_in_call_counts(ret, i[0]): res.append(i) else: count_used +=1

pass

Plan: build list 1) by loading the identifiers, and list 2) from all fromNumber and toNumber values combined within the last nDays; then print every identifier of type 'number' from list 1) that does not appear in list 2) — see the sketch below.
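
A minimal sketch of that report, assuming the helpers above and that db_list_identifiers_by_type returns tuples whose first element is the number:

def report_unused_numbers(db_file_path, n_days):
    """Print identifiers of type 'number' with no calls (from or to) in the last n_days."""
    db = DB()
    identifiers = db.db_list_identifiers_by_type('number')                 # list 1
    recent_counts = report_last_n_days_call_counts(db_file_path, n_days)   # list 2: from + to combined
    unused = [i for i in identifiers if not is_number_in_call_counts(recent_counts, i[0])]
    for identifier in unused:
        print(identifier)
    return unused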

afaisman commented 1 month ago

normalize_number edge case: the input 103454230025 came back as '+1103454230025' — an unconditional '+1' prefix duplicates the leading '1' when the digits already include the country code.
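
A quick check of that edge case — a sketch, assuming the normalize_number defined earlier in this thread (the 10-digit value is a made-up example; the 12-digit one is from the note above):

# Only bare 10-digit US numbers should get a country code added
assert normalize_number('2125550100') == '+12125550100'     # 10 digits: '1' prepended once
assert normalize_number('+12125550100') == '+12125550100'   # already normalized: unchanged
assert normalize_number('103454230025') == '+103454230025'  # 12 digits: no duplicated '1'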

afaisman commented 1 month ago

-- call_log definition

CREATE TABLE call_log (
    startTime TEXT,
    appLogTimeEpochEST2 TEXT,
    elapsedTime TEXT,
    asrTime TEXT,
    LegCount INTEGER,
    eventType TEXT,
    StreamingStatus TEXT,
    transactionRetCode TEXT,
    telemetryState TEXT,
    MsgsSent INTEGER,
    MsgsSentLeg INTEGER,
    errorMsgs TEXT,
    TranscribeRetryCount INTEGER,
    callId TEXT PRIMARY KEY,
    isCaller BOOLEAN,
    CallType TEXT,
    transactionId TEXT,
    fromNumber TEXT,
    toNumber TEXT,
    fromName TEXT,
    toName TEXT,
    paIdentity TEXT,
    PraestoAgentId TEXT,
    PraestoAgentSid TEXT,
    direction TEXT,
    channelConfigName TEXT,
    channelConfigVersion TEXT,
    ZoomHostEmail TEXT,
    ZoomMeetingId TEXT,
    ZoomTotalTranscriptErrors INTEGER,
    timeAndAppLogTimeUTCDeltaSec REAL,
    app_version TEXT,
    cluster TEXT,
    namespace TEXT,
    hostName TEXT,
    _time TEXT
);

afaisman commented 3 weeks ago

import pandas as pd
import sqlite3


def create_call_log_db(csv_file_path, db_file_path):
    # Load the CSV file into a DataFrame
    df = pd.read_csv(csv_file_path)

    # Debug: Print the column names to identify any discrepancies
    print("Columns in the CSV file:", df.columns.tolist())

    # Drop the specified columns, including the ones prefixed with 'TVS' and 'kvs',
    # and the Zoom-related columns mentioned
    columns_to_drop = [
        'startFragmentNumber', 'loggroup', 'logstream', 'mkvEventLast',
        'kvsResourceName', 'kvsEndpoint', 'KvsRecAttemptLegCount', 'KvsRecAttemptCount',
        'mkvEventDiffTagCount', 'mkvEventNoNextMCount', 'mkvEventCount',  # Drop mkvEventCount and related columns
        'TVSlineType', 'TVSdefaultButtonLabel', 'TVScounterPartyLineDescription',
        'TVScounterpartyInstance', 'ZoomMeetingAgenda', 'ZoomLeadCandidateMatchMin',
        'ZoomleadCandMatchAvg', 'ZoomTotalMatchMin', 'ZoomTotalMatchAvg', 'ZoomMatchConfAvg',
        'ZoomTotalAmbiguousTranscriptionSent', 'ZoomSingleSpkrTranscriptionPcnt',
        'ZoomTotalNonTranscribedIntervals', 'ZoomSpeakerIntervalsTranscribedPercent'
    ]
    df = df.drop(columns=[col for col in columns_to_drop if col in df.columns])

    # Rename DataFrame columns to match the database schema (removing spaces)
    df.rename(columns={
        'Elapsed Time Full Call': 'elapsedTimeFullCall',
        'Leg Count': 'LegCount',
        'Streaming Status': 'StreamingStatus',
        'Msgs Sent': 'MsgsSent',
        'Msgs Sent Leg': 'MsgsSentLeg',
        'Transcribe Retry Count': 'TranscribeRetryCount',
        'app version': 'app_version'
    }, inplace=True)

    # Ensure 'callId' is unique by removing duplicates
    df.drop_duplicates(subset=['callId'], inplace=True)

    # Connect to (or create) the SQLite database
    conn = sqlite3.connect(db_file_path)

    # Create the table with explicit schema if it does not exist
    cursor = conn.cursor()
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS call_log (
        startTime TEXT,
        appLogTimeEpochEST2 TEXT,
        elapsedTimeFullCall TEXT,
        asrTime TEXT,
        LegCount INTEGER,
        eventType TEXT,
        StreamingStatus TEXT,
        transactionRetCode TEXT,
        telemetryState TEXT,
        MsgsSent INTEGER,
        MsgsSentLeg INTEGER,
        errorMsgs TEXT,
        TranscribeRetryCount INTEGER,
        callId TEXT,
        isCaller BOOLEAN,
        CallType TEXT,
        transactionId TEXT,
        fromNumber TEXT,
        toNumber TEXT,
        fromName TEXT,
        toName TEXT,
        paIdentity TEXT,
        PraestoAgentId TEXT,
        PraestoAgentSid TEXT,
        direction TEXT,
        channelConfigName TEXT,
        channelConfigVersion TEXT,
        ZoomHostEmail TEXT,
        ZoomMeetingId TEXT,
        ZoomTotalTranscriptErrors INTEGER,
        timeAndAppLogTimeUTCDeltaSec REAL,
        app_version TEXT,
        cluster TEXT,
        namespace TEXT,
        hostName TEXT,
        CallAppSrcType TEXT,
        _time TEXT
    )
    """)

    # Commit changes and close the connection
    conn.commit()
    conn.close()

    print(f"Database created (if not exists) with table 'call_log' in '{db_file_path}'.")


def upsert_call_log_db(csv_file_path, db_file_path):
    # Ensure the database and table are created
    create_call_log_db(csv_file_path, db_file_path)

    # Load the CSV file into a DataFrame
    df = pd.read_csv(csv_file_path)

    # Drop the specified columns, including the ones prefixed with 'TVS' and 'kvs',
    # and the Zoom-related columns mentioned
    columns_to_drop = [
        'startFragmentNumber', 'loggroup', 'logstream', 'mkvEventLast', 'elapsedTime',
        'kvsResourceName', 'kvsEndpoint', 'KvsRecAttemptLegCount', 'KvsRecAttemptCount',
        'mkvEventDiffTagCount', 'mkvEventNoNextMCount', 'mkvEventCount',  # Drop mkvEventCount and related columns
        'TVSlineType', 'TVSdefaultButtonLabel', 'TVScounterPartyLineDescription',
        'TVScounterpartyInstance', 'ZoomMeetingAgenda', 'ZoomLeadCandidateMatchMin',
        'ZoomleadCandMatchAvg', 'ZoomTotalMatchMin', 'ZoomTotalMatchAvg', 'ZoomMatchConfAvg',
        'ZoomTotalAmbiguousTranscriptionSent', 'ZoomSingleSpkrTranscriptionPcnt',
        'ZoomTotalNonTranscribedIntervals', 'ZoomSpeakerIntervalsTranscribedPercent'
    ]
    df = df.drop(columns=[col for col in columns_to_drop if col in df.columns])

    # Rename DataFrame columns to match the database schema (removing spaces)
    df.rename(columns={
        'Elapsed Time Full Call': 'elapsedTimeFullCall',
        'Leg Count': 'LegCount',
        'Streaming Status': 'StreamingStatus',
        'Msgs Sent': 'MsgsSent',
        'Msgs Sent Leg': 'MsgsSentLeg',
        'Transcribe Retry Count': 'TranscribeRetryCount',
        'app version': 'app_version'
    }, inplace=True)

    # Connect to the SQLite database
    conn = sqlite3.connect(db_file_path)
    cursor = conn.cursor()

    # True upsert, kept for reference: ON CONFLICT(callId) requires a unique index on callId
    # for _, row in df.iterrows():
    #     placeholders = ', '.join(['?'] * len(row))
    #     columns = ', '.join([f'"{col}"' for col in row.index])  # Quote column names to handle SQL keywords and spaces
    #     sql = f"""
    #     INSERT INTO call_log ({columns})
    #     VALUES ({placeholders})
    #     ON CONFLICT(callId) DO UPDATE SET
    #         {', '.join([f'"{col}" = excluded."{col}"' for col in row.index if col != 'callId'])}
    #     """
    #     cursor.execute(sql, tuple(row))

    # Plain insert (the schema above declares no unique constraint on callId)
    for _, row in df.iterrows():
        placeholders = ', '.join(['?'] * len(row))
        columns = ', '.join([f'"{col}"' for col in row.index])  # Quote column names to handle SQL keywords and spaces
        sql = f"""
        INSERT INTO call_log ({columns})
        VALUES ({placeholders})
        """
        cursor.execute(sql, tuple(row))

    # Commit changes and close the connection
    conn.commit()
    conn.close()

    print(f"Data from '{csv_file_path}' has been upserted into '{db_file_path}'.")


# Execution: Load 'input.csv', create the database and table if not present, and upsert the data
csv_file_path = 'input.csv'
db_file_path = 'call_log.db'

upsert_call_log_db(csv_file_path, db_file_path)

afaisman commented 3 weeks ago

import sqlite3
import pandas as pd


# Function to convert elapsed time from "HH:MM:SS.SSS" format to seconds
def convert_to_seconds(time_str):
    hours, minutes, seconds = time_str.split(':')
    total_seconds = int(hours) * 3600 + int(minutes) * 60 + float(seconds)
    return total_seconds


# Connect to the SQLite database
conn = sqlite3.connect('call_log.db')

# Query to retrieve channelConfigName and elapsedTimeFullCall from the call_log table
query = "SELECT channelConfigName, elapsedTimeFullCall FROM call_log"

# Load the data into a pandas DataFrame
df = pd.read_sql_query(query, conn)

# Convert elapsedTimeFullCall to seconds
df['elapsedTimeFullCallSeconds'] = df['elapsedTimeFullCall'].apply(convert_to_seconds)

# Convert seconds to hours
df['elapsedTimeFullCallHours'] = df['elapsedTimeFullCallSeconds'] / 3600

# Group by channelConfigName and sum the elapsed time in hours
report_df = df.groupby('channelConfigName')['elapsedTimeFullCallHours'].sum().reset_index()

# Format the elapsedTimeFullCallHours to one decimal place
report_df['elapsedTimeFullCallHours'] = report_df['elapsedTimeFullCallHours'].round(1)

# Close the connection
conn.close()

# Print the resulting report
print(report_df)

# Optionally, save the report to a CSV file
report_df.to_csv('elapsed_time_report_in_hours.csv', index=False)

afaisman commented 3 weeks ago

-- identifiers definition

CREATE TABLE identifiers (
    id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
    identifierName VARCHAR(50) NOT NULL,
    identifierTypeId INT NOT NULL,
    envId INT NOT NULL,
    regionId INT NOT NULL,
    clientId INT NOT NULL,
    channelConfigId INT NOT NULL,
    description VARCHAR(256),
    createDateTime DATETIME NOT NULL DEFAULT (datetime(CURRENT_TIMESTAMP)),
    updateDateTime DATETIME NOT NULL DEFAULT (datetime(CURRENT_TIMESTAMP)),
    FOREIGN KEY(identifierTypeId) REFERENCES identifierTypes(id),
    FOREIGN KEY(regionId) REFERENCES regions(id),
    FOREIGN KEY(clientId) REFERENCES clients(id),
    FOREIGN KEY(envId) REFERENCES envs(id),
    UNIQUE (identifierName)
);

afaisman commented 2 weeks ago

def create_table_channel_configs(self):
    conn = None
    cursor = None
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        sql_query = """CREATE TABLE IF NOT EXISTS channelConfig (
            id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
            name TEXT(256) NOT NULL,
            regionId INTEGER,
            envId INTEGER,
            clientId INTEGER,
            createDateTime TEXT,
            updateDateTime INTEGER,
            description TEXT(256),
            overrides TEXT(4096),
            CONSTRAINT channelConfig_UN UNIQUE (id),
            CONSTRAINT unique_name UNIQUE (name),
            CONSTRAINT channelConfig_FK_1 FOREIGN KEY (clientId) REFERENCES clients(id),
            CONSTRAINT channelConfig_FK FOREIGN KEY (envId) REFERENCES envs(id),
            CONSTRAINT channelConfig_FK_2 FOREIGN KEY (regionId) REFERENCES regions(id)
            );"""
        cursor.execute(sql_query)
        conn.commit()
        return True  # table created (or already present)
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error(f"Failed to create table: {e}")
        return None
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()

def get_clientid_by_channel_name(self, name):
    conn = None
    cursor = None
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        sql_query = "SELECT clientId FROM channelConfig WHERE name = ?"
        cursor.execute(sql_query, (name,))
        result = cursor.fetchone()
        if result is not None:
            return result[0]  # clientId
        else:
            return None  # No matching record found
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error(f"Failed to get clientId for {name}: {e}")
        return None
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()

def get_overrides_by_channel_name(self, name):
    conn = None
    cursor = None
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        sql_query = "SELECT overrides FROM channelConfig WHERE name = ?"
        cursor.execute(sql_query, (name,))
        result = cursor.fetchone()
        if result is not None:
            return result[0]
        else:
            return None
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error(f"Failed to get clientId for {name}: {e}")
        return None
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()

def upsert_channel_config_data(self, name_, geo_region_id_, envId_, clientId_, description_, overrides_):
    conn = None
    cursor = None

    sql_upsert_channel_config_data = """
        INSERT INTO channelConfig (name, regionId, envId, clientId, description, overrides)
        VALUES (?, ?, ?, ?, ?, ?)
        ON CONFLICT(name) DO UPDATE SET
        regionId = excluded.regionId,
        envId = excluded.envId,
        clientId = excluded.clientId,
        description = excluded.description,
        overrides = excluded.overrides;
        """

    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        cursor.execute(sql_upsert_channel_config_data, (name_, geo_region_id_, envId_, clientId_, description_, overrides_))
        conn.commit()
        ChannelDataDBMngr.LOGGER.info(f"Upserted data for {name_}")
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error(f"Failed to upsert data for {name_}: {e}")
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()

def set_channel_config_overrides(self, channel_name_, overrides_):
    conn = None
    cursor = None
    sql_set_channel_config_overrides = """
    INSERT INTO channelConfig (name, overrides)
    VALUES (?, ?)
    ON CONFLICT(name) DO UPDATE SET
    overrides = excluded.overrides;
    """
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        cursor.execute(sql_set_channel_config_overrides, (channel_name_, overrides_))
        conn.commit()
        ChannelDataDBMngr.LOGGER.info(f"overrides set for {channel_name_}")
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error(f"Failed to set overrides for {channel_name_}: {e}")
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()

def get_channel_config_id_by_name(self, name):
    conn = None
    cursor = None
    try:
        sql_query = "SELECT id FROM channelConfig WHERE name = ?"
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        cursor.execute(sql_query, (name,))
        result = cursor.fetchone()
        if result is not None:
            return result[0]  # channelConfigId
        else:
            return None  # No matching record found
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error(f"Failed to get channelConfigId for {name}: {e}")
        return None  # no valid id on error
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
afaisman commented 2 weeks ago
def list_table_channelconfigs(self) -> bool:
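
A possible body for that stub — a minimal sketch, assuming the same sqlite3 connection pattern and ChannelDataDBMngr.LOGGER conventions as the other methods above (the selected column list is an assumption):

def list_table_channelconfigs(self) -> bool:
    """List all rows of the channelConfig table; return True on success."""
    conn = None
    cursor = None
    try:
        conn = sqlite3.connect(self._path)
        cursor = conn.cursor()
        cursor.execute("SELECT id, name, regionId, envId, clientId, description FROM channelConfig")
        for row in cursor.fetchall():
            ChannelDataDBMngr.LOGGER.info(f"channelConfig row: {row}")
        return True
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error(f"Failed to list channelConfig: {e}")
        return False
    finally:
        if cursor is not None:
            cursor.close()
        if conn is not None:
            conn.close()
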
afaisman commented 2 weeks ago

import json
import unittest
from typing import List, Tuple, Any, Union

from deepdiff import DeepDiff


def compare_json(json1: dict, json2: dict) -> Tuple[List[str], List[str], List[str]]:
    diff = DeepDiff(json1, json2, ignore_order=True)

    nodes_added = list(diff.get("dictionary_item_added", []))
    nodes_changed = list(diff.get("values_changed", {}).keys())
    nodes_removed = list(diff.get("dictionary_item_removed", []))

    return nodes_added, nodes_changed, nodes_removed


def clean_path(path: str) -> str:
    return path.replace("root['", "").replace("']['", "/").replace("']", "")


def generate_transformation_script(json1: dict, json2: dict, aws_region_: str, environment_: str) -> str:
    nodes_added, nodes_changed, nodes_removed = compare_json(json1, json2)

    script = []

    for path in nodes_removed:
        clean = clean_path(path)
        script.append(f"remove_value(json_obj, '{clean}')")

    for path in nodes_added:
        clean = clean_path(path)
        value = get_value_from_path(json2, clean)
        script.append(f"add_value(json_obj, '{clean}', {repr(value)})")

    for path in nodes_changed:
        clean = clean_path(path)
        if "subscribertelephonenumberurlsset" in clean.lower():
            continue
        value = get_value_from_path(json2, clean)
        script.append(f"set_value(json_obj, '{clean}', {repr(value)})")

    script_content = "\n\t".join(script)
    if script_content.strip() == "":
        script_content = "pass"
    return f"if aws_region == '{aws_region_}' and environment == '{environment_}':\n\t{script_content}\n"


def generate_transformation_script_from_files(json1: str, json2: str, aws_region_: str, environment_: str) -> str:
    with open(json1, "r") as f_json1:
        content1 = f_json1.read()
    json1 = json.loads(content1)
    with open(json2, "r") as f_json2:
        content2 = f_json2.read()
    json2 = json.loads(content2)

    return generate_transformation_script(json1=json1, json2=json2, aws_region_=aws_region_, environment_=environment_)


def get_value_from_path(json_obj: Union[dict, list], path: str) -> Any:
    keys = path.split("/")
    current = json_obj
    for key in keys:
        if key.isdigit():
            key = int(key)
        current = current[key]
    return current


def add_value(json_obj: Union[dict, list], path: str, value: Any) -> None:
    set_value(json_obj, path, value)


def set_value(json_obj: Union[dict, list], path: str, value: Any) -> None:
    keys = path.split("/")
    current = json_obj
    for key in keys[:-1]:
        if key.isdigit():
            key = int(key)
        if isinstance(current, list) and isinstance(key, int):
            while len(current) <= key:
                current.append({})
            current = current[key]
        elif isinstance(current, dict):
            if key not in current:
                current[key] = [] if keys[keys.index(key) + 1].isdigit() else {}
            current = current[key]
        else:
            raise ValueError(f"Invalid path: {path}")

    last_key = keys[-1]
    if last_key.isdigit():
        last_key = int(last_key)
    if isinstance(current, list) and isinstance(last_key, int):
        while len(current) <= last_key:
            current.append(None)
        current[last_key] = value
    elif isinstance(current, dict):
        current[last_key] = value
    else:
        raise ValueError(f"Invalid path: {path}")


def remove_value(json_obj: Union[dict, list], path: str) -> None:
    keys = path.split("/")
    current = json_obj
    for key in keys[:-1]:
        if key.isdigit():
            key = int(key)
        if isinstance(current, list) and isinstance(key, int):
            if key < len(current):
                current = current[key]
            else:
                return
        elif isinstance(current, dict):
            if key in current:
                current = current[key]
            else:
                return
        else:
            return

    last_key = keys[-1]
    if last_key.isdigit():
        last_key = int(last_key)
    if isinstance(current, list) and isinstance(last_key, int):
        if last_key < len(current):
            current.pop(last_key)
    elif isinstance(current, dict) and last_key in current:
        current.pop(last_key)


def apply_transformation_script(json1: dict, script: str, aws_region: str, environment: str) -> dict:
    # Create a copy of the json1 object to apply the transformations
    json_obj = json.loads(json.dumps(json1))  # Deep copy to avoid modifying the original json1

    # Execute the script to apply the transformations
    exec(script, {"json_obj": json_obj, "aws_region": aws_region, "environment": environment,
                  "add_value": add_value, "set_value": set_value, "remove_value": remove_value})

    return json_obj


# Example usage:
json1 = {"name": "John", "age": 30, "address": {"city": "New York", "zipcode": "10001"}}

json2 = {"name": "John", "age": 31, "address": {"city": "Los Angeles", "zipcode": "90001"}, "phone": "123-456-7890"}

# Generate transformation script
transformation_script = generate_transformation_script(json1, json2, aws_region_="default_region", environment_="default_env")
print("Transformation Script:\n", transformation_script)

# Apply transformation script to json1 (region/environment must match the generated guard)
transformed_json = apply_transformation_script(json1, transformation_script, aws_region="default_region", environment="default_env")
print("Transformed JSON:\n", transformed_json)


# Unit test
class TestJsonTransformation(unittest.TestCase):
    def test_compare_json(self):
        json1 = {"a": 1, "b": 2}
        json2 = {"a": 1, "b": 3, "c": 4}
        nodes_added, nodes_changed, nodes_removed = compare_json(json1, json2)
        self.assertEqual(nodes_added, ["root['c']"])
        self.assertEqual(nodes_changed, ["root['b']"])
        self.assertEqual(nodes_removed, [])

    def test_generate_transformation_script(self):
        json1 = {"name": "John", "age": 30, "address": {"city": "New York", "zipcode": "10001"}}

        json2 = {"name": "John", "age": 31, "address": {"city": "Los Angeles", "zipcode": "90001"}, "phone": "123-456-7890"}

        expected_script_lines = sorted(["add_value(json_obj, 'phone', '123-456-7890')", "set_value(json_obj, 'address/city', 'Los Angeles')", "set_value(json_obj, 'address/zipcode', '90001')", "set_value(json_obj, 'age', 31)"])

        generated_script = generate_transformation_script(json1, json2, "some_aws_region", "some_aws_environment")
        generated_script_lines = [line.strip() for line in sorted(generated_script.strip().split("\n")[1:])]  # Skip the first line (the if statement)
        expected_script_lines = [line.strip() for line in expected_script_lines]
        self.assertEqual(generated_script_lines, expected_script_lines)

    def test_apply_transformation_script(self):
        json1 = {"name": "John", "age": 30, "address": {"city": "New York", "zipcode": "10001"}}

        json2 = {"name": "John", "age": 31, "address": {"city": "Los Angeles", "zipcode": "90001"}, "phone": "123-456-7890"}

        transformation_script = generate_transformation_script(json1, json2, "some_aws_region", "some_aws_environment")
        # Apply with the same region/environment the script was generated for, so the guard passes
        transformed_json = apply_transformation_script(json1, transformation_script, aws_region="some_aws_region", environment="some_aws_environment")

        self.assertEqual(transformed_json, json2)

    def test_add_value(self):
        json_obj = {}
        add_value(json_obj, "a/b/c", 1)
        self.assertEqual(json_obj, {"a": {"b": {"c": 1}}})

    def test_set_value(self):
        json_obj = {}
        set_value(json_obj, "a/b/c", 1)
        self.assertEqual(json_obj, {"a": {"b": {"c": 1}}})

        json_obj = {"a": [1, 2, 3]}
        set_value(json_obj, "a/1", 5)
        self.assertEqual(json_obj, {"a": [1, 5, 3]})

    def test_remove_value(self):
        json_obj = {"a": {"b": {"c": 1}}}
        remove_value(json_obj, "a/b/c")
        self.assertEqual(json_obj, {"a": {"b": {}}})

        json_obj = [{"a": {"b": 2}}]
        remove_value(json_obj, "0/a/b")
        self.assertEqual(json_obj, [{"a": {}}])


if __name__ == "__main__":
    unittest.main()

afaisman commented 1 week ago

def create_table_identifiers(self) -> bool:
    ret_code = False
    try:
        conn = sqlite3.connect(self._path)
        ChannelDataDBMngr.LOGGER.debug("Opened database successfully")

        conn.execute(
            """CREATE TABLE IF NOT EXISTS identifiers
                 (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
                 identifierName VARCHAR(50) NOT NULL,
                 identifierTypeId INT NOT NULL,
                 envId INT NOT NULL,
                 regionId INT NOT NULL,
                 clientId INT NOT NULL,
                 channelConfigId INT NOT NULL,
                 description VARCHAR(256),
                 createDateTime DATETIME NOT NULL DEFAULT (datetime(CURRENT_TIMESTAMP)),
                 updateDateTime DATETIME NOT NULL DEFAULT (datetime(CURRENT_TIMESTAMP)),
                 FOREIGN KEY(identifierTypeId) REFERENCES identifierTypes(id),
                 FOREIGN KEY(regionId) REFERENCES regions(id),
                 FOREIGN KEY(clientId) REFERENCES clients(id),
                 FOREIGN KEY(envId) REFERENCES envs(id),
                 UNIQUE (identifierName)
                 );"""
        )

        # Keep updateDateTime current whenever a tracked column changes
        conn.execute(
            """
                 CREATE TRIGGER IF NOT EXISTS update_routing_identifiers UPDATE OF id,identifierName,identifierTypeId,envId,regionId,clientId,channelConfigId,description ON identifiers
                  BEGIN
                    UPDATE identifiers SET updateDateTime = datetime(CURRENT_TIMESTAMP) WHERE id = old.id;
                  END;
                 """
        )

        ChannelDataDBMngr.LOGGER.debug("Operation done successfully")
        conn.commit()
        conn.close()

        ret_code = True
    except Exception as e:
        ChannelDataDBMngr.LOGGER.error("Exception e:{}".format(e))
    return ret_code

afaisman commented 3 days ago

-- call_log definition

CREATE TABLE call_log (
    startTime TEXT,
    appLogTimeEpochEST2 TEXT,
    elapsedTimeFullCall TEXT,
    asrTime TEXT,
    LegCount INTEGER,
    eventType TEXT,
    StreamingStatus TEXT,
    transactionRetCode TEXT,
    telemetryState TEXT,
    MsgsSent INTEGER,
    MsgsSentLeg INTEGER,
    errorMsgs TEXT,
    TranscribeRetryCount INTEGER,
    callId TEXT,
    isCaller BOOLEAN,
    CallType TEXT,
    transactionId TEXT,
    fromNumber TEXT,
    toNumber TEXT,
    fromName TEXT,
    toName TEXT,
    paIdentity TEXT,
    PraestoAgentId TEXT,
    PraestoAgentSid TEXT,
    direction TEXT,
    channelConfigName TEXT,
    channelConfigVersion TEXT,
    ZoomHostEmail TEXT,
    ZoomMeetingId TEXT,
    ZoomTotalTranscriptErrors INTEGER,
    timeAndAppLogTimeUTCDeltaSec REAL,
    app_version TEXT,
    cluster TEXT,
    namespace TEXT,
    hostName TEXT,
    CallAppSrcType TEXT,
    _time TEXT,
    UNIQUE(callId, isCaller) -- Composite unique constraint
);
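
With UNIQUE(callId, isCaller) in place, the plain INSERT loop in upsert_call_log_db above can become a true upsert. A minimal sketch, assuming the same per-row DataFrame loop and quoted-column convention (upsert_rows is a hypothetical helper name):

import sqlite3
import pandas as pd

def upsert_rows(df: pd.DataFrame, db_file_path: str) -> None:
    # ON CONFLICT targets the composite unique constraint (callId, isCaller)
    conn = sqlite3.connect(db_file_path)
    cursor = conn.cursor()
    for _, row in df.iterrows():
        placeholders = ', '.join(['?'] * len(row))
        columns = ', '.join([f'"{col}"' for col in row.index])
        updates = ', '.join([f'"{col}" = excluded."{col}"'
                             for col in row.index if col not in ('callId', 'isCaller')])
        sql = f"""
        INSERT INTO call_log ({columns})
        VALUES ({placeholders})
        ON CONFLICT(callId, isCaller) DO UPDATE SET {updates}
        """
        cursor.execute(sql, tuple(row))
    conn.commit()
    conn.close()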

afaisman commented 3 days ago

-- identifiers definition

CREATE TABLE identifiers (
    id INTEGER,
    identifierName VARCHAR(33),
    identifierTypeId INTEGER,
    envId INTEGER,
    regionId INTEGER,
    clientId INTEGER,
    channelConfigId INTEGER,
    description VARCHAR(135),
    createDateTime VARCHAR(19),
    updateDateTime VARCHAR(19)
);

afaisman commented 1 day ago

pcl aws login --role-arn arn:aws:iam::902878509172:role/104120-redshift-read --sid R743104 --profile-name adfs --domain NAEAST

aws redshift get-cluster-credentials --cluster-identifier redshift-8b0800e84914 --db-user redshift_read --db-name mdldb_s2t_902878509172 --duration-seconds 3600 --region us-east-1
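
The second command returns a temporary DbUser/DbPassword pair. A minimal sketch of consuming those credentials from Python with boto3, assuming psycopg2 as the client; the host value is a placeholder for the actual cluster endpoint:

import boto3
import psycopg2  # assumed driver; any PostgreSQL-compatible client works

# Same parameters as the CLI call above
redshift = boto3.client('redshift', region_name='us-east-1')
creds = redshift.get_cluster_credentials(
    ClusterIdentifier='redshift-8b0800e84914',
    DbUser='redshift_read',
    DbName='mdldb_s2t_902878509172',
    DurationSeconds=3600,
)

# Connect with the temporary credentials; endpoint and port are placeholders
conn = psycopg2.connect(
    host='<cluster-endpoint>',
    port=5439,
    dbname='mdldb_s2t_902878509172',
    user=creds['DbUser'],          # temporary user returned by the call
    password=creds['DbPassword'],  # temporary password, valid for DurationSeconds
)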